author	dvshkurko <dvshkurko@yandex-team.ru>	2022-02-10 16:45:52 +0300
committer	Daniil Cherednik <dcherednik@yandex-team.ru>	2022-02-10 16:45:52 +0300
commit	c768a99151e47c3a4bb7b92c514d256abd301c4d (patch)
tree	1a2c5ffcf89eb53ecd79dbc9bc0a195c27404d0c /contrib/libs/grpc/test
parent	321ee9bce31ec6e238be26dbcbe539cffa2c3309 (diff)
download	ydb-c768a99151e47c3a4bb7b92c514d256abd301c4d.tar.gz
Restoring authorship annotation for <dvshkurko@yandex-team.ru>. Commit 2 of 2.
Diffstat (limited to 'contrib/libs/grpc/test')
-rw-r--r--	contrib/libs/grpc/test/core/util/cmdline.cc	596
-rw-r--r--	contrib/libs/grpc/test/core/util/cmdline.h	158
-rw-r--r--	contrib/libs/grpc/test/core/util/cmdline_test.cc	962
-rw-r--r--	contrib/libs/grpc/test/core/util/debugger_macros.cc	114
-rw-r--r--	contrib/libs/grpc/test/core/util/debugger_macros.h	54
-rw-r--r--	contrib/libs/grpc/test/core/util/fuzzer_corpus_test.cc	260
-rwxr-xr-x	contrib/libs/grpc/test/core/util/fuzzer_one_entry_runner.sh	36
-rw-r--r--	contrib/libs/grpc/test/core/util/fuzzer_util.cc	164
-rw-r--r--	contrib/libs/grpc/test/core/util/fuzzer_util.h	98
-rw-r--r--	contrib/libs/grpc/test/core/util/grpc_fuzzer.bzl	54
-rw-r--r--	contrib/libs/grpc/test/core/util/grpc_profiler.cc	90
-rw-r--r--	contrib/libs/grpc/test/core/util/grpc_profiler.h	50
-rw-r--r--	contrib/libs/grpc/test/core/util/histogram.cc	460
-rw-r--r--	contrib/libs/grpc/test/core/util/histogram.h	124
-rw-r--r--	contrib/libs/grpc/test/core/util/histogram_test.cc	326
-rw-r--r--	contrib/libs/grpc/test/core/util/lsan_suppressions.txt	12
-rw-r--r--	contrib/libs/grpc/test/core/util/memory_counters.cc	338
-rw-r--r--	contrib/libs/grpc/test/core/util/memory_counters.h	106
-rw-r--r--	contrib/libs/grpc/test/core/util/mock_endpoint.cc	280
-rw-r--r--	contrib/libs/grpc/test/core/util/mock_endpoint.h	58
-rw-r--r--	contrib/libs/grpc/test/core/util/one_corpus_entry_fuzzer.cc	96
-rw-r--r--	contrib/libs/grpc/test/core/util/parse_hexstring.cc	112
-rw-r--r--	contrib/libs/grpc/test/core/util/parse_hexstring.h	52
-rw-r--r--	contrib/libs/grpc/test/core/util/passthru_endpoint.cc	442
-rw-r--r--	contrib/libs/grpc/test/core/util/passthru_endpoint.h	86
-rw-r--r--	contrib/libs/grpc/test/core/util/port.cc	256
-rw-r--r--	contrib/libs/grpc/test/core/util/port.h	88
-rw-r--r--	contrib/libs/grpc/test/core/util/port_isolated_runtime_environment.cc	110
-rw-r--r--	contrib/libs/grpc/test/core/util/port_server_client.cc	430
-rw-r--r--	contrib/libs/grpc/test/core/util/port_server_client.h	60
-rw-r--r--	contrib/libs/grpc/test/core/util/reconnect_server.cc	228
-rw-r--r--	contrib/libs/grpc/test/core/util/reconnect_server.h	88
-rwxr-xr-x	contrib/libs/grpc/test/core/util/run_with_poller.sh	38
-rw-r--r--	contrib/libs/grpc/test/core/util/slice_splitter.cc	252
-rw-r--r--	contrib/libs/grpc/test/core/util/slice_splitter.h	106
-rw-r--r--	contrib/libs/grpc/test/core/util/subprocess.h	72
-rw-r--r--	contrib/libs/grpc/test/core/util/subprocess_posix.cc	200
-rw-r--r--	contrib/libs/grpc/test/core/util/subprocess_windows.cc	252
-rw-r--r--	contrib/libs/grpc/test/core/util/test_config.cc	708
-rw-r--r--	contrib/libs/grpc/test/core/util/test_config.h	114
-rw-r--r--	contrib/libs/grpc/test/core/util/test_lb_policies.cc	386
-rw-r--r--	contrib/libs/grpc/test/core/util/test_lb_policies.h	64
-rw-r--r--	contrib/libs/grpc/test/core/util/test_tcp_server.cc	210
-rw-r--r--	contrib/libs/grpc/test/core/util/test_tcp_server.h	72
-rw-r--r--	contrib/libs/grpc/test/core/util/tracer_util.cc	62
-rw-r--r--	contrib/libs/grpc/test/core/util/tracer_util.h	64
-rw-r--r--	contrib/libs/grpc/test/core/util/trickle_endpoint.cc	416
-rw-r--r--	contrib/libs/grpc/test/core/util/trickle_endpoint.h	64
-rw-r--r--	contrib/libs/grpc/test/core/util/tsan_suppressions.txt	26
-rw-r--r--	contrib/libs/grpc/test/core/util/ubsan_suppressions.txt	50
-rw-r--r--	contrib/libs/grpc/test/core/util/ya.make	86
-rw-r--r--	contrib/libs/grpc/test/cpp/README-iOS.md	104
-rw-r--r--	contrib/libs/grpc/test/cpp/end2end/async_end2end_test.cc	3730
-rw-r--r--	contrib/libs/grpc/test/cpp/end2end/channelz_service_test.cc	1530
-rw-r--r--	contrib/libs/grpc/test/cpp/end2end/client_callback_end2end_test.cc	2546
-rw-r--r--	contrib/libs/grpc/test/cpp/end2end/client_crash_test.cc	288
-rw-r--r--	contrib/libs/grpc/test/cpp/end2end/client_crash_test_server.cc	158
-rw-r--r--	contrib/libs/grpc/test/cpp/end2end/client_interceptors_end2end_test.cc	2036
-rw-r--r--	contrib/libs/grpc/test/cpp/end2end/client_lb_end2end_test.cc	3524
-rw-r--r--	contrib/libs/grpc/test/cpp/end2end/end2end_test.cc	4348
-rw-r--r--	contrib/libs/grpc/test/cpp/end2end/exception_test.cc	246
-rw-r--r--	contrib/libs/grpc/test/cpp/end2end/filter_end2end_test.cc	676
-rw-r--r--	contrib/libs/grpc/test/cpp/end2end/generic_end2end_test.cc	838
-rw-r--r--	contrib/libs/grpc/test/cpp/end2end/grpclb_end2end_test.cc	3514
-rw-r--r--	contrib/libs/grpc/test/cpp/end2end/health/ya.make	62
-rw-r--r--	contrib/libs/grpc/test/cpp/end2end/health_service_end2end_test.cc	724
-rw-r--r--	contrib/libs/grpc/test/cpp/end2end/hybrid_end2end_test.cc	1924
-rw-r--r--	contrib/libs/grpc/test/cpp/end2end/interceptors_util.cc	308
-rw-r--r--	contrib/libs/grpc/test/cpp/end2end/interceptors_util.h	594
-rw-r--r--	contrib/libs/grpc/test/cpp/end2end/mock_test.cc	832
-rw-r--r--	contrib/libs/grpc/test/cpp/end2end/nonblocking_test.cc	380
-rw-r--r--	contrib/libs/grpc/test/cpp/end2end/proto_server_reflection_test.cc	290
-rw-r--r--	contrib/libs/grpc/test/cpp/end2end/raw_end2end_test.cc	740
-rw-r--r--	contrib/libs/grpc/test/cpp/end2end/server_builder_plugin_test.cc	516
-rw-r--r--	contrib/libs/grpc/test/cpp/end2end/server_crash_test.cc	312
-rw-r--r--	contrib/libs/grpc/test/cpp/end2end/server_crash_test_client.cc	142
-rw-r--r--	contrib/libs/grpc/test/cpp/end2end/server_early_return_test.cc	460
-rw-r--r--	contrib/libs/grpc/test/cpp/end2end/server_interceptors/ya.make	60
-rw-r--r--	contrib/libs/grpc/test/cpp/end2end/server_interceptors_end2end_test.cc	1384
-rw-r--r--	contrib/libs/grpc/test/cpp/end2end/server_load_reporting_end2end_test.cc	368
-rw-r--r--	contrib/libs/grpc/test/cpp/end2end/shutdown_test.cc	334
-rw-r--r--	contrib/libs/grpc/test/cpp/end2end/streaming_throughput_test.cc	386
-rw-r--r--	contrib/libs/grpc/test/cpp/end2end/test_health_check_service_impl.cc	194
-rw-r--r--	contrib/libs/grpc/test/cpp/end2end/test_health_check_service_impl.h	112
-rw-r--r--	contrib/libs/grpc/test/cpp/end2end/test_service_impl.cc	1028
-rw-r--r--	contrib/libs/grpc/test/cpp/end2end/test_service_impl.h	218
-rw-r--r--	contrib/libs/grpc/test/cpp/end2end/thread/ya.make	60
-rw-r--r--	contrib/libs/grpc/test/cpp/end2end/thread_stress_test.cc	882
-rw-r--r--	contrib/libs/grpc/test/cpp/end2end/time_change_test.cc	724
-rw-r--r--	contrib/libs/grpc/test/cpp/end2end/ya.make	112
-rw-r--r--	contrib/libs/grpc/test/cpp/util/byte_buffer_test.cc	12
-rw-r--r--	contrib/libs/grpc/test/cpp/util/cli_call.h	2
-rw-r--r--	contrib/libs/grpc/test/cpp/util/cli_call_test.cc	4
-rw-r--r--	contrib/libs/grpc/test/cpp/util/cli_credentials.cc	4
-rw-r--r--	contrib/libs/grpc/test/cpp/util/create_test_channel.cc	182
-rw-r--r--	contrib/libs/grpc/test/cpp/util/create_test_channel.h	58
-rw-r--r--	contrib/libs/grpc/test/cpp/util/grpc_tool.cc	16
-rw-r--r--	contrib/libs/grpc/test/cpp/util/grpc_tool_test.cc	6
-rw-r--r--	contrib/libs/grpc/test/cpp/util/metrics_server.cc	4
-rw-r--r--	contrib/libs/grpc/test/cpp/util/metrics_server.h	4
-rw-r--r--	contrib/libs/grpc/test/cpp/util/proto_file_parser.h	4
-rw-r--r--	contrib/libs/grpc/test/cpp/util/proto_reflection_descriptor_database.cc	18
-rw-r--r--	contrib/libs/grpc/test/cpp/util/proto_reflection_descriptor_database.h	2
-rw-r--r--	contrib/libs/grpc/test/cpp/util/slice_test.cc	12
-rw-r--r--	contrib/libs/grpc/test/cpp/util/test_credentials_provider.cc	76
-rw-r--r--	contrib/libs/grpc/test/cpp/util/ya.make	6
106 files changed, 23362 insertions, 23362 deletions
diff --git a/contrib/libs/grpc/test/core/util/cmdline.cc b/contrib/libs/grpc/test/core/util/cmdline.cc
index 55d801ec7d..62b47f927a 100644
--- a/contrib/libs/grpc/test/core/util/cmdline.cc
+++ b/contrib/libs/grpc/test/core/util/cmdline.cc
@@ -1,319 +1,319 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include "test/core/util/cmdline.h"
-
-#include <limits.h>
-#include <stdio.h>
-#include <string.h>
-
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include "test/core/util/cmdline.h"
+
+#include <limits.h>
+#include <stdio.h>
+#include <string.h>
+
#include <vector>
#include "y_absl/strings/str_cat.h"
#include "y_absl/strings/str_format.h"
#include "y_absl/strings/str_join.h"
-#include <grpc/support/alloc.h>
-#include <grpc/support/log.h>
-#include <grpc/support/string_util.h>
-#include "src/core/lib/gpr/string.h"
-
-typedef enum { ARGTYPE_INT, ARGTYPE_BOOL, ARGTYPE_STRING } argtype;
-
-typedef struct arg {
- const char* name;
- const char* help;
- argtype type;
- void* value;
- struct arg* next;
-} arg;
-
-struct gpr_cmdline {
- const char* description;
- arg* args;
- const char* argv0;
-
- const char* extra_arg_name;
- const char* extra_arg_help;
- void (*extra_arg)(void* user_data, const char* arg);
- void* extra_arg_user_data;
-
- int (*state)(gpr_cmdline* cl, char* arg);
- arg* cur_arg;
-
- int survive_failure;
-};
-
-static int normal_state(gpr_cmdline* cl, char* arg);
-
-gpr_cmdline* gpr_cmdline_create(const char* description) {
- gpr_cmdline* cl = static_cast<gpr_cmdline*>(gpr_zalloc(sizeof(gpr_cmdline)));
-
- cl->description = description;
- cl->state = normal_state;
-
- return cl;
-}
-
-void gpr_cmdline_set_survive_failure(gpr_cmdline* cl) {
- cl->survive_failure = 1;
-}
-
-void gpr_cmdline_destroy(gpr_cmdline* cl) {
- while (cl->args) {
- arg* a = cl->args;
- cl->args = a->next;
- gpr_free(a);
- }
- gpr_free(cl);
-}
-
-static void add_arg(gpr_cmdline* cl, const char* name, const char* help,
- argtype type, void* value) {
- arg* a;
-
- for (a = cl->args; a; a = a->next) {
- GPR_ASSERT(0 != strcmp(a->name, name));
- }
-
- a = static_cast<arg*>(gpr_zalloc(sizeof(arg)));
- a->name = name;
- a->help = help;
- a->type = type;
- a->value = value;
- a->next = cl->args;
- cl->args = a;
-}
-
-void gpr_cmdline_add_int(gpr_cmdline* cl, const char* name, const char* help,
- int* value) {
- add_arg(cl, name, help, ARGTYPE_INT, value);
-}
-
-void gpr_cmdline_add_flag(gpr_cmdline* cl, const char* name, const char* help,
- int* value) {
- add_arg(cl, name, help, ARGTYPE_BOOL, value);
-}
-
-void gpr_cmdline_add_string(gpr_cmdline* cl, const char* name, const char* help,
- const char** value) {
- add_arg(cl, name, help, ARGTYPE_STRING, value);
-}
-
-void gpr_cmdline_on_extra_arg(
- gpr_cmdline* cl, const char* name, const char* help,
- void (*on_extra_arg)(void* user_data, const char* arg), void* user_data) {
- GPR_ASSERT(!cl->extra_arg);
- GPR_ASSERT(on_extra_arg);
-
- cl->extra_arg = on_extra_arg;
- cl->extra_arg_user_data = user_data;
- cl->extra_arg_name = name;
- cl->extra_arg_help = help;
-}
-
-/* recursively descend argument list, adding the last element
- to s first - so that arguments are added in the order they were
- added to the list by api calls */
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/string_util.h>
+#include "src/core/lib/gpr/string.h"
+
+typedef enum { ARGTYPE_INT, ARGTYPE_BOOL, ARGTYPE_STRING } argtype;
+
+typedef struct arg {
+ const char* name;
+ const char* help;
+ argtype type;
+ void* value;
+ struct arg* next;
+} arg;
+
+struct gpr_cmdline {
+ const char* description;
+ arg* args;
+ const char* argv0;
+
+ const char* extra_arg_name;
+ const char* extra_arg_help;
+ void (*extra_arg)(void* user_data, const char* arg);
+ void* extra_arg_user_data;
+
+ int (*state)(gpr_cmdline* cl, char* arg);
+ arg* cur_arg;
+
+ int survive_failure;
+};
+
+static int normal_state(gpr_cmdline* cl, char* arg);
+
+gpr_cmdline* gpr_cmdline_create(const char* description) {
+ gpr_cmdline* cl = static_cast<gpr_cmdline*>(gpr_zalloc(sizeof(gpr_cmdline)));
+
+ cl->description = description;
+ cl->state = normal_state;
+
+ return cl;
+}
+
+void gpr_cmdline_set_survive_failure(gpr_cmdline* cl) {
+ cl->survive_failure = 1;
+}
+
+void gpr_cmdline_destroy(gpr_cmdline* cl) {
+ while (cl->args) {
+ arg* a = cl->args;
+ cl->args = a->next;
+ gpr_free(a);
+ }
+ gpr_free(cl);
+}
+
+static void add_arg(gpr_cmdline* cl, const char* name, const char* help,
+ argtype type, void* value) {
+ arg* a;
+
+ for (a = cl->args; a; a = a->next) {
+ GPR_ASSERT(0 != strcmp(a->name, name));
+ }
+
+ a = static_cast<arg*>(gpr_zalloc(sizeof(arg)));
+ a->name = name;
+ a->help = help;
+ a->type = type;
+ a->value = value;
+ a->next = cl->args;
+ cl->args = a;
+}
+
+void gpr_cmdline_add_int(gpr_cmdline* cl, const char* name, const char* help,
+ int* value) {
+ add_arg(cl, name, help, ARGTYPE_INT, value);
+}
+
+void gpr_cmdline_add_flag(gpr_cmdline* cl, const char* name, const char* help,
+ int* value) {
+ add_arg(cl, name, help, ARGTYPE_BOOL, value);
+}
+
+void gpr_cmdline_add_string(gpr_cmdline* cl, const char* name, const char* help,
+ const char** value) {
+ add_arg(cl, name, help, ARGTYPE_STRING, value);
+}
+
+void gpr_cmdline_on_extra_arg(
+ gpr_cmdline* cl, const char* name, const char* help,
+ void (*on_extra_arg)(void* user_data, const char* arg), void* user_data) {
+ GPR_ASSERT(!cl->extra_arg);
+ GPR_ASSERT(on_extra_arg);
+
+ cl->extra_arg = on_extra_arg;
+ cl->extra_arg_user_data = user_data;
+ cl->extra_arg_name = name;
+ cl->extra_arg_help = help;
+}
+
+/* recursively descend argument list, adding the last element
+ to s first - so that arguments are added in the order they were
+ added to the list by api calls */
static void add_args_to_usage(arg* a, std::vector<TString>* s) {
if (a == nullptr) return;
add_args_to_usage(a->next, s);
- switch (a->type) {
- case ARGTYPE_BOOL:
+ switch (a->type) {
+ case ARGTYPE_BOOL:
s->push_back(y_absl::StrFormat(" [--%s|--no-%s]", a->name, a->name));
- break;
- case ARGTYPE_STRING:
+ break;
+ case ARGTYPE_STRING:
s->push_back(y_absl::StrFormat(" [--%s=string]", a->name));
- break;
- case ARGTYPE_INT:
+ break;
+ case ARGTYPE_INT:
s->push_back(y_absl::StrFormat(" [--%s=int]", a->name));
- break;
- }
-}
-
+ break;
+ }
+}
+
TString gpr_cmdline_usage_string(gpr_cmdline* cl, const char* argv0) {
- const char* name = strrchr(argv0, '/');
+ const char* name = strrchr(argv0, '/');
if (name != nullptr) {
- name++;
- } else {
- name = argv0;
- }
-
+ name++;
+ } else {
+ name = argv0;
+ }
+
std::vector<TString> s;
s.push_back(y_absl::StrCat("Usage: ", name));
add_args_to_usage(cl->args, &s);
- if (cl->extra_arg) {
+ if (cl->extra_arg) {
s.push_back(y_absl::StrFormat(" [%s...]", cl->extra_arg_name));
- }
+ }
s.push_back("\n");
return y_absl::StrJoin(s, "");
-}
-
-static int print_usage_and_die(gpr_cmdline* cl) {
+}
+
+static int print_usage_and_die(gpr_cmdline* cl) {
fprintf(stderr, "%s", gpr_cmdline_usage_string(cl, cl->argv0).c_str());
- if (!cl->survive_failure) {
- exit(1);
- }
- return 0;
-}
-
-static int extra_state(gpr_cmdline* cl, char* str) {
- if (!cl->extra_arg) {
- return print_usage_and_die(cl);
- }
- cl->extra_arg(cl->extra_arg_user_data, str);
- return 1;
-}
-
-static arg* find_arg(gpr_cmdline* cl, char* name) {
- arg* a;
-
- for (a = cl->args; a; a = a->next) {
- if (0 == strcmp(a->name, name)) {
- break;
- }
- }
-
- if (!a) {
- fprintf(stderr, "Unknown argument: %s\n", name);
- return nullptr;
- }
-
- return a;
-}
-
-static int value_state(gpr_cmdline* cl, char* str) {
- long intval;
- char* end;
-
- GPR_ASSERT(cl->cur_arg);
-
- switch (cl->cur_arg->type) {
- case ARGTYPE_INT:
- intval = strtol(str, &end, 0);
- if (*end || intval < INT_MIN || intval > INT_MAX) {
- fprintf(stderr, "expected integer, got '%s' for %s\n", str,
- cl->cur_arg->name);
- return print_usage_and_die(cl);
- }
- *static_cast<int*>(cl->cur_arg->value) = static_cast<int>(intval);
- break;
- case ARGTYPE_BOOL:
- if (0 == strcmp(str, "1") || 0 == strcmp(str, "true")) {
- *static_cast<int*>(cl->cur_arg->value) = 1;
- } else if (0 == strcmp(str, "0") || 0 == strcmp(str, "false")) {
- *static_cast<int*>(cl->cur_arg->value) = 0;
- } else {
- fprintf(stderr, "expected boolean, got '%s' for %s\n", str,
- cl->cur_arg->name);
- return print_usage_and_die(cl);
- }
- break;
- case ARGTYPE_STRING:
- *static_cast<char**>(cl->cur_arg->value) = str;
- break;
- }
-
- cl->state = normal_state;
- return 1;
-}
-
-static int normal_state(gpr_cmdline* cl, char* str) {
- char* eq = nullptr;
- char* tmp = nullptr;
- char* arg_name = nullptr;
- int r = 1;
-
- if (0 == strcmp(str, "-help") || 0 == strcmp(str, "--help") ||
- 0 == strcmp(str, "-h")) {
- return print_usage_and_die(cl);
- }
-
- cl->cur_arg = nullptr;
-
- if (str[0] == '-') {
- if (str[1] == '-') {
- if (str[2] == 0) {
- /* handle '--' to move to just extra args */
- cl->state = extra_state;
- return 1;
- }
- str += 2;
- } else {
- str += 1;
- }
- /* first byte of str is now past the leading '-' or '--' */
- if (str[0] == 'n' && str[1] == 'o' && str[2] == '-') {
- /* str is of the form '--no-foo' - it's a flag disable */
- str += 3;
- cl->cur_arg = find_arg(cl, str);
- if (cl->cur_arg == nullptr) {
- return print_usage_and_die(cl);
- }
- if (cl->cur_arg->type != ARGTYPE_BOOL) {
- fprintf(stderr, "%s is not a flag argument\n", str);
- return print_usage_and_die(cl);
- }
- *static_cast<int*>(cl->cur_arg->value) = 0;
- return 1; /* early out */
- }
- eq = strchr(str, '=');
- if (eq != nullptr) {
- /* copy the string into a temp buffer and extract the name */
- tmp = arg_name =
- static_cast<char*>(gpr_malloc(static_cast<size_t>(eq - str + 1)));
- memcpy(arg_name, str, static_cast<size_t>(eq - str));
- arg_name[eq - str] = 0;
- } else {
- arg_name = str;
- }
- cl->cur_arg = find_arg(cl, arg_name);
- if (cl->cur_arg == nullptr) {
- return print_usage_and_die(cl);
- }
- if (eq != nullptr) {
- /* str was of the type --foo=value, parse the value */
- r = value_state(cl, eq + 1);
- } else if (cl->cur_arg->type != ARGTYPE_BOOL) {
- /* flag types don't have a '--foo value' variant, other types do */
- cl->state = value_state;
- } else {
- /* flag parameter: just set the value */
- *static_cast<int*>(cl->cur_arg->value) = 1;
- }
- } else {
- r = extra_state(cl, str);
- }
-
- gpr_free(tmp);
- return r;
-}
-
-int gpr_cmdline_parse(gpr_cmdline* cl, int argc, char** argv) {
- int i;
-
- GPR_ASSERT(argc >= 1);
- cl->argv0 = argv[0];
-
- for (i = 1; i < argc; i++) {
- if (!cl->state(cl, argv[i])) {
- return 0;
- }
- }
- return 1;
-}
+ if (!cl->survive_failure) {
+ exit(1);
+ }
+ return 0;
+}
+
+static int extra_state(gpr_cmdline* cl, char* str) {
+ if (!cl->extra_arg) {
+ return print_usage_and_die(cl);
+ }
+ cl->extra_arg(cl->extra_arg_user_data, str);
+ return 1;
+}
+
+static arg* find_arg(gpr_cmdline* cl, char* name) {
+ arg* a;
+
+ for (a = cl->args; a; a = a->next) {
+ if (0 == strcmp(a->name, name)) {
+ break;
+ }
+ }
+
+ if (!a) {
+ fprintf(stderr, "Unknown argument: %s\n", name);
+ return nullptr;
+ }
+
+ return a;
+}
+
+static int value_state(gpr_cmdline* cl, char* str) {
+ long intval;
+ char* end;
+
+ GPR_ASSERT(cl->cur_arg);
+
+ switch (cl->cur_arg->type) {
+ case ARGTYPE_INT:
+ intval = strtol(str, &end, 0);
+ if (*end || intval < INT_MIN || intval > INT_MAX) {
+ fprintf(stderr, "expected integer, got '%s' for %s\n", str,
+ cl->cur_arg->name);
+ return print_usage_and_die(cl);
+ }
+ *static_cast<int*>(cl->cur_arg->value) = static_cast<int>(intval);
+ break;
+ case ARGTYPE_BOOL:
+ if (0 == strcmp(str, "1") || 0 == strcmp(str, "true")) {
+ *static_cast<int*>(cl->cur_arg->value) = 1;
+ } else if (0 == strcmp(str, "0") || 0 == strcmp(str, "false")) {
+ *static_cast<int*>(cl->cur_arg->value) = 0;
+ } else {
+ fprintf(stderr, "expected boolean, got '%s' for %s\n", str,
+ cl->cur_arg->name);
+ return print_usage_and_die(cl);
+ }
+ break;
+ case ARGTYPE_STRING:
+ *static_cast<char**>(cl->cur_arg->value) = str;
+ break;
+ }
+
+ cl->state = normal_state;
+ return 1;
+}
+
+static int normal_state(gpr_cmdline* cl, char* str) {
+ char* eq = nullptr;
+ char* tmp = nullptr;
+ char* arg_name = nullptr;
+ int r = 1;
+
+ if (0 == strcmp(str, "-help") || 0 == strcmp(str, "--help") ||
+ 0 == strcmp(str, "-h")) {
+ return print_usage_and_die(cl);
+ }
+
+ cl->cur_arg = nullptr;
+
+ if (str[0] == '-') {
+ if (str[1] == '-') {
+ if (str[2] == 0) {
+ /* handle '--' to move to just extra args */
+ cl->state = extra_state;
+ return 1;
+ }
+ str += 2;
+ } else {
+ str += 1;
+ }
+ /* first byte of str is now past the leading '-' or '--' */
+ if (str[0] == 'n' && str[1] == 'o' && str[2] == '-') {
+ /* str is of the form '--no-foo' - it's a flag disable */
+ str += 3;
+ cl->cur_arg = find_arg(cl, str);
+ if (cl->cur_arg == nullptr) {
+ return print_usage_and_die(cl);
+ }
+ if (cl->cur_arg->type != ARGTYPE_BOOL) {
+ fprintf(stderr, "%s is not a flag argument\n", str);
+ return print_usage_and_die(cl);
+ }
+ *static_cast<int*>(cl->cur_arg->value) = 0;
+ return 1; /* early out */
+ }
+ eq = strchr(str, '=');
+ if (eq != nullptr) {
+ /* copy the string into a temp buffer and extract the name */
+ tmp = arg_name =
+ static_cast<char*>(gpr_malloc(static_cast<size_t>(eq - str + 1)));
+ memcpy(arg_name, str, static_cast<size_t>(eq - str));
+ arg_name[eq - str] = 0;
+ } else {
+ arg_name = str;
+ }
+ cl->cur_arg = find_arg(cl, arg_name);
+ if (cl->cur_arg == nullptr) {
+ return print_usage_and_die(cl);
+ }
+ if (eq != nullptr) {
+ /* str was of the type --foo=value, parse the value */
+ r = value_state(cl, eq + 1);
+ } else if (cl->cur_arg->type != ARGTYPE_BOOL) {
+ /* flag types don't have a '--foo value' variant, other types do */
+ cl->state = value_state;
+ } else {
+ /* flag parameter: just set the value */
+ *static_cast<int*>(cl->cur_arg->value) = 1;
+ }
+ } else {
+ r = extra_state(cl, str);
+ }
+
+ gpr_free(tmp);
+ return r;
+}
+
+int gpr_cmdline_parse(gpr_cmdline* cl, int argc, char** argv) {
+ int i;
+
+ GPR_ASSERT(argc >= 1);
+ cl->argv0 = argv[0];
+
+ for (i = 1; i < argc; i++) {
+ if (!cl->state(cl, argv[i])) {
+ return 0;
+ }
+ }
+ return 1;
+}
diff --git a/contrib/libs/grpc/test/core/util/cmdline.h b/contrib/libs/grpc/test/core/util/cmdline.h
index 6a06bbfe43..cc75a8974e 100644
--- a/contrib/libs/grpc/test/core/util/cmdline.h
+++ b/contrib/libs/grpc/test/core/util/cmdline.h
@@ -1,82 +1,82 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#ifndef GRPC_TEST_CORE_UTIL_CMDLINE_H
-#define GRPC_TEST_CORE_UTIL_CMDLINE_H
-
-#include <grpc/support/port_platform.h>
-
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#ifndef GRPC_TEST_CORE_UTIL_CMDLINE_H
+#define GRPC_TEST_CORE_UTIL_CMDLINE_H
+
+#include <grpc/support/port_platform.h>
+
#include <util/generic/string.h>
-/** Simple command line parser.
-
- Supports flags that can be specified as -foo, --foo, --no-foo, -no-foo, etc
- And integers, strings that can be specified as -foo=4, -foo blah, etc
-
- No support for short command line options (but we may get that in the
- future.)
-
- Usage (for a program with a single flag argument 'foo'):
-
- int main(int argc, char **argv) {
- gpr_cmdline *cl;
- int verbose = 0;
-
- cl = gpr_cmdline_create("My cool tool");
- gpr_cmdline_add_int(cl, "verbose", "Produce verbose output?", &verbose);
- gpr_cmdline_parse(cl, argc, argv);
- gpr_cmdline_destroy(cl);
-
- if (verbose) {
- gpr_log(GPR_INFO, "Goodbye cruel world!");
- }
-
- return 0;
- } */
-
-typedef struct gpr_cmdline gpr_cmdline;
-
-/** Construct a command line parser: takes a short description of the tool
- doing the parsing */
-gpr_cmdline* gpr_cmdline_create(const char* description);
-/** Add an integer parameter, with a name (used on the command line) and some
- helpful text (used in the command usage) */
-void gpr_cmdline_add_int(gpr_cmdline* cl, const char* name, const char* help,
- int* value);
-/** The same, for a boolean flag */
-void gpr_cmdline_add_flag(gpr_cmdline* cl, const char* name, const char* help,
- int* value);
-/** And for a string */
-void gpr_cmdline_add_string(gpr_cmdline* cl, const char* name, const char* help,
- const char** value);
-/** Set a callback for non-named arguments */
-void gpr_cmdline_on_extra_arg(
- gpr_cmdline* cl, const char* name, const char* help,
- void (*on_extra_arg)(void* user_data, const char* arg), void* user_data);
-/** Enable surviving failure: default behavior is to exit the process */
-void gpr_cmdline_set_survive_failure(gpr_cmdline* cl);
-/** Parse the command line; returns 1 on success, on failure either dies
- (by default) or returns 0 if gpr_cmdline_set_survive_failure() has been
- called */
-int gpr_cmdline_parse(gpr_cmdline* cl, int argc, char** argv);
-/** Destroy the parser */
-void gpr_cmdline_destroy(gpr_cmdline* cl);
-/** Get a string describing usage */
+/** Simple command line parser.
+
+ Supports flags that can be specified as -foo, --foo, --no-foo, -no-foo, etc
+ And integers, strings that can be specified as -foo=4, -foo blah, etc
+
+ No support for short command line options (but we may get that in the
+ future.)
+
+ Usage (for a program with a single flag argument 'foo'):
+
+ int main(int argc, char **argv) {
+ gpr_cmdline *cl;
+ int verbose = 0;
+
+ cl = gpr_cmdline_create("My cool tool");
+ gpr_cmdline_add_int(cl, "verbose", "Produce verbose output?", &verbose);
+ gpr_cmdline_parse(cl, argc, argv);
+ gpr_cmdline_destroy(cl);
+
+ if (verbose) {
+ gpr_log(GPR_INFO, "Goodbye cruel world!");
+ }
+
+ return 0;
+ } */
+
+typedef struct gpr_cmdline gpr_cmdline;
+
+/** Construct a command line parser: takes a short description of the tool
+ doing the parsing */
+gpr_cmdline* gpr_cmdline_create(const char* description);
+/** Add an integer parameter, with a name (used on the command line) and some
+ helpful text (used in the command usage) */
+void gpr_cmdline_add_int(gpr_cmdline* cl, const char* name, const char* help,
+ int* value);
+/** The same, for a boolean flag */
+void gpr_cmdline_add_flag(gpr_cmdline* cl, const char* name, const char* help,
+ int* value);
+/** And for a string */
+void gpr_cmdline_add_string(gpr_cmdline* cl, const char* name, const char* help,
+ const char** value);
+/** Set a callback for non-named arguments */
+void gpr_cmdline_on_extra_arg(
+ gpr_cmdline* cl, const char* name, const char* help,
+ void (*on_extra_arg)(void* user_data, const char* arg), void* user_data);
+/** Enable surviving failure: default behavior is to exit the process */
+void gpr_cmdline_set_survive_failure(gpr_cmdline* cl);
+/** Parse the command line; returns 1 on success, on failure either dies
+ (by default) or returns 0 if gpr_cmdline_set_survive_failure() has been
+ called */
+int gpr_cmdline_parse(gpr_cmdline* cl, int argc, char** argv);
+/** Destroy the parser */
+void gpr_cmdline_destroy(gpr_cmdline* cl);
+/** Get a string describing usage */
TString gpr_cmdline_usage_string(gpr_cmdline* cl, const char* argv0);
-
-#endif /* GRPC_TEST_CORE_UTIL_CMDLINE_H */
+
+#endif /* GRPC_TEST_CORE_UTIL_CMDLINE_H */
diff --git a/contrib/libs/grpc/test/core/util/cmdline_test.cc b/contrib/libs/grpc/test/core/util/cmdline_test.cc
index 10628d18f0..b1b7da6b17 100644
--- a/contrib/libs/grpc/test/core/util/cmdline_test.cc
+++ b/contrib/libs/grpc/test/core/util/cmdline_test.cc
@@ -1,488 +1,488 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include <string.h>
-
-#include <grpc/support/alloc.h>
-#include <grpc/support/log.h>
-
-#include "src/core/lib/gpr/useful.h"
-#include "test/core/util/cmdline.h"
-#include "test/core/util/test_config.h"
-
-#define LOG_TEST() gpr_log(GPR_INFO, "test at %s:%d", __FILE__, __LINE__)
-
-static void test_simple_int(void) {
- int x = 1;
- gpr_cmdline* cl;
- char* args[] = {(char*)__FILE__, const_cast<char*>("-foo"),
- const_cast<char*>("3")};
-
- LOG_TEST();
-
- cl = gpr_cmdline_create(nullptr);
- gpr_cmdline_add_int(cl, "foo", nullptr, &x);
- GPR_ASSERT(x == 1);
- gpr_cmdline_parse(cl, GPR_ARRAY_SIZE(args), args);
- GPR_ASSERT(x == 3);
- gpr_cmdline_destroy(cl);
-}
-
-static void test_eq_int(void) {
- int x = 1;
- gpr_cmdline* cl;
- char* args[] = {(char*)__FILE__, const_cast<char*>("-foo=3")};
-
- LOG_TEST();
-
- cl = gpr_cmdline_create(nullptr);
- gpr_cmdline_add_int(cl, "foo", nullptr, &x);
- GPR_ASSERT(x == 1);
- gpr_cmdline_parse(cl, GPR_ARRAY_SIZE(args), args);
- GPR_ASSERT(x == 3);
- gpr_cmdline_destroy(cl);
-}
-
-static void test_2dash_int(void) {
- int x = 1;
- gpr_cmdline* cl;
- char* args[] = {(char*)__FILE__, const_cast<char*>("--foo"),
- const_cast<char*>("3")};
-
- LOG_TEST();
-
- cl = gpr_cmdline_create(nullptr);
- gpr_cmdline_add_int(cl, "foo", nullptr, &x);
- GPR_ASSERT(x == 1);
- gpr_cmdline_parse(cl, GPR_ARRAY_SIZE(args), args);
- GPR_ASSERT(x == 3);
- gpr_cmdline_destroy(cl);
-}
-
-static void test_2dash_eq_int(void) {
- int x = 1;
- gpr_cmdline* cl;
- char* args[] = {(char*)__FILE__, const_cast<char*>("--foo=3")};
-
- LOG_TEST();
-
- cl = gpr_cmdline_create(nullptr);
- gpr_cmdline_add_int(cl, "foo", nullptr, &x);
- GPR_ASSERT(x == 1);
- gpr_cmdline_parse(cl, GPR_ARRAY_SIZE(args), args);
- GPR_ASSERT(x == 3);
- gpr_cmdline_destroy(cl);
-}
-
-static void test_simple_string(void) {
- const char* x = nullptr;
- gpr_cmdline* cl;
- char* args[] = {(char*)__FILE__, const_cast<char*>("-foo"),
- const_cast<char*>("3")};
-
- LOG_TEST();
-
- cl = gpr_cmdline_create(nullptr);
- gpr_cmdline_add_string(cl, "foo", nullptr, &x);
- GPR_ASSERT(x == nullptr);
- gpr_cmdline_parse(cl, GPR_ARRAY_SIZE(args), args);
- GPR_ASSERT(0 == strcmp(x, "3"));
- gpr_cmdline_destroy(cl);
-}
-
-static void test_eq_string(void) {
- const char* x = nullptr;
- gpr_cmdline* cl;
- char* args[] = {(char*)__FILE__, const_cast<char*>("-foo=3")};
-
- LOG_TEST();
-
- cl = gpr_cmdline_create(nullptr);
- gpr_cmdline_add_string(cl, "foo", nullptr, &x);
- GPR_ASSERT(x == nullptr);
- gpr_cmdline_parse(cl, GPR_ARRAY_SIZE(args), args);
- GPR_ASSERT(0 == strcmp(x, "3"));
- gpr_cmdline_destroy(cl);
-}
-
-static void test_2dash_string(void) {
- const char* x = nullptr;
- gpr_cmdline* cl;
- char* args[] = {(char*)__FILE__, const_cast<char*>("--foo"),
- const_cast<char*>("3")};
-
- LOG_TEST();
-
- cl = gpr_cmdline_create(nullptr);
- gpr_cmdline_add_string(cl, "foo", nullptr, &x);
- GPR_ASSERT(x == nullptr);
- gpr_cmdline_parse(cl, GPR_ARRAY_SIZE(args), args);
- GPR_ASSERT(0 == strcmp(x, "3"));
- gpr_cmdline_destroy(cl);
-}
-
-static void test_2dash_eq_string(void) {
- const char* x = nullptr;
- gpr_cmdline* cl;
- char* args[] = {(char*)__FILE__, const_cast<char*>("--foo=3")};
-
- LOG_TEST();
-
- cl = gpr_cmdline_create(nullptr);
- gpr_cmdline_add_string(cl, "foo", nullptr, &x);
- GPR_ASSERT(x == nullptr);
- gpr_cmdline_parse(cl, GPR_ARRAY_SIZE(args), args);
- GPR_ASSERT(0 == strcmp(x, "3"));
- gpr_cmdline_destroy(cl);
-}
-
-static void test_flag_on(void) {
- int x = 2;
- gpr_cmdline* cl;
- char* args[] = {(char*)__FILE__, const_cast<char*>("--foo")};
-
- LOG_TEST();
-
- cl = gpr_cmdline_create(nullptr);
- gpr_cmdline_add_flag(cl, "foo", nullptr, &x);
- GPR_ASSERT(x == 2);
- gpr_cmdline_parse(cl, GPR_ARRAY_SIZE(args), args);
- GPR_ASSERT(x == 1);
- gpr_cmdline_destroy(cl);
-}
-
-static void test_flag_no(void) {
- int x = 2;
- gpr_cmdline* cl;
- char* args[] = {(char*)__FILE__, const_cast<char*>("--no-foo")};
-
- LOG_TEST();
-
- cl = gpr_cmdline_create(nullptr);
- gpr_cmdline_add_flag(cl, "foo", nullptr, &x);
- GPR_ASSERT(x == 2);
- gpr_cmdline_parse(cl, GPR_ARRAY_SIZE(args), args);
- GPR_ASSERT(x == 0);
- gpr_cmdline_destroy(cl);
-}
-
-static void test_flag_val_1(void) {
- int x = 2;
- gpr_cmdline* cl;
- char* args[] = {(char*)__FILE__, const_cast<char*>("--foo=1")};
-
- LOG_TEST();
-
- cl = gpr_cmdline_create(nullptr);
- gpr_cmdline_add_flag(cl, "foo", nullptr, &x);
- GPR_ASSERT(x == 2);
- gpr_cmdline_parse(cl, GPR_ARRAY_SIZE(args), args);
- GPR_ASSERT(x == 1);
- gpr_cmdline_destroy(cl);
-}
-
-static void test_flag_val_0(void) {
- int x = 2;
- gpr_cmdline* cl;
- char* args[] = {(char*)__FILE__, const_cast<char*>("--foo=0")};
-
- LOG_TEST();
-
- cl = gpr_cmdline_create(nullptr);
- gpr_cmdline_add_flag(cl, "foo", nullptr, &x);
- GPR_ASSERT(x == 2);
- gpr_cmdline_parse(cl, GPR_ARRAY_SIZE(args), args);
- GPR_ASSERT(x == 0);
- gpr_cmdline_destroy(cl);
-}
-
-static void test_flag_val_true(void) {
- int x = 2;
- gpr_cmdline* cl;
- char* args[] = {(char*)__FILE__, const_cast<char*>("--foo=true")};
-
- LOG_TEST();
-
- cl = gpr_cmdline_create(nullptr);
- gpr_cmdline_add_flag(cl, "foo", nullptr, &x);
- GPR_ASSERT(x == 2);
- gpr_cmdline_parse(cl, GPR_ARRAY_SIZE(args), args);
- GPR_ASSERT(x == 1);
- gpr_cmdline_destroy(cl);
-}
-
-static void test_flag_val_false(void) {
- int x = 2;
- gpr_cmdline* cl;
- char* args[] = {(char*)__FILE__, const_cast<char*>("--foo=false")};
-
- LOG_TEST();
-
- cl = gpr_cmdline_create(nullptr);
- gpr_cmdline_add_flag(cl, "foo", nullptr, &x);
- GPR_ASSERT(x == 2);
- gpr_cmdline_parse(cl, GPR_ARRAY_SIZE(args), args);
- GPR_ASSERT(x == 0);
- gpr_cmdline_destroy(cl);
-}
-
-static void test_many(void) {
- const char* str = nullptr;
- int x = 0;
- int flag = 2;
- gpr_cmdline* cl;
-
- char* args[] = {(char*)__FILE__, const_cast<char*>("--str"),
- const_cast<char*>("hello"), const_cast<char*>("-x=4"),
- const_cast<char*>("-no-flag")};
-
- LOG_TEST();
-
- cl = gpr_cmdline_create(nullptr);
- gpr_cmdline_add_string(cl, "str", nullptr, &str);
- gpr_cmdline_add_int(cl, "x", nullptr, &x);
- gpr_cmdline_add_flag(cl, "flag", nullptr, &flag);
- gpr_cmdline_parse(cl, GPR_ARRAY_SIZE(args), args);
- GPR_ASSERT(x == 4);
- GPR_ASSERT(0 == strcmp(str, "hello"));
- GPR_ASSERT(flag == 0);
- gpr_cmdline_destroy(cl);
-}
-
-static void extra_arg_cb(void* user_data, const char* arg) {
- int* count = static_cast<int*>(user_data);
- GPR_ASSERT(arg != nullptr);
- GPR_ASSERT(strlen(arg) == 1);
- GPR_ASSERT(arg[0] == 'a' + *count);
- ++*count;
-}
-
-static void test_extra(void) {
- gpr_cmdline* cl;
- int count = 0;
- char* args[] = {(char*)__FILE__, const_cast<char*>("a"),
- const_cast<char*>("b"), const_cast<char*>("c")};
-
- LOG_TEST();
-
- cl = gpr_cmdline_create(nullptr);
- gpr_cmdline_on_extra_arg(cl, "file", "filenames to process", extra_arg_cb,
- &count);
- gpr_cmdline_parse(cl, GPR_ARRAY_SIZE(args), args);
- GPR_ASSERT(count == 3);
- gpr_cmdline_destroy(cl);
-}
-
-static void test_extra_dashdash(void) {
- gpr_cmdline* cl;
- int count = 0;
- char* args[] = {(char*)__FILE__, const_cast<char*>("--"),
- const_cast<char*>("a"), const_cast<char*>("b"),
- const_cast<char*>("c")};
-
- LOG_TEST();
-
- cl = gpr_cmdline_create(nullptr);
- gpr_cmdline_on_extra_arg(cl, "file", "filenames to process", extra_arg_cb,
- &count);
- gpr_cmdline_parse(cl, GPR_ARRAY_SIZE(args), args);
- GPR_ASSERT(count == 3);
- gpr_cmdline_destroy(cl);
-}
-
-static void test_usage(void) {
- gpr_cmdline* cl;
-
- const char* str = nullptr;
- int x = 0;
- int flag = 2;
-
- LOG_TEST();
-
- cl = gpr_cmdline_create(nullptr);
- gpr_cmdline_add_string(cl, "str", nullptr, &str);
- gpr_cmdline_add_int(cl, "x", nullptr, &x);
- gpr_cmdline_add_flag(cl, "flag", nullptr, &flag);
- gpr_cmdline_on_extra_arg(cl, "file", "filenames to process", extra_arg_cb,
- nullptr);
-
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <string.h>
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+
+#include "src/core/lib/gpr/useful.h"
+#include "test/core/util/cmdline.h"
+#include "test/core/util/test_config.h"
+
+#define LOG_TEST() gpr_log(GPR_INFO, "test at %s:%d", __FILE__, __LINE__)
+
+static void test_simple_int(void) {
+ int x = 1;
+ gpr_cmdline* cl;
+ char* args[] = {(char*)__FILE__, const_cast<char*>("-foo"),
+ const_cast<char*>("3")};
+
+ LOG_TEST();
+
+ cl = gpr_cmdline_create(nullptr);
+ gpr_cmdline_add_int(cl, "foo", nullptr, &x);
+ GPR_ASSERT(x == 1);
+ gpr_cmdline_parse(cl, GPR_ARRAY_SIZE(args), args);
+ GPR_ASSERT(x == 3);
+ gpr_cmdline_destroy(cl);
+}
+
+static void test_eq_int(void) {
+ int x = 1;
+ gpr_cmdline* cl;
+ char* args[] = {(char*)__FILE__, const_cast<char*>("-foo=3")};
+
+ LOG_TEST();
+
+ cl = gpr_cmdline_create(nullptr);
+ gpr_cmdline_add_int(cl, "foo", nullptr, &x);
+ GPR_ASSERT(x == 1);
+ gpr_cmdline_parse(cl, GPR_ARRAY_SIZE(args), args);
+ GPR_ASSERT(x == 3);
+ gpr_cmdline_destroy(cl);
+}
+
+static void test_2dash_int(void) {
+ int x = 1;
+ gpr_cmdline* cl;
+ char* args[] = {(char*)__FILE__, const_cast<char*>("--foo"),
+ const_cast<char*>("3")};
+
+ LOG_TEST();
+
+ cl = gpr_cmdline_create(nullptr);
+ gpr_cmdline_add_int(cl, "foo", nullptr, &x);
+ GPR_ASSERT(x == 1);
+ gpr_cmdline_parse(cl, GPR_ARRAY_SIZE(args), args);
+ GPR_ASSERT(x == 3);
+ gpr_cmdline_destroy(cl);
+}
+
+static void test_2dash_eq_int(void) {
+ int x = 1;
+ gpr_cmdline* cl;
+ char* args[] = {(char*)__FILE__, const_cast<char*>("--foo=3")};
+
+ LOG_TEST();
+
+ cl = gpr_cmdline_create(nullptr);
+ gpr_cmdline_add_int(cl, "foo", nullptr, &x);
+ GPR_ASSERT(x == 1);
+ gpr_cmdline_parse(cl, GPR_ARRAY_SIZE(args), args);
+ GPR_ASSERT(x == 3);
+ gpr_cmdline_destroy(cl);
+}
+
+static void test_simple_string(void) {
+ const char* x = nullptr;
+ gpr_cmdline* cl;
+ char* args[] = {(char*)__FILE__, const_cast<char*>("-foo"),
+ const_cast<char*>("3")};
+
+ LOG_TEST();
+
+ cl = gpr_cmdline_create(nullptr);
+ gpr_cmdline_add_string(cl, "foo", nullptr, &x);
+ GPR_ASSERT(x == nullptr);
+ gpr_cmdline_parse(cl, GPR_ARRAY_SIZE(args), args);
+ GPR_ASSERT(0 == strcmp(x, "3"));
+ gpr_cmdline_destroy(cl);
+}
+
+static void test_eq_string(void) {
+ const char* x = nullptr;
+ gpr_cmdline* cl;
+ char* args[] = {(char*)__FILE__, const_cast<char*>("-foo=3")};
+
+ LOG_TEST();
+
+ cl = gpr_cmdline_create(nullptr);
+ gpr_cmdline_add_string(cl, "foo", nullptr, &x);
+ GPR_ASSERT(x == nullptr);
+ gpr_cmdline_parse(cl, GPR_ARRAY_SIZE(args), args);
+ GPR_ASSERT(0 == strcmp(x, "3"));
+ gpr_cmdline_destroy(cl);
+}
+
+static void test_2dash_string(void) {
+ const char* x = nullptr;
+ gpr_cmdline* cl;
+ char* args[] = {(char*)__FILE__, const_cast<char*>("--foo"),
+ const_cast<char*>("3")};
+
+ LOG_TEST();
+
+ cl = gpr_cmdline_create(nullptr);
+ gpr_cmdline_add_string(cl, "foo", nullptr, &x);
+ GPR_ASSERT(x == nullptr);
+ gpr_cmdline_parse(cl, GPR_ARRAY_SIZE(args), args);
+ GPR_ASSERT(0 == strcmp(x, "3"));
+ gpr_cmdline_destroy(cl);
+}
+
+static void test_2dash_eq_string(void) {
+ const char* x = nullptr;
+ gpr_cmdline* cl;
+ char* args[] = {(char*)__FILE__, const_cast<char*>("--foo=3")};
+
+ LOG_TEST();
+
+ cl = gpr_cmdline_create(nullptr);
+ gpr_cmdline_add_string(cl, "foo", nullptr, &x);
+ GPR_ASSERT(x == nullptr);
+ gpr_cmdline_parse(cl, GPR_ARRAY_SIZE(args), args);
+ GPR_ASSERT(0 == strcmp(x, "3"));
+ gpr_cmdline_destroy(cl);
+}
+
+static void test_flag_on(void) {
+ int x = 2;
+ gpr_cmdline* cl;
+ char* args[] = {(char*)__FILE__, const_cast<char*>("--foo")};
+
+ LOG_TEST();
+
+ cl = gpr_cmdline_create(nullptr);
+ gpr_cmdline_add_flag(cl, "foo", nullptr, &x);
+ GPR_ASSERT(x == 2);
+ gpr_cmdline_parse(cl, GPR_ARRAY_SIZE(args), args);
+ GPR_ASSERT(x == 1);
+ gpr_cmdline_destroy(cl);
+}
+
+static void test_flag_no(void) {
+ int x = 2;
+ gpr_cmdline* cl;
+ char* args[] = {(char*)__FILE__, const_cast<char*>("--no-foo")};
+
+ LOG_TEST();
+
+ cl = gpr_cmdline_create(nullptr);
+ gpr_cmdline_add_flag(cl, "foo", nullptr, &x);
+ GPR_ASSERT(x == 2);
+ gpr_cmdline_parse(cl, GPR_ARRAY_SIZE(args), args);
+ GPR_ASSERT(x == 0);
+ gpr_cmdline_destroy(cl);
+}
+
+static void test_flag_val_1(void) {
+ int x = 2;
+ gpr_cmdline* cl;
+ char* args[] = {(char*)__FILE__, const_cast<char*>("--foo=1")};
+
+ LOG_TEST();
+
+ cl = gpr_cmdline_create(nullptr);
+ gpr_cmdline_add_flag(cl, "foo", nullptr, &x);
+ GPR_ASSERT(x == 2);
+ gpr_cmdline_parse(cl, GPR_ARRAY_SIZE(args), args);
+ GPR_ASSERT(x == 1);
+ gpr_cmdline_destroy(cl);
+}
+
+static void test_flag_val_0(void) {
+ int x = 2;
+ gpr_cmdline* cl;
+ char* args[] = {(char*)__FILE__, const_cast<char*>("--foo=0")};
+
+ LOG_TEST();
+
+ cl = gpr_cmdline_create(nullptr);
+ gpr_cmdline_add_flag(cl, "foo", nullptr, &x);
+ GPR_ASSERT(x == 2);
+ gpr_cmdline_parse(cl, GPR_ARRAY_SIZE(args), args);
+ GPR_ASSERT(x == 0);
+ gpr_cmdline_destroy(cl);
+}
+
+static void test_flag_val_true(void) {
+ int x = 2;
+ gpr_cmdline* cl;
+ char* args[] = {(char*)__FILE__, const_cast<char*>("--foo=true")};
+
+ LOG_TEST();
+
+ cl = gpr_cmdline_create(nullptr);
+ gpr_cmdline_add_flag(cl, "foo", nullptr, &x);
+ GPR_ASSERT(x == 2);
+ gpr_cmdline_parse(cl, GPR_ARRAY_SIZE(args), args);
+ GPR_ASSERT(x == 1);
+ gpr_cmdline_destroy(cl);
+}
+
+static void test_flag_val_false(void) {
+ int x = 2;
+ gpr_cmdline* cl;
+ char* args[] = {(char*)__FILE__, const_cast<char*>("--foo=false")};
+
+ LOG_TEST();
+
+ cl = gpr_cmdline_create(nullptr);
+ gpr_cmdline_add_flag(cl, "foo", nullptr, &x);
+ GPR_ASSERT(x == 2);
+ gpr_cmdline_parse(cl, GPR_ARRAY_SIZE(args), args);
+ GPR_ASSERT(x == 0);
+ gpr_cmdline_destroy(cl);
+}
+
+static void test_many(void) {
+ const char* str = nullptr;
+ int x = 0;
+ int flag = 2;
+ gpr_cmdline* cl;
+
+ char* args[] = {(char*)__FILE__, const_cast<char*>("--str"),
+ const_cast<char*>("hello"), const_cast<char*>("-x=4"),
+ const_cast<char*>("-no-flag")};
+
+ LOG_TEST();
+
+ cl = gpr_cmdline_create(nullptr);
+ gpr_cmdline_add_string(cl, "str", nullptr, &str);
+ gpr_cmdline_add_int(cl, "x", nullptr, &x);
+ gpr_cmdline_add_flag(cl, "flag", nullptr, &flag);
+ gpr_cmdline_parse(cl, GPR_ARRAY_SIZE(args), args);
+ GPR_ASSERT(x == 4);
+ GPR_ASSERT(0 == strcmp(str, "hello"));
+ GPR_ASSERT(flag == 0);
+ gpr_cmdline_destroy(cl);
+}
+
+static void extra_arg_cb(void* user_data, const char* arg) {
+ int* count = static_cast<int*>(user_data);
+ GPR_ASSERT(arg != nullptr);
+ GPR_ASSERT(strlen(arg) == 1);
+ GPR_ASSERT(arg[0] == 'a' + *count);
+ ++*count;
+}
+
+static void test_extra(void) {
+ gpr_cmdline* cl;
+ int count = 0;
+ char* args[] = {(char*)__FILE__, const_cast<char*>("a"),
+ const_cast<char*>("b"), const_cast<char*>("c")};
+
+ LOG_TEST();
+
+ cl = gpr_cmdline_create(nullptr);
+ gpr_cmdline_on_extra_arg(cl, "file", "filenames to process", extra_arg_cb,
+ &count);
+ gpr_cmdline_parse(cl, GPR_ARRAY_SIZE(args), args);
+ GPR_ASSERT(count == 3);
+ gpr_cmdline_destroy(cl);
+}
+
+static void test_extra_dashdash(void) {
+ gpr_cmdline* cl;
+ int count = 0;
+ char* args[] = {(char*)__FILE__, const_cast<char*>("--"),
+ const_cast<char*>("a"), const_cast<char*>("b"),
+ const_cast<char*>("c")};
+
+ LOG_TEST();
+
+ cl = gpr_cmdline_create(nullptr);
+ gpr_cmdline_on_extra_arg(cl, "file", "filenames to process", extra_arg_cb,
+ &count);
+ gpr_cmdline_parse(cl, GPR_ARRAY_SIZE(args), args);
+ GPR_ASSERT(count == 3);
+ gpr_cmdline_destroy(cl);
+}
+
+static void test_usage(void) {
+ gpr_cmdline* cl;
+
+ const char* str = nullptr;
+ int x = 0;
+ int flag = 2;
+
+ LOG_TEST();
+
+ cl = gpr_cmdline_create(nullptr);
+ gpr_cmdline_add_string(cl, "str", nullptr, &str);
+ gpr_cmdline_add_int(cl, "x", nullptr, &x);
+ gpr_cmdline_add_flag(cl, "flag", nullptr, &flag);
+ gpr_cmdline_on_extra_arg(cl, "file", "filenames to process", extra_arg_cb,
+ nullptr);
+
TString usage = gpr_cmdline_usage_string(cl, "test");
GPR_ASSERT(usage ==
"Usage: test [--str=string] [--x=int] "
"[--flag|--no-flag] [file...]\n");
-
- usage = gpr_cmdline_usage_string(cl, "/foo/test");
+
+ usage = gpr_cmdline_usage_string(cl, "/foo/test");
GPR_ASSERT(usage ==
"Usage: test [--str=string] [--x=int] "
"[--flag|--no-flag] [file...]\n");
-
- gpr_cmdline_destroy(cl);
-}
-
-static void test_help(void) {
- gpr_cmdline* cl;
-
- const char* str = nullptr;
- int x = 0;
- int flag = 2;
-
- char* help[] = {(char*)__FILE__, const_cast<char*>("-h")};
-
- LOG_TEST();
-
- cl = gpr_cmdline_create(nullptr);
- gpr_cmdline_set_survive_failure(cl);
- gpr_cmdline_add_string(cl, "str", nullptr, &str);
- gpr_cmdline_add_int(cl, "x", nullptr, &x);
- gpr_cmdline_add_flag(cl, "flag", nullptr, &flag);
- gpr_cmdline_on_extra_arg(cl, "file", "filenames to process", extra_arg_cb,
- nullptr);
-
- GPR_ASSERT(0 == gpr_cmdline_parse(cl, GPR_ARRAY_SIZE(help), help));
-
- gpr_cmdline_destroy(cl);
-}
-
-static void test_badargs1(void) {
- gpr_cmdline* cl;
-
- const char* str = nullptr;
- int x = 0;
- int flag = 2;
-
- char* bad_arg_name[] = {(char*)__FILE__, const_cast<char*>("--y")};
-
- LOG_TEST();
-
- cl = gpr_cmdline_create(nullptr);
- gpr_cmdline_set_survive_failure(cl);
- gpr_cmdline_add_string(cl, "str", nullptr, &str);
- gpr_cmdline_add_int(cl, "x", nullptr, &x);
- gpr_cmdline_add_flag(cl, "flag", nullptr, &flag);
- gpr_cmdline_on_extra_arg(cl, "file", "filenames to process", extra_arg_cb,
- nullptr);
-
- GPR_ASSERT(0 ==
- gpr_cmdline_parse(cl, GPR_ARRAY_SIZE(bad_arg_name), bad_arg_name));
-
- gpr_cmdline_destroy(cl);
-}
-
-static void test_badargs2(void) {
- gpr_cmdline* cl;
-
- const char* str = nullptr;
- int x = 0;
- int flag = 2;
-
- char* bad_int_value[] = {(char*)__FILE__, const_cast<char*>("--x"),
- const_cast<char*>("henry")};
-
- LOG_TEST();
-
- cl = gpr_cmdline_create(nullptr);
- gpr_cmdline_set_survive_failure(cl);
- gpr_cmdline_add_string(cl, "str", nullptr, &str);
- gpr_cmdline_add_int(cl, "x", nullptr, &x);
- gpr_cmdline_add_flag(cl, "flag", nullptr, &flag);
- gpr_cmdline_on_extra_arg(cl, "file", "filenames to process", extra_arg_cb,
- nullptr);
-
- GPR_ASSERT(
- 0 == gpr_cmdline_parse(cl, GPR_ARRAY_SIZE(bad_int_value), bad_int_value));
-
- gpr_cmdline_destroy(cl);
-}
-
-static void test_badargs3(void) {
- gpr_cmdline* cl;
-
- const char* str = nullptr;
- int x = 0;
- int flag = 2;
-
- char* bad_bool_value[] = {(char*)__FILE__, const_cast<char*>("--flag=henry")};
-
- LOG_TEST();
-
- cl = gpr_cmdline_create(nullptr);
- gpr_cmdline_set_survive_failure(cl);
- gpr_cmdline_add_string(cl, "str", nullptr, &str);
- gpr_cmdline_add_int(cl, "x", nullptr, &x);
- gpr_cmdline_add_flag(cl, "flag", nullptr, &flag);
- gpr_cmdline_on_extra_arg(cl, "file", "filenames to process", extra_arg_cb,
- nullptr);
-
- GPR_ASSERT(0 == gpr_cmdline_parse(cl, GPR_ARRAY_SIZE(bad_bool_value),
- bad_bool_value));
-
- gpr_cmdline_destroy(cl);
-}
-
-static void test_badargs4(void) {
- gpr_cmdline* cl;
-
- const char* str = nullptr;
- int x = 0;
- int flag = 2;
-
- char* bad_bool_value[] = {(char*)__FILE__, const_cast<char*>("--no-str")};
-
- LOG_TEST();
-
- cl = gpr_cmdline_create(nullptr);
- gpr_cmdline_set_survive_failure(cl);
- gpr_cmdline_add_string(cl, "str", nullptr, &str);
- gpr_cmdline_add_int(cl, "x", nullptr, &x);
- gpr_cmdline_add_flag(cl, "flag", nullptr, &flag);
- gpr_cmdline_on_extra_arg(cl, "file", "filenames to process", extra_arg_cb,
- nullptr);
-
- GPR_ASSERT(0 == gpr_cmdline_parse(cl, GPR_ARRAY_SIZE(bad_bool_value),
- bad_bool_value));
-
- gpr_cmdline_destroy(cl);
-}
-
-int main(int argc, char** argv) {
- grpc::testing::TestEnvironment env(argc, argv);
- test_simple_int();
- test_eq_int();
- test_2dash_int();
- test_2dash_eq_int();
- test_simple_string();
- test_eq_string();
- test_2dash_string();
- test_2dash_eq_string();
- test_flag_on();
- test_flag_no();
- test_flag_val_1();
- test_flag_val_0();
- test_flag_val_true();
- test_flag_val_false();
- test_many();
- test_extra();
- test_extra_dashdash();
- test_usage();
- test_help();
- test_badargs1();
- test_badargs2();
- test_badargs3();
- test_badargs4();
- return 0;
-}
+
+ gpr_cmdline_destroy(cl);
+}
+
+static void test_help(void) {
+ gpr_cmdline* cl;
+
+ const char* str = nullptr;
+ int x = 0;
+ int flag = 2;
+
+ char* help[] = {(char*)__FILE__, const_cast<char*>("-h")};
+
+ LOG_TEST();
+
+ cl = gpr_cmdline_create(nullptr);
+ gpr_cmdline_set_survive_failure(cl);
+ gpr_cmdline_add_string(cl, "str", nullptr, &str);
+ gpr_cmdline_add_int(cl, "x", nullptr, &x);
+ gpr_cmdline_add_flag(cl, "flag", nullptr, &flag);
+ gpr_cmdline_on_extra_arg(cl, "file", "filenames to process", extra_arg_cb,
+ nullptr);
+
+ GPR_ASSERT(0 == gpr_cmdline_parse(cl, GPR_ARRAY_SIZE(help), help));
+
+ gpr_cmdline_destroy(cl);
+}
+
+static void test_badargs1(void) {
+ gpr_cmdline* cl;
+
+ const char* str = nullptr;
+ int x = 0;
+ int flag = 2;
+
+ char* bad_arg_name[] = {(char*)__FILE__, const_cast<char*>("--y")};
+
+ LOG_TEST();
+
+ cl = gpr_cmdline_create(nullptr);
+ gpr_cmdline_set_survive_failure(cl);
+ gpr_cmdline_add_string(cl, "str", nullptr, &str);
+ gpr_cmdline_add_int(cl, "x", nullptr, &x);
+ gpr_cmdline_add_flag(cl, "flag", nullptr, &flag);
+ gpr_cmdline_on_extra_arg(cl, "file", "filenames to process", extra_arg_cb,
+ nullptr);
+
+ GPR_ASSERT(0 ==
+ gpr_cmdline_parse(cl, GPR_ARRAY_SIZE(bad_arg_name), bad_arg_name));
+
+ gpr_cmdline_destroy(cl);
+}
+
+static void test_badargs2(void) {
+ gpr_cmdline* cl;
+
+ const char* str = nullptr;
+ int x = 0;
+ int flag = 2;
+
+ char* bad_int_value[] = {(char*)__FILE__, const_cast<char*>("--x"),
+ const_cast<char*>("henry")};
+
+ LOG_TEST();
+
+ cl = gpr_cmdline_create(nullptr);
+ gpr_cmdline_set_survive_failure(cl);
+ gpr_cmdline_add_string(cl, "str", nullptr, &str);
+ gpr_cmdline_add_int(cl, "x", nullptr, &x);
+ gpr_cmdline_add_flag(cl, "flag", nullptr, &flag);
+ gpr_cmdline_on_extra_arg(cl, "file", "filenames to process", extra_arg_cb,
+ nullptr);
+
+ GPR_ASSERT(
+ 0 == gpr_cmdline_parse(cl, GPR_ARRAY_SIZE(bad_int_value), bad_int_value));
+
+ gpr_cmdline_destroy(cl);
+}
+
+static void test_badargs3(void) {
+ gpr_cmdline* cl;
+
+ const char* str = nullptr;
+ int x = 0;
+ int flag = 2;
+
+ char* bad_bool_value[] = {(char*)__FILE__, const_cast<char*>("--flag=henry")};
+
+ LOG_TEST();
+
+ cl = gpr_cmdline_create(nullptr);
+ gpr_cmdline_set_survive_failure(cl);
+ gpr_cmdline_add_string(cl, "str", nullptr, &str);
+ gpr_cmdline_add_int(cl, "x", nullptr, &x);
+ gpr_cmdline_add_flag(cl, "flag", nullptr, &flag);
+ gpr_cmdline_on_extra_arg(cl, "file", "filenames to process", extra_arg_cb,
+ nullptr);
+
+ GPR_ASSERT(0 == gpr_cmdline_parse(cl, GPR_ARRAY_SIZE(bad_bool_value),
+ bad_bool_value));
+
+ gpr_cmdline_destroy(cl);
+}
+
+static void test_badargs4(void) {
+ gpr_cmdline* cl;
+
+ const char* str = nullptr;
+ int x = 0;
+ int flag = 2;
+
+ char* bad_bool_value[] = {(char*)__FILE__, const_cast<char*>("--no-str")};
+
+ LOG_TEST();
+
+ cl = gpr_cmdline_create(nullptr);
+ gpr_cmdline_set_survive_failure(cl);
+ gpr_cmdline_add_string(cl, "str", nullptr, &str);
+ gpr_cmdline_add_int(cl, "x", nullptr, &x);
+ gpr_cmdline_add_flag(cl, "flag", nullptr, &flag);
+ gpr_cmdline_on_extra_arg(cl, "file", "filenames to process", extra_arg_cb,
+ nullptr);
+
+ GPR_ASSERT(0 == gpr_cmdline_parse(cl, GPR_ARRAY_SIZE(bad_bool_value),
+ bad_bool_value));
+
+ gpr_cmdline_destroy(cl);
+}
+
+int main(int argc, char** argv) {
+ grpc::testing::TestEnvironment env(argc, argv);
+ test_simple_int();
+ test_eq_int();
+ test_2dash_int();
+ test_2dash_eq_int();
+ test_simple_string();
+ test_eq_string();
+ test_2dash_string();
+ test_2dash_eq_string();
+ test_flag_on();
+ test_flag_no();
+ test_flag_val_1();
+ test_flag_val_0();
+ test_flag_val_true();
+ test_flag_val_false();
+ test_many();
+ test_extra();
+ test_extra_dashdash();
+ test_usage();
+ test_help();
+ test_badargs1();
+ test_badargs2();
+ test_badargs3();
+ test_badargs4();
+ return 0;
+}
diff --git a/contrib/libs/grpc/test/core/util/debugger_macros.cc b/contrib/libs/grpc/test/core/util/debugger_macros.cc
index b866cfd647..fde68f3217 100644
--- a/contrib/libs/grpc/test/core/util/debugger_macros.cc
+++ b/contrib/libs/grpc/test/core/util/debugger_macros.cc
@@ -1,57 +1,57 @@
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-/*
- * A collection of 'macros' that help navigating the grpc object hierarchy
- * Not intended to be robust for main-line code, often cuts across abstraction
- * boundaries.
- */
-#include <stdio.h>
-
-#include "src/core/ext/filters/client_channel/client_channel.h"
-#include "src/core/ext/transport/chttp2/transport/internal.h"
-#include "src/core/lib/channel/connected_channel.h"
-#include "src/core/lib/surface/call.h"
-
-grpc_stream* grpc_transport_stream_from_call(grpc_call* call) {
- grpc_call_stack* cs = grpc_call_get_call_stack(call);
- for (;;) {
- grpc_call_element* el = grpc_call_stack_element(cs, cs->count - 1);
- if (el->filter == &grpc_client_channel_filter) {
- grpc_core::RefCountedPtr<grpc_core::SubchannelCall> scc =
- grpc_client_channel_get_subchannel_call(el);
- if (scc == nullptr) {
- fprintf(stderr, "No subchannel-call");
- fflush(stderr);
- return nullptr;
- }
- cs = scc->GetCallStack();
- } else if (el->filter == &grpc_connected_filter) {
- return grpc_connected_channel_get_stream(el);
- } else {
- fprintf(stderr, "Unrecognized filter: %s", el->filter->name);
- fflush(stderr);
- return nullptr;
- }
- }
-}
-
-grpc_chttp2_stream* grpc_chttp2_stream_from_call(grpc_call* call) {
- return reinterpret_cast<grpc_chttp2_stream*>(
- grpc_transport_stream_from_call(call));
-}
+/*
+ *
+ * Copyright 2016 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+/*
+ * A collection of 'macros' that help navigating the grpc object hierarchy
+ * Not intended to be robust for main-line code, often cuts across abstraction
+ * boundaries.
+ */
+#include <stdio.h>
+
+#include "src/core/ext/filters/client_channel/client_channel.h"
+#include "src/core/ext/transport/chttp2/transport/internal.h"
+#include "src/core/lib/channel/connected_channel.h"
+#include "src/core/lib/surface/call.h"
+
+grpc_stream* grpc_transport_stream_from_call(grpc_call* call) {
+ grpc_call_stack* cs = grpc_call_get_call_stack(call);
+ for (;;) {
+ grpc_call_element* el = grpc_call_stack_element(cs, cs->count - 1);
+ if (el->filter == &grpc_client_channel_filter) {
+ grpc_core::RefCountedPtr<grpc_core::SubchannelCall> scc =
+ grpc_client_channel_get_subchannel_call(el);
+ if (scc == nullptr) {
+ fprintf(stderr, "No subchannel-call");
+ fflush(stderr);
+ return nullptr;
+ }
+ cs = scc->GetCallStack();
+ } else if (el->filter == &grpc_connected_filter) {
+ return grpc_connected_channel_get_stream(el);
+ } else {
+ fprintf(stderr, "Unrecognized filter: %s", el->filter->name);
+ fflush(stderr);
+ return nullptr;
+ }
+ }
+}
+
+grpc_chttp2_stream* grpc_chttp2_stream_from_call(grpc_call* call) {
+ return reinterpret_cast<grpc_chttp2_stream*>(
+ grpc_transport_stream_from_call(call));
+}
diff --git a/contrib/libs/grpc/test/core/util/debugger_macros.h b/contrib/libs/grpc/test/core/util/debugger_macros.h
index a5a603661b..71228c6e87 100644
--- a/contrib/libs/grpc/test/core/util/debugger_macros.h
+++ b/contrib/libs/grpc/test/core/util/debugger_macros.h
@@ -1,27 +1,27 @@
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#ifndef GRPC_TEST_CORE_UTIL_DEBUGGER_MACROS_H
-#define GRPC_TEST_CORE_UTIL_DEBUGGER_MACROS_H
-
-#include "src/core/ext/transport/chttp2/transport/internal.h"
-#include "src/core/lib/surface/call.h"
-
-grpc_chttp2_stream* grpc_chttp2_stream_from_call(grpc_call* call);
-
-#endif /* GRPC_TEST_CORE_UTIL_DEBUGGER_MACROS_H */
+/*
+ *
+ * Copyright 2016 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#ifndef GRPC_TEST_CORE_UTIL_DEBUGGER_MACROS_H
+#define GRPC_TEST_CORE_UTIL_DEBUGGER_MACROS_H
+
+#include "src/core/ext/transport/chttp2/transport/internal.h"
+#include "src/core/lib/surface/call.h"
+
+grpc_chttp2_stream* grpc_chttp2_stream_from_call(grpc_call* call);
+
+#endif /* GRPC_TEST_CORE_UTIL_DEBUGGER_MACROS_H */
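grpc_chttp2_stream_from_call takes nothing but a grpc_call*, so these helpers are convenient to invoke by hand from an interactive debugger, for example something like print grpc_chttp2_stream_from_call(call) at a gdb prompt. That usage is inferred from the file name and the header comment, not stated explicitly in the sources above.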
diff --git a/contrib/libs/grpc/test/core/util/fuzzer_corpus_test.cc b/contrib/libs/grpc/test/core/util/fuzzer_corpus_test.cc
index ad7bad6fe5..99ab45120d 100644
--- a/contrib/libs/grpc/test/core/util/fuzzer_corpus_test.cc
+++ b/contrib/libs/grpc/test/core/util/fuzzer_corpus_test.cc
@@ -1,169 +1,169 @@
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include <stdbool.h>
-
-#include <dirent.h>
-#include <gflags/gflags.h>
-#include <grpc/support/alloc.h>
-#include <grpc/support/log.h>
-#include <gtest/gtest.h>
-#include <stdio.h>
-#include <sys/types.h>
-
+/*
+ *
+ * Copyright 2016 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <stdbool.h>
+
+#include <dirent.h>
+#include <gflags/gflags.h>
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <gtest/gtest.h>
+#include <stdio.h>
+#include <sys/types.h>
+
#include <grpc/grpc.h>
-#include "src/core/lib/gpr/env.h"
-#include "src/core/lib/iomgr/load_file.h"
-#include "test/core/util/test_config.h"
-#include "test/cpp/util/test_config.h"
-
-extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size);
-extern bool squelch;
-extern bool leak_check;
-
-// In some distros, gflags is in the namespace google, and in some others,
-// in gflags. This hack is enabling us to find both.
-namespace google {}
-namespace gflags {}
-using namespace google;
-using namespace gflags;
-
-DEFINE_string(file, "", "Use this file as test data");
-DEFINE_string(directory, "", "Use this directory as test data");
-
+#include "src/core/lib/gpr/env.h"
+#include "src/core/lib/iomgr/load_file.h"
+#include "test/core/util/test_config.h"
+#include "test/cpp/util/test_config.h"
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size);
+extern bool squelch;
+extern bool leak_check;
+
+// In some distros, gflags is in the namespace google, and in some others,
+// in gflags. This hack is enabling us to find both.
+namespace google {}
+namespace gflags {}
+using namespace google;
+using namespace gflags;
+
+DEFINE_string(file, "", "Use this file as test data");
+DEFINE_string(directory, "", "Use this directory as test data");
+
class FuzzerCorpusTest : public ::testing::TestWithParam<TString> {};
-
-TEST_P(FuzzerCorpusTest, RunOneExample) {
+
+TEST_P(FuzzerCorpusTest, RunOneExample) {
// Need to call grpc_init() here to use a slice, but need to shut it
// down before calling LLVMFuzzerTestOneInput(), because most
// implementations of that function will initialize and shutdown gRPC
// internally.
grpc_init();
- gpr_log(GPR_DEBUG, "Example file: %s", GetParam().c_str());
- grpc_slice buffer;
- squelch = false;
- leak_check = false;
- GPR_ASSERT(GRPC_LOG_IF_ERROR("load_file",
- grpc_load_file(GetParam().c_str(), 0, &buffer)));
+ gpr_log(GPR_DEBUG, "Example file: %s", GetParam().c_str());
+ grpc_slice buffer;
+ squelch = false;
+ leak_check = false;
+ GPR_ASSERT(GRPC_LOG_IF_ERROR("load_file",
+ grpc_load_file(GetParam().c_str(), 0, &buffer)));
size_t length = GRPC_SLICE_LENGTH(buffer);
void* data = gpr_malloc(length);
memcpy(data, GPR_SLICE_START_PTR(buffer), length);
- grpc_slice_unref(buffer);
+ grpc_slice_unref(buffer);
grpc_shutdown_blocking();
LLVMFuzzerTestOneInput(static_cast<uint8_t*>(data), length);
gpr_free(data);
-}
-
-class ExampleGenerator
+}
+
+class ExampleGenerator
: public ::testing::internal::ParamGeneratorInterface<TString> {
- public:
+ public:
virtual ::testing::internal::ParamIteratorInterface<TString>* Begin()
- const;
+ const;
virtual ::testing::internal::ParamIteratorInterface<TString>* End() const;
-
- private:
- void Materialize() const {
- if (examples_.empty()) {
- if (!FLAGS_file.empty()) examples_.push_back(FLAGS_file);
- if (!FLAGS_directory.empty()) {
- char* test_srcdir = gpr_getenv("TEST_SRCDIR");
+
+ private:
+ void Materialize() const {
+ if (examples_.empty()) {
+ if (!FLAGS_file.empty()) examples_.push_back(FLAGS_file);
+ if (!FLAGS_directory.empty()) {
+ char* test_srcdir = gpr_getenv("TEST_SRCDIR");
gpr_log(GPR_DEBUG, "test_srcdir=\"%s\"", test_srcdir);
TString directory = FLAGS_directory;
- if (test_srcdir != nullptr) {
+ if (test_srcdir != nullptr) {
directory =
test_srcdir + TString("/com_github_grpc_grpc/") + directory;
- }
+ }
gpr_log(GPR_DEBUG, "Using corpus directory: %s", directory.c_str());
- DIR* dp;
- struct dirent* ep;
+ DIR* dp;
+ struct dirent* ep;
dp = opendir(directory.c_str());
-
- if (dp != nullptr) {
- while ((ep = readdir(dp)) != nullptr) {
+
+ if (dp != nullptr) {
+ while ((ep = readdir(dp)) != nullptr) {
if (strcmp(ep->d_name, ".") != 0 && strcmp(ep->d_name, "..") != 0) {
examples_.push_back(directory + "/" + ep->d_name);
- }
- }
-
- (void)closedir(dp);
- } else {
- perror("Couldn't open the directory");
- abort();
- }
- gpr_free(test_srcdir);
- }
- }
+ }
+ }
+
+ (void)closedir(dp);
+ } else {
+ perror("Couldn't open the directory");
+ abort();
+ }
+ gpr_free(test_srcdir);
+ }
+ }
// Make sure we don't succeed without doing anything, which caused
// us to be blind to our fuzzers not running for 9 months.
GPR_ASSERT(!examples_.empty());
- }
-
+ }
+
mutable std::vector<TString> examples_;
-};
-
-class ExampleIterator
+};
+
+class ExampleIterator
: public ::testing::internal::ParamIteratorInterface<TString> {
- public:
- ExampleIterator(const ExampleGenerator& base_,
+ public:
+ ExampleIterator(const ExampleGenerator& base_,
std::vector<TString>::const_iterator begin)
- : base_(base_), begin_(begin), current_(begin) {}
-
- virtual const ExampleGenerator* BaseGenerator() const { return &base_; }
-
- virtual void Advance() { current_++; }
- virtual ExampleIterator* Clone() const { return new ExampleIterator(*this); }
+ : base_(base_), begin_(begin), current_(begin) {}
+
+ virtual const ExampleGenerator* BaseGenerator() const { return &base_; }
+
+ virtual void Advance() { current_++; }
+ virtual ExampleIterator* Clone() const { return new ExampleIterator(*this); }
virtual const TString* Current() const { return &*current_; }
-
+
virtual bool Equals(const ParamIteratorInterface<TString>& other) const {
- return &base_ == other.BaseGenerator() &&
- current_ == dynamic_cast<const ExampleIterator*>(&other)->current_;
- }
-
- private:
- ExampleIterator(const ExampleIterator& other)
- : base_(other.base_), begin_(other.begin_), current_(other.current_) {}
-
- const ExampleGenerator& base_;
+ return &base_ == other.BaseGenerator() &&
+ current_ == dynamic_cast<const ExampleIterator*>(&other)->current_;
+ }
+
+ private:
+ ExampleIterator(const ExampleIterator& other)
+ : base_(other.base_), begin_(other.begin_), current_(other.current_) {}
+
+ const ExampleGenerator& base_;
const std::vector<TString>::const_iterator begin_;
std::vector<TString>::const_iterator current_;
-};
-
+};
+
::testing::internal::ParamIteratorInterface<TString>*
-ExampleGenerator::Begin() const {
- Materialize();
- return new ExampleIterator(*this, examples_.begin());
-}
-
+ExampleGenerator::Begin() const {
+ Materialize();
+ return new ExampleIterator(*this, examples_.begin());
+}
+
::testing::internal::ParamIteratorInterface<TString>*
-ExampleGenerator::End() const {
- Materialize();
- return new ExampleIterator(*this, examples_.end());
-}
-
-INSTANTIATE_TEST_SUITE_P(
- CorpusExamples, FuzzerCorpusTest,
+ExampleGenerator::End() const {
+ Materialize();
+ return new ExampleIterator(*this, examples_.end());
+}
+
+INSTANTIATE_TEST_SUITE_P(
+ CorpusExamples, FuzzerCorpusTest,
::testing::internal::ParamGenerator<TString>(new ExampleGenerator));
-
-int main(int argc, char** argv) {
- grpc::testing::TestEnvironment env(argc, argv);
+
+int main(int argc, char** argv) {
+ grpc::testing::TestEnvironment env(argc, argv);
grpc::testing::InitTest(&argc, &argv, true);
- ::testing::InitGoogleTest(&argc, argv);
-
- return RUN_ALL_TESTS();
-}
+ ::testing::InitGoogleTest(&argc, argv);
+
+ return RUN_ALL_TESTS();
+}
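The custom ExampleGenerator above is lazy on purpose: INSTANTIATE_TEST_SUITE_P runs during static initialization, before main() has had a chance to call grpc::testing::InitTest and parse --file/--directory, so the corpus list can only be materialized when gtest first asks the generator for Begin()/End(). In practice the binary is pointed at its inputs on the command line, e.g. something like --directory=path/to/corpus or --file=path/to/one/example (placeholder paths), and each discovered file becomes one RunOneExample test case. This reading of the design is inferred from the code above.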
diff --git a/contrib/libs/grpc/test/core/util/fuzzer_one_entry_runner.sh b/contrib/libs/grpc/test/core/util/fuzzer_one_entry_runner.sh
index 7ffd96f006..7c471afcc2 100755
--- a/contrib/libs/grpc/test/core/util/fuzzer_one_entry_runner.sh
+++ b/contrib/libs/grpc/test/core/util/fuzzer_one_entry_runner.sh
@@ -1,18 +1,18 @@
-#!/bin/sh
-
-# Test runner for fuzzer tests from bazel
-
-# Copyright 2017 gRPC authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"$1" "$2"
+#!/bin/sh
+
+# Test runner for fuzzer tests from bazel
+
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"$1" "$2"
diff --git a/contrib/libs/grpc/test/core/util/fuzzer_util.cc b/contrib/libs/grpc/test/core/util/fuzzer_util.cc
index 0601ae9356..29c9b8875f 100644
--- a/contrib/libs/grpc/test/core/util/fuzzer_util.cc
+++ b/contrib/libs/grpc/test/core/util/fuzzer_util.cc
@@ -1,82 +1,82 @@
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include "test/core/util/fuzzer_util.h"
-
-#include <grpc/support/alloc.h>
-
-#include "src/core/lib/gpr/useful.h"
-
-namespace grpc_core {
-namespace testing {
-
-uint8_t grpc_fuzzer_get_next_byte(input_stream* inp) {
- if (inp->cur == inp->end) {
- return 0;
- }
- return *inp->cur++;
-}
-
-char* grpc_fuzzer_get_next_string(input_stream* inp, bool* special) {
- char* str = nullptr;
- size_t cap = 0;
- size_t sz = 0;
- char c;
- do {
- if (cap == sz) {
- cap = GPR_MAX(3 * cap / 2, cap + 8);
- str = static_cast<char*>(gpr_realloc(str, cap));
- }
- c = static_cast<char>(grpc_fuzzer_get_next_byte(inp));
- str[sz++] = c;
- } while (c != 0 && c != 1);
- if (special != nullptr) {
- *special = (c == 1);
- }
- if (c == 1) {
- str[sz - 1] = 0;
- }
- return str;
-}
-
-uint32_t grpc_fuzzer_get_next_uint32(input_stream* inp) {
- uint8_t b = grpc_fuzzer_get_next_byte(inp);
- uint32_t x = b & 0x7f;
- if (b & 0x80) {
- x <<= 7;
- b = grpc_fuzzer_get_next_byte(inp);
- x |= b & 0x7f;
- if (b & 0x80) {
- x <<= 7;
- b = grpc_fuzzer_get_next_byte(inp);
- x |= b & 0x7f;
- if (b & 0x80) {
- x <<= 7;
- b = grpc_fuzzer_get_next_byte(inp);
- x |= b & 0x7f;
- if (b & 0x80) {
- x = (x << 4) | (grpc_fuzzer_get_next_byte(inp) & 0x0f);
- }
- }
- }
- }
- return x;
-}
-
-} // namespace testing
-} // namespace grpc_core
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include "test/core/util/fuzzer_util.h"
+
+#include <grpc/support/alloc.h>
+
+#include "src/core/lib/gpr/useful.h"
+
+namespace grpc_core {
+namespace testing {
+
+uint8_t grpc_fuzzer_get_next_byte(input_stream* inp) {
+ if (inp->cur == inp->end) {
+ return 0;
+ }
+ return *inp->cur++;
+}
+
+char* grpc_fuzzer_get_next_string(input_stream* inp, bool* special) {
+ char* str = nullptr;
+ size_t cap = 0;
+ size_t sz = 0;
+ char c;
+ do {
+ if (cap == sz) {
+ cap = GPR_MAX(3 * cap / 2, cap + 8);
+ str = static_cast<char*>(gpr_realloc(str, cap));
+ }
+ c = static_cast<char>(grpc_fuzzer_get_next_byte(inp));
+ str[sz++] = c;
+ } while (c != 0 && c != 1);
+ if (special != nullptr) {
+ *special = (c == 1);
+ }
+ if (c == 1) {
+ str[sz - 1] = 0;
+ }
+ return str;
+}
+
+uint32_t grpc_fuzzer_get_next_uint32(input_stream* inp) {
+ uint8_t b = grpc_fuzzer_get_next_byte(inp);
+ uint32_t x = b & 0x7f;
+ if (b & 0x80) {
+ x <<= 7;
+ b = grpc_fuzzer_get_next_byte(inp);
+ x |= b & 0x7f;
+ if (b & 0x80) {
+ x <<= 7;
+ b = grpc_fuzzer_get_next_byte(inp);
+ x |= b & 0x7f;
+ if (b & 0x80) {
+ x <<= 7;
+ b = grpc_fuzzer_get_next_byte(inp);
+ x |= b & 0x7f;
+ if (b & 0x80) {
+ x = (x << 4) | (grpc_fuzzer_get_next_byte(inp) & 0x0f);
+ }
+ }
+ }
+ }
+ return x;
+}
+
+} // namespace testing
+} // namespace grpc_core
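grpc_fuzzer_get_next_uint32 above decodes a varint-style integer: seven value bits per byte while the 0x80 continuation bit is set, up to five bytes, with the fifth byte contributing only its low four bits. A small self-contained check of that behaviour follows; it assumes the test util library is linked in and uses only the input_stream struct and functions declared in fuzzer_util.h.

#include <cstdint>
#include <cstdio>

#include "test/core/util/fuzzer_util.h"

int main() {
  // 0x81 sets the continuation bit and contributes value 1; 0x02 ends the
  // sequence and contributes value 2, so the decoder yields (1 << 7) | 2.
  const uint8_t bytes[] = {0x81, 0x02};
  grpc_core::testing::input_stream inp = {bytes, bytes + sizeof(bytes)};
  uint32_t v = grpc_core::testing::grpc_fuzzer_get_next_uint32(&inp);
  std::printf("%u\n", v);  // prints 130
  return 0;
}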
diff --git a/contrib/libs/grpc/test/core/util/fuzzer_util.h b/contrib/libs/grpc/test/core/util/fuzzer_util.h
index 7e2d74831c..0e938399a1 100644
--- a/contrib/libs/grpc/test/core/util/fuzzer_util.h
+++ b/contrib/libs/grpc/test/core/util/fuzzer_util.h
@@ -1,49 +1,49 @@
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#ifndef GRPC_TEST_CORE_UTIL_FUZZER_UTIL_H
-#define GRPC_TEST_CORE_UTIL_FUZZER_UTIL_H
-
-#include <stdint.h>
-
-namespace grpc_core {
-
-namespace testing {
-
-// Main struct for input_stream. It allows easy access to input
-// bytes, and allows reading a little past the end(avoiding
-// needing to check everywhere).
-typedef struct {
- const uint8_t* cur;
- const uint8_t* end;
-} input_stream;
-
-// get a byte from an input stream.
-uint8_t grpc_fuzzer_get_next_byte(input_stream* inp);
-
-// get a string and boolean values (if special is not null) from an input
-// stream.
-char* grpc_fuzzer_get_next_string(input_stream* inp, bool* special);
-
-// get a uint32 value from an input stream.
-uint32_t grpc_fuzzer_get_next_uint32(input_stream* inp);
-
-} // namespace testing
-} // namespace grpc_core
-
-#endif /* GRPC_TEST_CORE_UTIL_FUZZER_UTIL_H */
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#ifndef GRPC_TEST_CORE_UTIL_FUZZER_UTIL_H
+#define GRPC_TEST_CORE_UTIL_FUZZER_UTIL_H
+
+#include <stdint.h>
+
+namespace grpc_core {
+
+namespace testing {
+
+// Main struct for input_stream. It allows easy access to input
+// bytes, and allows reading a little past the end(avoiding
+// needing to check everywhere).
+typedef struct {
+ const uint8_t* cur;
+ const uint8_t* end;
+} input_stream;
+
+// get a byte from an input stream.
+uint8_t grpc_fuzzer_get_next_byte(input_stream* inp);
+
+// get a string and boolean values (if special is not null) from an input
+// stream.
+char* grpc_fuzzer_get_next_string(input_stream* inp, bool* special);
+
+// get a uint32 value from an input stream.
+uint32_t grpc_fuzzer_get_next_uint32(input_stream* inp);
+
+} // namespace testing
+} // namespace grpc_core
+
+#endif /* GRPC_TEST_CORE_UTIL_FUZZER_UTIL_H */
diff --git a/contrib/libs/grpc/test/core/util/grpc_fuzzer.bzl b/contrib/libs/grpc/test/core/util/grpc_fuzzer.bzl
index e6d7053374..99594b29e1 100644
--- a/contrib/libs/grpc/test/core/util/grpc_fuzzer.bzl
+++ b/contrib/libs/grpc/test/core/util/grpc_fuzzer.bzl
@@ -1,29 +1,29 @@
-# Copyright 2016 gRPC authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-load("//bazel:grpc_build_system.bzl", "grpc_cc_test")
-
+# Copyright 2016 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load("//bazel:grpc_build_system.bzl", "grpc_cc_test")
+
def grpc_fuzzer(name, corpus, srcs = [], deps = [], data = [], size = "large", **kwargs):
- grpc_cc_test(
- name = name,
- srcs = srcs,
- deps = deps + ["//test/core/util:fuzzer_corpus_test"],
+ grpc_cc_test(
+ name = name,
+ srcs = srcs,
+ deps = deps + ["//test/core/util:fuzzer_corpus_test"],
data = data + native.glob([corpus + "/**"]),
- external_deps = [
- "gtest",
- ],
- size = size,
- args = ["--directory=" + native.package_name() + "/" + corpus],
- **kwargs
- )
+ external_deps = [
+ "gtest",
+ ],
+ size = size,
+ args = ["--directory=" + native.package_name() + "/" + corpus],
+ **kwargs
+ )
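The macro above is the single entry point fuzz targets use from BUILD files: it wraps grpc_cc_test, adds //test/core/util:fuzzer_corpus_test to deps, globs every file under the given corpus directory into data, and passes --directory pointing at that corpus. A hypothetical invocation would look like grpc_fuzzer(name = "my_fuzzer", srcs = ["my_fuzzer.cc"], corpus = "my_fuzzer_corpus"), where the target and file names are placeholders.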
diff --git a/contrib/libs/grpc/test/core/util/grpc_profiler.cc b/contrib/libs/grpc/test/core/util/grpc_profiler.cc
index 6ed0d14c9f..88f233598b 100644
--- a/contrib/libs/grpc/test/core/util/grpc_profiler.cc
+++ b/contrib/libs/grpc/test/core/util/grpc_profiler.cc
@@ -1,45 +1,45 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include "test/core/util/grpc_profiler.h"
-
-#if GRPC_HAVE_PERFTOOLS
-#include <gperftools/profiler.h>
-
-void grpc_profiler_start(const char* filename) { ProfilerStart(filename); }
-
-void grpc_profiler_stop() { ProfilerStop(); }
-#else
-#include <grpc/support/log.h>
-
-void grpc_profiler_start(const char* filename) {
- static int printed_warning = 0;
- if (!printed_warning) {
- gpr_log(GPR_DEBUG,
- "You do not have google-perftools installed, profiling is disabled "
- "[for %s]",
- filename);
- gpr_log(GPR_DEBUG,
- "To install on ubuntu: sudo apt-get install google-perftools "
- "libgoogle-perftools-dev");
- printed_warning = 1;
- }
-}
-
-void grpc_profiler_stop(void) {}
-#endif
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include "test/core/util/grpc_profiler.h"
+
+#if GRPC_HAVE_PERFTOOLS
+#include <gperftools/profiler.h>
+
+void grpc_profiler_start(const char* filename) { ProfilerStart(filename); }
+
+void grpc_profiler_stop() { ProfilerStop(); }
+#else
+#include <grpc/support/log.h>
+
+void grpc_profiler_start(const char* filename) {
+ static int printed_warning = 0;
+ if (!printed_warning) {
+ gpr_log(GPR_DEBUG,
+ "You do not have google-perftools installed, profiling is disabled "
+ "[for %s]",
+ filename);
+ gpr_log(GPR_DEBUG,
+ "To install on ubuntu: sudo apt-get install google-perftools "
+ "libgoogle-perftools-dev");
+ printed_warning = 1;
+ }
+}
+
+void grpc_profiler_stop(void) {}
+#endif
diff --git a/contrib/libs/grpc/test/core/util/grpc_profiler.h b/contrib/libs/grpc/test/core/util/grpc_profiler.h
index ca715c22bc..f9ddd2242e 100644
--- a/contrib/libs/grpc/test/core/util/grpc_profiler.h
+++ b/contrib/libs/grpc/test/core/util/grpc_profiler.h
@@ -1,25 +1,25 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#ifndef GRPC_TEST_CORE_UTIL_GRPC_PROFILER_H
-#define GRPC_TEST_CORE_UTIL_GRPC_PROFILER_H
-
-void grpc_profiler_start(const char* filename);
-void grpc_profiler_stop();
-
-#endif /* GRPC_TEST_CORE_UTIL_GRPC_PROFILER_H */
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#ifndef GRPC_TEST_CORE_UTIL_GRPC_PROFILER_H
+#define GRPC_TEST_CORE_UTIL_GRPC_PROFILER_H
+
+void grpc_profiler_start(const char* filename);
+void grpc_profiler_stop();
+
+#endif /* GRPC_TEST_CORE_UTIL_GRPC_PROFILER_H */
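A hedged usage sketch for the two functions declared above: bracket the region to be profiled with start/stop. A real CPU profile is only written when the library is built with GRPC_HAVE_PERFTOOLS; otherwise the fallback in grpc_profiler.cc just logs a one-time warning. The output path below is a placeholder.

#include "test/core/util/grpc_profiler.h"

void profile_hot_path_sketch() {
  grpc_profiler_start("/tmp/hot_path.prof");  // placeholder output file
  // ... run the workload of interest here ...
  grpc_profiler_stop();
}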
diff --git a/contrib/libs/grpc/test/core/util/histogram.cc b/contrib/libs/grpc/test/core/util/histogram.cc
index 3b17818873..f028ac404e 100644
--- a/contrib/libs/grpc/test/core/util/histogram.cc
+++ b/contrib/libs/grpc/test/core/util/histogram.cc
@@ -1,230 +1,230 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include "test/core/util/histogram.h"
-
-#include <math.h>
-#include <stddef.h>
-#include <string.h>
-
-#include <grpc/support/alloc.h>
-#include <grpc/support/log.h>
-#include <grpc/support/port_platform.h>
-
-#include "src/core/lib/gpr/useful.h"
-
-/* Histograms are stored with exponentially increasing bucket sizes.
- The first bucket is [0, m) where m = 1 + resolution
- Bucket n (n>=1) contains [m**n, m**(n+1))
- There are sufficient buckets to reach max_bucket_start */
-
-struct grpc_histogram {
- /* Sum of all values seen so far */
- double sum;
- /* Sum of squares of all values seen so far */
- double sum_of_squares;
- /* number of values seen so far */
- double count;
- /* m in the description */
- double multiplier;
- double one_on_log_multiplier;
- /* minimum value seen */
- double min_seen;
- /* maximum value seen */
- double max_seen;
- /* maximum representable value */
- double max_possible;
- /* number of buckets */
- size_t num_buckets;
- /* the buckets themselves */
- uint32_t* buckets;
-};
-
-/* determine a bucket index given a value - does no bounds checking */
-static size_t bucket_for_unchecked(grpc_histogram* h, double x) {
- return static_cast<size_t>(log(x) * h->one_on_log_multiplier);
-}
-
-/* bounds checked version of the above */
-static size_t bucket_for(grpc_histogram* h, double x) {
- size_t bucket = bucket_for_unchecked(h, GPR_CLAMP(x, 1.0, h->max_possible));
- GPR_ASSERT(bucket < h->num_buckets);
- return bucket;
-}
-
-/* at what value does a bucket start? */
-static double bucket_start(grpc_histogram* h, double x) {
- return pow(h->multiplier, x);
-}
-
-grpc_histogram* grpc_histogram_create(double resolution,
- double max_bucket_start) {
- grpc_histogram* h =
- static_cast<grpc_histogram*>(gpr_malloc(sizeof(grpc_histogram)));
- GPR_ASSERT(resolution > 0.0);
- GPR_ASSERT(max_bucket_start > resolution);
- h->sum = 0.0;
- h->sum_of_squares = 0.0;
- h->multiplier = 1.0 + resolution;
- h->one_on_log_multiplier = 1.0 / log(1.0 + resolution);
- h->max_possible = max_bucket_start;
- h->count = 0.0;
- h->min_seen = max_bucket_start;
- h->max_seen = 0.0;
- h->num_buckets = bucket_for_unchecked(h, max_bucket_start) + 1;
- GPR_ASSERT(h->num_buckets > 1);
- GPR_ASSERT(h->num_buckets < 100000000);
- h->buckets =
- static_cast<uint32_t*>(gpr_zalloc(sizeof(uint32_t) * h->num_buckets));
- return h;
-}
-
-void grpc_histogram_destroy(grpc_histogram* h) {
- gpr_free(h->buckets);
- gpr_free(h);
-}
-
-void grpc_histogram_add(grpc_histogram* h, double x) {
- h->sum += x;
- h->sum_of_squares += x * x;
- h->count++;
- if (x < h->min_seen) {
- h->min_seen = x;
- }
- if (x > h->max_seen) {
- h->max_seen = x;
- }
- h->buckets[bucket_for(h, x)]++;
-}
-
-int grpc_histogram_merge(grpc_histogram* dst, const grpc_histogram* src) {
- if ((dst->num_buckets != src->num_buckets) ||
- (dst->multiplier != src->multiplier)) {
- /* Fail because these histograms don't match */
- return 0;
- }
- grpc_histogram_merge_contents(dst, src->buckets, src->num_buckets,
- src->min_seen, src->max_seen, src->sum,
- src->sum_of_squares, src->count);
- return 1;
-}
-
-void grpc_histogram_merge_contents(grpc_histogram* dst, const uint32_t* data,
- size_t data_count, double min_seen,
- double max_seen, double sum,
- double sum_of_squares, double count) {
- size_t i;
- GPR_ASSERT(dst->num_buckets == data_count);
- dst->sum += sum;
- dst->sum_of_squares += sum_of_squares;
- dst->count += count;
- if (min_seen < dst->min_seen) {
- dst->min_seen = min_seen;
- }
- if (max_seen > dst->max_seen) {
- dst->max_seen = max_seen;
- }
- for (i = 0; i < dst->num_buckets; i++) {
- dst->buckets[i] += data[i];
- }
-}
-
-static double threshold_for_count_below(grpc_histogram* h, double count_below) {
- double count_so_far;
- double lower_bound;
- double upper_bound;
- size_t lower_idx;
- size_t upper_idx;
-
- if (h->count == 0) {
- return 0.0;
- }
-
- if (count_below <= 0) {
- return h->min_seen;
- }
- if (count_below >= h->count) {
- return h->max_seen;
- }
-
- /* find the lowest bucket that gets us above count_below */
- count_so_far = 0.0;
- for (lower_idx = 0; lower_idx < h->num_buckets; lower_idx++) {
- count_so_far += h->buckets[lower_idx];
- if (count_so_far >= count_below) {
- break;
- }
- }
- if (count_so_far == count_below) {
- /* this bucket hits the threshold exactly... we should be midway through
- any run of zero values following the bucket */
- for (upper_idx = lower_idx + 1; upper_idx < h->num_buckets; upper_idx++) {
- if (h->buckets[upper_idx]) {
- break;
- }
- }
- return (bucket_start(h, static_cast<double>(lower_idx)) +
- bucket_start(h, static_cast<double>(upper_idx))) /
- 2.0;
- } else {
- /* treat values as uniform throughout the bucket, and find where this value
- should lie */
- lower_bound = bucket_start(h, static_cast<double>(lower_idx));
- upper_bound = bucket_start(h, static_cast<double>(lower_idx + 1));
- return GPR_CLAMP(upper_bound - (upper_bound - lower_bound) *
- (count_so_far - count_below) /
- h->buckets[lower_idx],
- h->min_seen, h->max_seen);
- }
-}
-
-double grpc_histogram_percentile(grpc_histogram* h, double percentile) {
- return threshold_for_count_below(h, h->count * percentile / 100.0);
-}
-
-double grpc_histogram_mean(grpc_histogram* h) {
- GPR_ASSERT(h->count != 0);
- return h->sum / h->count;
-}
-
-double grpc_histogram_stddev(grpc_histogram* h) {
- return sqrt(grpc_histogram_variance(h));
-}
-
-double grpc_histogram_variance(grpc_histogram* h) {
- if (h->count == 0) return 0.0;
- return (h->sum_of_squares * h->count - h->sum * h->sum) /
- (h->count * h->count);
-}
-
-double grpc_histogram_maximum(grpc_histogram* h) { return h->max_seen; }
-
-double grpc_histogram_minimum(grpc_histogram* h) { return h->min_seen; }
-
-double grpc_histogram_count(grpc_histogram* h) { return h->count; }
-
-double grpc_histogram_sum(grpc_histogram* h) { return h->sum; }
-
-double grpc_histogram_sum_of_squares(grpc_histogram* h) {
- return h->sum_of_squares;
-}
-
-const uint32_t* grpc_histogram_get_contents(grpc_histogram* h, size_t* size) {
- *size = h->num_buckets;
- return h->buckets;
-}
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include "test/core/util/histogram.h"
+
+#include <math.h>
+#include <stddef.h>
+#include <string.h>
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/port_platform.h>
+
+#include "src/core/lib/gpr/useful.h"
+
+/* Histograms are stored with exponentially increasing bucket sizes.
+ The first bucket is [0, m) where m = 1 + resolution
+ Bucket n (n>=1) contains [m**n, m**(n+1))
+ There are sufficient buckets to reach max_bucket_start */
+
+struct grpc_histogram {
+ /* Sum of all values seen so far */
+ double sum;
+ /* Sum of squares of all values seen so far */
+ double sum_of_squares;
+ /* number of values seen so far */
+ double count;
+ /* m in the description */
+ double multiplier;
+ double one_on_log_multiplier;
+ /* minimum value seen */
+ double min_seen;
+ /* maximum value seen */
+ double max_seen;
+ /* maximum representable value */
+ double max_possible;
+ /* number of buckets */
+ size_t num_buckets;
+ /* the buckets themselves */
+ uint32_t* buckets;
+};
+
+/* determine a bucket index given a value - does no bounds checking */
+static size_t bucket_for_unchecked(grpc_histogram* h, double x) {
+ return static_cast<size_t>(log(x) * h->one_on_log_multiplier);
+}
+
+/* bounds checked version of the above */
+static size_t bucket_for(grpc_histogram* h, double x) {
+ size_t bucket = bucket_for_unchecked(h, GPR_CLAMP(x, 1.0, h->max_possible));
+ GPR_ASSERT(bucket < h->num_buckets);
+ return bucket;
+}
+
+/* at what value does a bucket start? */
+static double bucket_start(grpc_histogram* h, double x) {
+ return pow(h->multiplier, x);
+}
+
+grpc_histogram* grpc_histogram_create(double resolution,
+ double max_bucket_start) {
+ grpc_histogram* h =
+ static_cast<grpc_histogram*>(gpr_malloc(sizeof(grpc_histogram)));
+ GPR_ASSERT(resolution > 0.0);
+ GPR_ASSERT(max_bucket_start > resolution);
+ h->sum = 0.0;
+ h->sum_of_squares = 0.0;
+ h->multiplier = 1.0 + resolution;
+ h->one_on_log_multiplier = 1.0 / log(1.0 + resolution);
+ h->max_possible = max_bucket_start;
+ h->count = 0.0;
+ h->min_seen = max_bucket_start;
+ h->max_seen = 0.0;
+ h->num_buckets = bucket_for_unchecked(h, max_bucket_start) + 1;
+ GPR_ASSERT(h->num_buckets > 1);
+ GPR_ASSERT(h->num_buckets < 100000000);
+ h->buckets =
+ static_cast<uint32_t*>(gpr_zalloc(sizeof(uint32_t) * h->num_buckets));
+ return h;
+}
+
+void grpc_histogram_destroy(grpc_histogram* h) {
+ gpr_free(h->buckets);
+ gpr_free(h);
+}
+
+void grpc_histogram_add(grpc_histogram* h, double x) {
+ h->sum += x;
+ h->sum_of_squares += x * x;
+ h->count++;
+ if (x < h->min_seen) {
+ h->min_seen = x;
+ }
+ if (x > h->max_seen) {
+ h->max_seen = x;
+ }
+ h->buckets[bucket_for(h, x)]++;
+}
+
+int grpc_histogram_merge(grpc_histogram* dst, const grpc_histogram* src) {
+ if ((dst->num_buckets != src->num_buckets) ||
+ (dst->multiplier != src->multiplier)) {
+ /* Fail because these histograms don't match */
+ return 0;
+ }
+ grpc_histogram_merge_contents(dst, src->buckets, src->num_buckets,
+ src->min_seen, src->max_seen, src->sum,
+ src->sum_of_squares, src->count);
+ return 1;
+}
+
+void grpc_histogram_merge_contents(grpc_histogram* dst, const uint32_t* data,
+ size_t data_count, double min_seen,
+ double max_seen, double sum,
+ double sum_of_squares, double count) {
+ size_t i;
+ GPR_ASSERT(dst->num_buckets == data_count);
+ dst->sum += sum;
+ dst->sum_of_squares += sum_of_squares;
+ dst->count += count;
+ if (min_seen < dst->min_seen) {
+ dst->min_seen = min_seen;
+ }
+ if (max_seen > dst->max_seen) {
+ dst->max_seen = max_seen;
+ }
+ for (i = 0; i < dst->num_buckets; i++) {
+ dst->buckets[i] += data[i];
+ }
+}
+
+static double threshold_for_count_below(grpc_histogram* h, double count_below) {
+ double count_so_far;
+ double lower_bound;
+ double upper_bound;
+ size_t lower_idx;
+ size_t upper_idx;
+
+ if (h->count == 0) {
+ return 0.0;
+ }
+
+ if (count_below <= 0) {
+ return h->min_seen;
+ }
+ if (count_below >= h->count) {
+ return h->max_seen;
+ }
+
+ /* find the lowest bucket that gets us above count_below */
+ count_so_far = 0.0;
+ for (lower_idx = 0; lower_idx < h->num_buckets; lower_idx++) {
+ count_so_far += h->buckets[lower_idx];
+ if (count_so_far >= count_below) {
+ break;
+ }
+ }
+ if (count_so_far == count_below) {
+ /* this bucket hits the threshold exactly... we should be midway through
+ any run of zero values following the bucket */
+ for (upper_idx = lower_idx + 1; upper_idx < h->num_buckets; upper_idx++) {
+ if (h->buckets[upper_idx]) {
+ break;
+ }
+ }
+ return (bucket_start(h, static_cast<double>(lower_idx)) +
+ bucket_start(h, static_cast<double>(upper_idx))) /
+ 2.0;
+ } else {
+ /* treat values as uniform throughout the bucket, and find where this value
+ should lie */
+ lower_bound = bucket_start(h, static_cast<double>(lower_idx));
+ upper_bound = bucket_start(h, static_cast<double>(lower_idx + 1));
+ return GPR_CLAMP(upper_bound - (upper_bound - lower_bound) *
+ (count_so_far - count_below) /
+ h->buckets[lower_idx],
+ h->min_seen, h->max_seen);
+ }
+}
+
+double grpc_histogram_percentile(grpc_histogram* h, double percentile) {
+ return threshold_for_count_below(h, h->count * percentile / 100.0);
+}
+
+double grpc_histogram_mean(grpc_histogram* h) {
+ GPR_ASSERT(h->count != 0);
+ return h->sum / h->count;
+}
+
+double grpc_histogram_stddev(grpc_histogram* h) {
+ return sqrt(grpc_histogram_variance(h));
+}
+
+double grpc_histogram_variance(grpc_histogram* h) {
+ if (h->count == 0) return 0.0;
+ return (h->sum_of_squares * h->count - h->sum * h->sum) /
+ (h->count * h->count);
+}
+
+double grpc_histogram_maximum(grpc_histogram* h) { return h->max_seen; }
+
+double grpc_histogram_minimum(grpc_histogram* h) { return h->min_seen; }
+
+double grpc_histogram_count(grpc_histogram* h) { return h->count; }
+
+double grpc_histogram_sum(grpc_histogram* h) { return h->sum; }
+
+double grpc_histogram_sum_of_squares(grpc_histogram* h) {
+ return h->sum_of_squares;
+}
+
+const uint32_t* grpc_histogram_get_contents(grpc_histogram* h, size_t* size) {
+ *size = h->num_buckets;
+ return h->buckets;
+}
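The bucket math above can be checked in isolation: with resolution 0.01 the multiplier is 1.01, a value x lands in bucket floor(log(x) / log(1.01)), and that bucket starts at 1.01 raised to the bucket index. The following standalone snippet re-derives those two quantities for x = 10000 (values chosen only for illustration).

#include <cmath>
#include <cstddef>
#include <cstdio>

int main() {
  const double resolution = 0.01;
  const double multiplier = 1.0 + resolution;  // m in the description above
  const double x = 10000.0;
  // Same formulas as bucket_for_unchecked() and bucket_start() above.
  const std::size_t bucket =
      static_cast<std::size_t>(std::log(x) / std::log(multiplier));
  const double start = std::pow(multiplier, static_cast<double>(bucket));
  std::printf("bucket=%zu starts at %f\n", bucket, start);  // bucket 925, ~9937
  return 0;
}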
diff --git a/contrib/libs/grpc/test/core/util/histogram.h b/contrib/libs/grpc/test/core/util/histogram.h
index eeeb06352c..9d4985e64f 100644
--- a/contrib/libs/grpc/test/core/util/histogram.h
+++ b/contrib/libs/grpc/test/core/util/histogram.h
@@ -1,62 +1,62 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#ifndef GRPC_SUPPORT_HISTOGRAM_H
-#define GRPC_SUPPORT_HISTOGRAM_H
-
-#include <grpc/support/port_platform.h>
-#include <stddef.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-typedef struct grpc_histogram grpc_histogram;
-
-grpc_histogram* grpc_histogram_create(double resolution,
- double max_bucket_start);
-void grpc_histogram_destroy(grpc_histogram* h);
-void grpc_histogram_add(grpc_histogram* h, double x);
-
-/** The following merges the second histogram into the first. It only works
- if they have the same buckets and resolution. Returns 0 on failure, 1
- on success */
-int grpc_histogram_merge(grpc_histogram* dst, const grpc_histogram* src);
-
-double grpc_histogram_percentile(grpc_histogram* histogram, double percentile);
-double grpc_histogram_mean(grpc_histogram* histogram);
-double grpc_histogram_stddev(grpc_histogram* histogram);
-double grpc_histogram_variance(grpc_histogram* histogram);
-double grpc_histogram_maximum(grpc_histogram* histogram);
-double grpc_histogram_minimum(grpc_histogram* histogram);
-double grpc_histogram_count(grpc_histogram* histogram);
-double grpc_histogram_sum(grpc_histogram* histogram);
-double grpc_histogram_sum_of_squares(grpc_histogram* histogram);
-
-const uint32_t* grpc_histogram_get_contents(grpc_histogram* histogram,
- size_t* count);
-void grpc_histogram_merge_contents(grpc_histogram* histogram,
- const uint32_t* data, size_t data_count,
- double min_seen, double max_seen, double sum,
- double sum_of_squares, double count);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* GRPC_SUPPORT_HISTOGRAM_H */
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#ifndef GRPC_SUPPORT_HISTOGRAM_H
+#define GRPC_SUPPORT_HISTOGRAM_H
+
+#include <grpc/support/port_platform.h>
+#include <stddef.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef struct grpc_histogram grpc_histogram;
+
+grpc_histogram* grpc_histogram_create(double resolution,
+ double max_bucket_start);
+void grpc_histogram_destroy(grpc_histogram* h);
+void grpc_histogram_add(grpc_histogram* h, double x);
+
+/** The following merges the second histogram into the first. It only works
+ if they have the same buckets and resolution. Returns 0 on failure, 1
+ on success */
+int grpc_histogram_merge(grpc_histogram* dst, const grpc_histogram* src);
+
+double grpc_histogram_percentile(grpc_histogram* histogram, double percentile);
+double grpc_histogram_mean(grpc_histogram* histogram);
+double grpc_histogram_stddev(grpc_histogram* histogram);
+double grpc_histogram_variance(grpc_histogram* histogram);
+double grpc_histogram_maximum(grpc_histogram* histogram);
+double grpc_histogram_minimum(grpc_histogram* histogram);
+double grpc_histogram_count(grpc_histogram* histogram);
+double grpc_histogram_sum(grpc_histogram* histogram);
+double grpc_histogram_sum_of_squares(grpc_histogram* histogram);
+
+const uint32_t* grpc_histogram_get_contents(grpc_histogram* histogram,
+ size_t* count);
+void grpc_histogram_merge_contents(grpc_histogram* histogram,
+ const uint32_t* data, size_t data_count,
+ double min_seen, double max_seen, double sum,
+ double sum_of_squares, double count);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* GRPC_SUPPORT_HISTOGRAM_H */
diff --git a/contrib/libs/grpc/test/core/util/histogram_test.cc b/contrib/libs/grpc/test/core/util/histogram_test.cc
index 44c0084185..b96ac7d841 100644
--- a/contrib/libs/grpc/test/core/util/histogram_test.cc
+++ b/contrib/libs/grpc/test/core/util/histogram_test.cc
@@ -1,163 +1,163 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include "test/core/util/histogram.h"
-#include <grpc/support/log.h>
-
-#define LOG_TEST(x) gpr_log(GPR_INFO, "%s", x);
-
-static void test_no_op(void) {
- grpc_histogram_destroy(grpc_histogram_create(0.01, 60e9));
-}
-
-static void expect_percentile(grpc_histogram* h, double percentile,
- double min_expect, double max_expect) {
- double got = grpc_histogram_percentile(h, percentile);
- gpr_log(GPR_INFO, "@%f%%, expect %f <= %f <= %f", percentile, min_expect, got,
- max_expect);
- GPR_ASSERT(min_expect <= got);
- GPR_ASSERT(got <= max_expect);
-}
-
-static void test_simple(void) {
- grpc_histogram* h;
-
- LOG_TEST("test_simple");
-
- h = grpc_histogram_create(0.01, 60e9);
- grpc_histogram_add(h, 10000);
- grpc_histogram_add(h, 10000);
- grpc_histogram_add(h, 11000);
- grpc_histogram_add(h, 11000);
-
- expect_percentile(h, 50, 10001, 10999);
- GPR_ASSERT(grpc_histogram_mean(h) == 10500);
-
- grpc_histogram_destroy(h);
-}
-
-static void test_percentile(void) {
- grpc_histogram* h;
- double last;
- double i;
- double cur;
-
- LOG_TEST("test_percentile");
-
- h = grpc_histogram_create(0.05, 1e9);
- grpc_histogram_add(h, 2.5);
- grpc_histogram_add(h, 2.5);
- grpc_histogram_add(h, 8);
- grpc_histogram_add(h, 4);
-
- GPR_ASSERT(grpc_histogram_count(h) == 4);
- GPR_ASSERT(grpc_histogram_minimum(h) == 2.5);
- GPR_ASSERT(grpc_histogram_maximum(h) == 8);
- GPR_ASSERT(grpc_histogram_sum(h) == 17);
- GPR_ASSERT(grpc_histogram_sum_of_squares(h) == 92.5);
- GPR_ASSERT(grpc_histogram_mean(h) == 4.25);
- GPR_ASSERT(grpc_histogram_variance(h) == 5.0625);
- GPR_ASSERT(grpc_histogram_stddev(h) == 2.25);
-
- expect_percentile(h, -10, 2.5, 2.5);
- expect_percentile(h, 0, 2.5, 2.5);
- expect_percentile(h, 12.5, 2.5, 2.5);
- expect_percentile(h, 25, 2.5, 2.5);
- expect_percentile(h, 37.5, 2.5, 2.8);
- expect_percentile(h, 50, 3.0, 3.5);
- expect_percentile(h, 62.5, 3.5, 4.5);
- expect_percentile(h, 75, 5, 7.9);
- expect_percentile(h, 100, 8, 8);
- expect_percentile(h, 110, 8, 8);
-
- /* test monotonicity */
- last = 0.0;
- for (i = 0; i < 100.0; i += 0.01) {
- cur = grpc_histogram_percentile(h, i);
- GPR_ASSERT(cur >= last);
- last = cur;
- }
-
- grpc_histogram_destroy(h);
-}
-
-static void test_merge(void) {
- grpc_histogram *h1, *h2;
- double last;
- double i;
- double cur;
-
- LOG_TEST("test_merge");
-
- h1 = grpc_histogram_create(0.05, 1e9);
- grpc_histogram_add(h1, 2.5);
- grpc_histogram_add(h1, 2.5);
- grpc_histogram_add(h1, 8);
- grpc_histogram_add(h1, 4);
-
- h2 = grpc_histogram_create(0.01, 1e9);
- GPR_ASSERT(grpc_histogram_merge(h1, h2) == 0);
- grpc_histogram_destroy(h2);
-
- h2 = grpc_histogram_create(0.05, 1e10);
- GPR_ASSERT(grpc_histogram_merge(h1, h2) == 0);
- grpc_histogram_destroy(h2);
-
- h2 = grpc_histogram_create(0.05, 1e9);
- GPR_ASSERT(grpc_histogram_merge(h1, h2) == 1);
- GPR_ASSERT(grpc_histogram_count(h1) == 4);
- GPR_ASSERT(grpc_histogram_minimum(h1) == 2.5);
- GPR_ASSERT(grpc_histogram_maximum(h1) == 8);
- GPR_ASSERT(grpc_histogram_sum(h1) == 17);
- GPR_ASSERT(grpc_histogram_sum_of_squares(h1) == 92.5);
- GPR_ASSERT(grpc_histogram_mean(h1) == 4.25);
- GPR_ASSERT(grpc_histogram_variance(h1) == 5.0625);
- GPR_ASSERT(grpc_histogram_stddev(h1) == 2.25);
- grpc_histogram_destroy(h2);
-
- h2 = grpc_histogram_create(0.05, 1e9);
- grpc_histogram_add(h2, 7.0);
- grpc_histogram_add(h2, 17.0);
- grpc_histogram_add(h2, 1.0);
- GPR_ASSERT(grpc_histogram_merge(h1, h2) == 1);
- GPR_ASSERT(grpc_histogram_count(h1) == 7);
- GPR_ASSERT(grpc_histogram_minimum(h1) == 1.0);
- GPR_ASSERT(grpc_histogram_maximum(h1) == 17.0);
- GPR_ASSERT(grpc_histogram_sum(h1) == 42.0);
- GPR_ASSERT(grpc_histogram_sum_of_squares(h1) == 431.5);
- GPR_ASSERT(grpc_histogram_mean(h1) == 6.0);
-
- /* test monotonicity */
- last = 0.0;
- for (i = 0; i < 100.0; i += 0.01) {
- cur = grpc_histogram_percentile(h1, i);
- GPR_ASSERT(cur >= last);
- last = cur;
- }
-
- grpc_histogram_destroy(h1);
- grpc_histogram_destroy(h2);
-}
-
-int main(void) {
- test_no_op();
- test_simple();
- test_percentile();
- test_merge();
- return 0;
-}
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include "test/core/util/histogram.h"
+#include <grpc/support/log.h>
+
+#define LOG_TEST(x) gpr_log(GPR_INFO, "%s", x);
+
+static void test_no_op(void) {
+ grpc_histogram_destroy(grpc_histogram_create(0.01, 60e9));
+}
+
+static void expect_percentile(grpc_histogram* h, double percentile,
+ double min_expect, double max_expect) {
+ double got = grpc_histogram_percentile(h, percentile);
+ gpr_log(GPR_INFO, "@%f%%, expect %f <= %f <= %f", percentile, min_expect, got,
+ max_expect);
+ GPR_ASSERT(min_expect <= got);
+ GPR_ASSERT(got <= max_expect);
+}
+
+static void test_simple(void) {
+ grpc_histogram* h;
+
+ LOG_TEST("test_simple");
+
+ h = grpc_histogram_create(0.01, 60e9);
+ grpc_histogram_add(h, 10000);
+ grpc_histogram_add(h, 10000);
+ grpc_histogram_add(h, 11000);
+ grpc_histogram_add(h, 11000);
+
+ expect_percentile(h, 50, 10001, 10999);
+ GPR_ASSERT(grpc_histogram_mean(h) == 10500);
+
+ grpc_histogram_destroy(h);
+}
+
+static void test_percentile(void) {
+ grpc_histogram* h;
+ double last;
+ double i;
+ double cur;
+
+ LOG_TEST("test_percentile");
+
+ h = grpc_histogram_create(0.05, 1e9);
+ grpc_histogram_add(h, 2.5);
+ grpc_histogram_add(h, 2.5);
+ grpc_histogram_add(h, 8);
+ grpc_histogram_add(h, 4);
+
+ GPR_ASSERT(grpc_histogram_count(h) == 4);
+ GPR_ASSERT(grpc_histogram_minimum(h) == 2.5);
+ GPR_ASSERT(grpc_histogram_maximum(h) == 8);
+ GPR_ASSERT(grpc_histogram_sum(h) == 17);
+ GPR_ASSERT(grpc_histogram_sum_of_squares(h) == 92.5);
+ GPR_ASSERT(grpc_histogram_mean(h) == 4.25);
+ GPR_ASSERT(grpc_histogram_variance(h) == 5.0625);
+ GPR_ASSERT(grpc_histogram_stddev(h) == 2.25);
+
+ expect_percentile(h, -10, 2.5, 2.5);
+ expect_percentile(h, 0, 2.5, 2.5);
+ expect_percentile(h, 12.5, 2.5, 2.5);
+ expect_percentile(h, 25, 2.5, 2.5);
+ expect_percentile(h, 37.5, 2.5, 2.8);
+ expect_percentile(h, 50, 3.0, 3.5);
+ expect_percentile(h, 62.5, 3.5, 4.5);
+ expect_percentile(h, 75, 5, 7.9);
+ expect_percentile(h, 100, 8, 8);
+ expect_percentile(h, 110, 8, 8);
+
+ /* test monotonicity */
+ last = 0.0;
+ for (i = 0; i < 100.0; i += 0.01) {
+ cur = grpc_histogram_percentile(h, i);
+ GPR_ASSERT(cur >= last);
+ last = cur;
+ }
+
+ grpc_histogram_destroy(h);
+}
+
+static void test_merge(void) {
+ grpc_histogram *h1, *h2;
+ double last;
+ double i;
+ double cur;
+
+ LOG_TEST("test_merge");
+
+ h1 = grpc_histogram_create(0.05, 1e9);
+ grpc_histogram_add(h1, 2.5);
+ grpc_histogram_add(h1, 2.5);
+ grpc_histogram_add(h1, 8);
+ grpc_histogram_add(h1, 4);
+
+ h2 = grpc_histogram_create(0.01, 1e9);
+ GPR_ASSERT(grpc_histogram_merge(h1, h2) == 0);
+ grpc_histogram_destroy(h2);
+
+ h2 = grpc_histogram_create(0.05, 1e10);
+ GPR_ASSERT(grpc_histogram_merge(h1, h2) == 0);
+ grpc_histogram_destroy(h2);
+
+ h2 = grpc_histogram_create(0.05, 1e9);
+ GPR_ASSERT(grpc_histogram_merge(h1, h2) == 1);
+ GPR_ASSERT(grpc_histogram_count(h1) == 4);
+ GPR_ASSERT(grpc_histogram_minimum(h1) == 2.5);
+ GPR_ASSERT(grpc_histogram_maximum(h1) == 8);
+ GPR_ASSERT(grpc_histogram_sum(h1) == 17);
+ GPR_ASSERT(grpc_histogram_sum_of_squares(h1) == 92.5);
+ GPR_ASSERT(grpc_histogram_mean(h1) == 4.25);
+ GPR_ASSERT(grpc_histogram_variance(h1) == 5.0625);
+ GPR_ASSERT(grpc_histogram_stddev(h1) == 2.25);
+ grpc_histogram_destroy(h2);
+
+ h2 = grpc_histogram_create(0.05, 1e9);
+ grpc_histogram_add(h2, 7.0);
+ grpc_histogram_add(h2, 17.0);
+ grpc_histogram_add(h2, 1.0);
+ GPR_ASSERT(grpc_histogram_merge(h1, h2) == 1);
+ GPR_ASSERT(grpc_histogram_count(h1) == 7);
+ GPR_ASSERT(grpc_histogram_minimum(h1) == 1.0);
+ GPR_ASSERT(grpc_histogram_maximum(h1) == 17.0);
+ GPR_ASSERT(grpc_histogram_sum(h1) == 42.0);
+ GPR_ASSERT(grpc_histogram_sum_of_squares(h1) == 431.5);
+ GPR_ASSERT(grpc_histogram_mean(h1) == 6.0);
+
+ /* test monotonicity */
+ last = 0.0;
+ for (i = 0; i < 100.0; i += 0.01) {
+ cur = grpc_histogram_percentile(h1, i);
+ GPR_ASSERT(cur >= last);
+ last = cur;
+ }
+
+ grpc_histogram_destroy(h1);
+ grpc_histogram_destroy(h2);
+}
+
+int main(void) {
+ test_no_op();
+ test_simple();
+ test_percentile();
+ test_merge();
+ return 0;
+}
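The constants asserted in test_percentile and test_merge follow directly from the samples. For the four values 2.5, 2.5, 8 and 4: sum = 17, sum of squares = 6.25 + 6.25 + 64 + 16 = 92.5, mean = 17 / 4 = 4.25, variance = (92.5 * 4 - 17 * 17) / (4 * 4) = (370 - 289) / 16 = 5.0625 (the same formula grpc_histogram_variance uses), and stddev = sqrt(5.0625) = 2.25. After merging in 7, 17 and 1 the totals become count = 7, sum = 42, sum of squares = 92.5 + 49 + 289 + 1 = 431.5 and mean = 42 / 7 = 6, matching the second block of assertions.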
diff --git a/contrib/libs/grpc/test/core/util/lsan_suppressions.txt b/contrib/libs/grpc/test/core/util/lsan_suppressions.txt
index 4ab9d9978b..204ddbef5e 100644
--- a/contrib/libs/grpc/test/core/util/lsan_suppressions.txt
+++ b/contrib/libs/grpc/test/core/util/lsan_suppressions.txt
@@ -1,6 +1,6 @@
-# this is busted in BoringSSL
-leak:CRYPTO_set_thread_local
-leak:err_get_state
-leak:ERR_add_error_dataf
-leak:err_add_error_vdata
-leak:RAND_bytes_with_additional_data
+# this is busted in BoringSSL
+leak:CRYPTO_set_thread_local
+leak:err_get_state
+leak:ERR_add_error_dataf
+leak:err_add_error_vdata
+leak:RAND_bytes_with_additional_data
diff --git a/contrib/libs/grpc/test/core/util/memory_counters.cc b/contrib/libs/grpc/test/core/util/memory_counters.cc
index 62cfc9b4c3..ddd8d2f283 100644
--- a/contrib/libs/grpc/test/core/util/memory_counters.cc
+++ b/contrib/libs/grpc/test/core/util/memory_counters.cc
@@ -1,169 +1,169 @@
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include <inttypes.h>
-#include <stdint.h>
-#include <string.h>
-
-#include <grpc/grpc.h>
-#include <grpc/support/alloc.h>
-#include <grpc/support/log.h>
-#include <grpc/support/sync.h>
-#include <grpc/support/time.h>
-
-#include "src/core/lib/gpr/alloc.h"
-#include "src/core/lib/surface/init.h"
-#include "test/core/util/memory_counters.h"
-
-#include <stdio.h>
-
-static struct grpc_memory_counters g_memory_counters;
-static bool g_memory_counter_enabled;
-
-#ifdef GPR_LOW_LEVEL_COUNTERS
-/* hide these from the microbenchmark atomic stats */
-#define NO_BARRIER_FETCH_ADD(x, sz) \
- __atomic_fetch_add((x), (sz), __ATOMIC_RELAXED)
-#define NO_BARRIER_LOAD(x) __atomic_load_n((x), __ATOMIC_RELAXED)
-#else
-#define NO_BARRIER_FETCH_ADD(x, sz) gpr_atm_no_barrier_fetch_add(x, sz)
-#define NO_BARRIER_LOAD(x) gpr_atm_no_barrier_load(x)
-#endif
-
-// Memory counter uses --wrap=symbol feature from ld. To use this,
-// `GPR_WRAP_MEMORY_COUNTER` needs to be defined. following options should be
-// passed to the compiler.
-// -Wl,--wrap=malloc -Wl,--wrap=calloc -Wl,--wrap=realloc -Wl,--wrap=free
-// * Reference: https://linux.die.net/man/1/ld)
-#if GPR_WRAP_MEMORY_COUNTER
-
-extern "C" {
-void* __real_malloc(size_t size);
-void* __real_calloc(size_t size);
-void* __real_realloc(void* ptr, size_t size);
-void __real_free(void* ptr);
-
-void* __wrap_malloc(size_t size);
-void* __wrap_calloc(size_t size);
-void* __wrap_realloc(void* ptr, size_t size);
-void __wrap_free(void* ptr);
-}
-
-void* __wrap_malloc(size_t size) {
- if (!size) return nullptr;
- NO_BARRIER_FETCH_ADD(&g_memory_counters.total_size_absolute, (gpr_atm)size);
- NO_BARRIER_FETCH_ADD(&g_memory_counters.total_size_relative, (gpr_atm)size);
- NO_BARRIER_FETCH_ADD(&g_memory_counters.total_allocs_absolute, (gpr_atm)1);
- NO_BARRIER_FETCH_ADD(&g_memory_counters.total_allocs_relative, (gpr_atm)1);
- void* ptr =
- __real_malloc(GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(size)) + size);
- *static_cast<size_t*>(ptr) = size;
- return static_cast<char*>(ptr) + GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(size));
-}
-
-void* __wrap_calloc(size_t size) {
- if (!size) return nullptr;
- NO_BARRIER_FETCH_ADD(&g_memory_counters.total_size_absolute, (gpr_atm)size);
- NO_BARRIER_FETCH_ADD(&g_memory_counters.total_size_relative, (gpr_atm)size);
- NO_BARRIER_FETCH_ADD(&g_memory_counters.total_allocs_absolute, (gpr_atm)1);
- NO_BARRIER_FETCH_ADD(&g_memory_counters.total_allocs_relative, (gpr_atm)1);
- void* ptr =
- __real_calloc(GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(size)) + size);
- *static_cast<size_t*>(ptr) = size;
- return static_cast<char*>(ptr) + GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(size));
-}
-
-void* __wrap_realloc(void* ptr, size_t size) {
- if (ptr == nullptr) {
- return __wrap_malloc(size);
- }
- if (size == 0) {
- __wrap_free(ptr);
- return nullptr;
- }
- void* rptr =
- static_cast<char*>(ptr) - GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(size));
- NO_BARRIER_FETCH_ADD(&g_memory_counters.total_size_absolute, (gpr_atm)size);
- NO_BARRIER_FETCH_ADD(&g_memory_counters.total_size_relative,
- -*static_cast<gpr_atm*>(rptr));
- NO_BARRIER_FETCH_ADD(&g_memory_counters.total_size_relative, (gpr_atm)size);
- NO_BARRIER_FETCH_ADD(&g_memory_counters.total_allocs_absolute, (gpr_atm)1);
- void* new_ptr =
- __real_realloc(rptr, GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(size)) + size);
- *static_cast<size_t*>(new_ptr) = size;
- return static_cast<char*>(new_ptr) +
- GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(size));
-}
-
-void __wrap_free(void* ptr) {
- if (ptr == nullptr) return;
- void* rptr =
- static_cast<char*>(ptr) - GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(size_t));
- NO_BARRIER_FETCH_ADD(&g_memory_counters.total_size_relative,
- -*static_cast<gpr_atm*>(rptr));
- NO_BARRIER_FETCH_ADD(&g_memory_counters.total_allocs_relative, -(gpr_atm)1);
- __real_free(rptr);
-}
-
-#endif // GPR_WRAP_MEMORY_COUNTER
-
-void grpc_memory_counters_init() {
- memset(&g_memory_counters, 0, sizeof(g_memory_counters));
- g_memory_counter_enabled = true;
-}
-
-void grpc_memory_counters_destroy() { g_memory_counter_enabled = false; }
-
-struct grpc_memory_counters grpc_memory_counters_snapshot() {
- struct grpc_memory_counters counters;
- counters.total_size_relative =
- NO_BARRIER_LOAD(&g_memory_counters.total_size_relative);
- counters.total_size_absolute =
- NO_BARRIER_LOAD(&g_memory_counters.total_size_absolute);
- counters.total_allocs_relative =
- NO_BARRIER_LOAD(&g_memory_counters.total_allocs_relative);
- counters.total_allocs_absolute =
- NO_BARRIER_LOAD(&g_memory_counters.total_allocs_absolute);
- return counters;
-}
-
-namespace grpc_core {
-namespace testing {
-
-LeakDetector::LeakDetector(bool enable) : enabled_(enable) {
- if (enabled_) {
- grpc_memory_counters_init();
- }
-}
-
-LeakDetector::~LeakDetector() {
- // Wait for grpc_shutdown() to finish its async work.
- grpc_maybe_wait_for_async_shutdown();
- if (enabled_) {
- struct grpc_memory_counters counters = grpc_memory_counters_snapshot();
- if (counters.total_size_relative != 0) {
- gpr_log(GPR_ERROR, "Leaking %" PRIuPTR " bytes",
- static_cast<uintptr_t>(counters.total_size_relative));
- GPR_ASSERT(0);
- }
- grpc_memory_counters_destroy();
- }
-}
-
-} // namespace testing
-} // namespace grpc_core
+/*
+ *
+ * Copyright 2016 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <inttypes.h>
+#include <stdint.h>
+#include <string.h>
+
+#include <grpc/grpc.h>
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/sync.h>
+#include <grpc/support/time.h>
+
+#include "src/core/lib/gpr/alloc.h"
+#include "src/core/lib/surface/init.h"
+#include "test/core/util/memory_counters.h"
+
+#include <stdio.h>
+
+static struct grpc_memory_counters g_memory_counters;
+static bool g_memory_counter_enabled;
+
+#ifdef GPR_LOW_LEVEL_COUNTERS
+/* hide these from the microbenchmark atomic stats */
+#define NO_BARRIER_FETCH_ADD(x, sz) \
+ __atomic_fetch_add((x), (sz), __ATOMIC_RELAXED)
+#define NO_BARRIER_LOAD(x) __atomic_load_n((x), __ATOMIC_RELAXED)
+#else
+#define NO_BARRIER_FETCH_ADD(x, sz) gpr_atm_no_barrier_fetch_add(x, sz)
+#define NO_BARRIER_LOAD(x) gpr_atm_no_barrier_load(x)
+#endif
+
+// The memory counter uses the --wrap=symbol feature of ld. To use it,
+// `GPR_WRAP_MEMORY_COUNTER` needs to be defined and the following options
+// passed to the compiler driver:
+// -Wl,--wrap=malloc -Wl,--wrap=calloc -Wl,--wrap=realloc -Wl,--wrap=free
+// * Reference: https://linux.die.net/man/1/ld
+#if GPR_WRAP_MEMORY_COUNTER
+
+extern "C" {
+void* __real_malloc(size_t size);
+void* __real_calloc(size_t size);
+void* __real_realloc(void* ptr, size_t size);
+void __real_free(void* ptr);
+
+void* __wrap_malloc(size_t size);
+void* __wrap_calloc(size_t size);
+void* __wrap_realloc(void* ptr, size_t size);
+void __wrap_free(void* ptr);
+}
+
+void* __wrap_malloc(size_t size) {
+ if (!size) return nullptr;
+ NO_BARRIER_FETCH_ADD(&g_memory_counters.total_size_absolute, (gpr_atm)size);
+ NO_BARRIER_FETCH_ADD(&g_memory_counters.total_size_relative, (gpr_atm)size);
+ NO_BARRIER_FETCH_ADD(&g_memory_counters.total_allocs_absolute, (gpr_atm)1);
+ NO_BARRIER_FETCH_ADD(&g_memory_counters.total_allocs_relative, (gpr_atm)1);
+ void* ptr =
+ __real_malloc(GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(size)) + size);
+ *static_cast<size_t*>(ptr) = size;
+ return static_cast<char*>(ptr) + GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(size));
+}
+
+void* __wrap_calloc(size_t size) {
+ if (!size) return nullptr;
+ NO_BARRIER_FETCH_ADD(&g_memory_counters.total_size_absolute, (gpr_atm)size);
+ NO_BARRIER_FETCH_ADD(&g_memory_counters.total_size_relative, (gpr_atm)size);
+ NO_BARRIER_FETCH_ADD(&g_memory_counters.total_allocs_absolute, (gpr_atm)1);
+ NO_BARRIER_FETCH_ADD(&g_memory_counters.total_allocs_relative, (gpr_atm)1);
+ void* ptr =
+ __real_calloc(GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(size)) + size);
+ *static_cast<size_t*>(ptr) = size;
+ return static_cast<char*>(ptr) + GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(size));
+}
+
+void* __wrap_realloc(void* ptr, size_t size) {
+ if (ptr == nullptr) {
+ return __wrap_malloc(size);
+ }
+ if (size == 0) {
+ __wrap_free(ptr);
+ return nullptr;
+ }
+ void* rptr =
+ static_cast<char*>(ptr) - GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(size));
+ NO_BARRIER_FETCH_ADD(&g_memory_counters.total_size_absolute, (gpr_atm)size);
+ NO_BARRIER_FETCH_ADD(&g_memory_counters.total_size_relative,
+ -*static_cast<gpr_atm*>(rptr));
+ NO_BARRIER_FETCH_ADD(&g_memory_counters.total_size_relative, (gpr_atm)size);
+ NO_BARRIER_FETCH_ADD(&g_memory_counters.total_allocs_absolute, (gpr_atm)1);
+ void* new_ptr =
+ __real_realloc(rptr, GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(size)) + size);
+ *static_cast<size_t*>(new_ptr) = size;
+ return static_cast<char*>(new_ptr) +
+ GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(size));
+}
+
+void __wrap_free(void* ptr) {
+ if (ptr == nullptr) return;
+ void* rptr =
+ static_cast<char*>(ptr) - GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(size_t));
+ NO_BARRIER_FETCH_ADD(&g_memory_counters.total_size_relative,
+ -*static_cast<gpr_atm*>(rptr));
+ NO_BARRIER_FETCH_ADD(&g_memory_counters.total_allocs_relative, -(gpr_atm)1);
+ __real_free(rptr);
+}
+
+#endif // GPR_WRAP_MEMORY_COUNTER
+
+void grpc_memory_counters_init() {
+ memset(&g_memory_counters, 0, sizeof(g_memory_counters));
+ g_memory_counter_enabled = true;
+}
+
+void grpc_memory_counters_destroy() { g_memory_counter_enabled = false; }
+
+struct grpc_memory_counters grpc_memory_counters_snapshot() {
+ struct grpc_memory_counters counters;
+ counters.total_size_relative =
+ NO_BARRIER_LOAD(&g_memory_counters.total_size_relative);
+ counters.total_size_absolute =
+ NO_BARRIER_LOAD(&g_memory_counters.total_size_absolute);
+ counters.total_allocs_relative =
+ NO_BARRIER_LOAD(&g_memory_counters.total_allocs_relative);
+ counters.total_allocs_absolute =
+ NO_BARRIER_LOAD(&g_memory_counters.total_allocs_absolute);
+ return counters;
+}
+
+namespace grpc_core {
+namespace testing {
+
+LeakDetector::LeakDetector(bool enable) : enabled_(enable) {
+ if (enabled_) {
+ grpc_memory_counters_init();
+ }
+}
+
+LeakDetector::~LeakDetector() {
+ // Wait for grpc_shutdown() to finish its async work.
+ grpc_maybe_wait_for_async_shutdown();
+ if (enabled_) {
+ struct grpc_memory_counters counters = grpc_memory_counters_snapshot();
+ if (counters.total_size_relative != 0) {
+ gpr_log(GPR_ERROR, "Leaking %" PRIuPTR " bytes",
+ static_cast<uintptr_t>(counters.total_size_relative));
+ GPR_ASSERT(0);
+ }
+ grpc_memory_counters_destroy();
+ }
+}
+
+} // namespace testing
+} // namespace grpc_core
diff --git a/contrib/libs/grpc/test/core/util/memory_counters.h b/contrib/libs/grpc/test/core/util/memory_counters.h
index bb4100032d..c92a001ff1 100644
--- a/contrib/libs/grpc/test/core/util/memory_counters.h
+++ b/contrib/libs/grpc/test/core/util/memory_counters.h
@@ -1,53 +1,53 @@
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#ifndef GRPC_TEST_CORE_UTIL_MEMORY_COUNTERS_H
-#define GRPC_TEST_CORE_UTIL_MEMORY_COUNTERS_H
-
-#include <grpc/support/atm.h>
-
-struct grpc_memory_counters {
- gpr_atm total_size_relative;
- gpr_atm total_size_absolute;
- gpr_atm total_allocs_relative;
- gpr_atm total_allocs_absolute;
-};
-
-void grpc_memory_counters_init();
-void grpc_memory_counters_destroy();
-struct grpc_memory_counters grpc_memory_counters_snapshot();
-
-namespace grpc_core {
-namespace testing {
-
-// At destruction time, it will check there is no memory leak.
-// The object should be created before grpc_init() is called and destroyed after
-// grpc_shutdown() is returned.
-class LeakDetector {
- public:
- explicit LeakDetector(bool enable);
- ~LeakDetector();
-
- private:
- const bool enabled_;
-};
-
-} // namespace testing
-} // namespace grpc_core
-
-#endif
+/*
+ *
+ * Copyright 2016 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#ifndef GRPC_TEST_CORE_UTIL_MEMORY_COUNTERS_H
+#define GRPC_TEST_CORE_UTIL_MEMORY_COUNTERS_H
+
+#include <grpc/support/atm.h>
+
+struct grpc_memory_counters {
+ gpr_atm total_size_relative;
+ gpr_atm total_size_absolute;
+ gpr_atm total_allocs_relative;
+ gpr_atm total_allocs_absolute;
+};
+
+void grpc_memory_counters_init();
+void grpc_memory_counters_destroy();
+struct grpc_memory_counters grpc_memory_counters_snapshot();
+
+namespace grpc_core {
+namespace testing {
+
+// At destruction time, it checks that there is no memory leak.
+// The object should be created before grpc_init() is called and destroyed
+// after grpc_shutdown() has returned.
+class LeakDetector {
+ public:
+ explicit LeakDetector(bool enable);
+ ~LeakDetector();
+
+ private:
+ const bool enabled_;
+};
+
+} // namespace testing
+} // namespace grpc_core
+
+#endif
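
A minimal usage sketch for the LeakDetector declared above. It assumes the binary is built with GPR_WRAP_MEMORY_COUNTER and the -Wl,--wrap=... flags described in memory_counters.cc (otherwise the counters never move), and the main() shell is illustrative rather than copied from the tree:

#include <grpc/grpc.h>
#include "test/core/util/memory_counters.h"

int main() {
  // Construct before grpc_init() so allocations are counted from the start.
  grpc_core::testing::LeakDetector leak_detector(/*enable=*/true);
  grpc_init();
  // ... exercise gRPC code under test ...
  grpc_shutdown();
  // The destructor runs after grpc_shutdown() has returned and asserts that
  // no wrapped allocation is still outstanding.
  return 0;
}
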
diff --git a/contrib/libs/grpc/test/core/util/mock_endpoint.cc b/contrib/libs/grpc/test/core/util/mock_endpoint.cc
index 6191adee89..3c2d6a3f8b 100644
--- a/contrib/libs/grpc/test/core/util/mock_endpoint.cc
+++ b/contrib/libs/grpc/test/core/util/mock_endpoint.cc
@@ -1,155 +1,155 @@
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-/* With the addition of a libuv endpoint, sockaddr.h now includes uv.h when
- using that endpoint. Because of various transitive includes in uv.h,
- including windows.h on Windows, uv.h must be included before other system
- headers. Therefore, sockaddr.h must always be included first */
-#include "src/core/lib/iomgr/sockaddr.h"
-
+/*
+ *
+ * Copyright 2016 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+/* With the addition of a libuv endpoint, sockaddr.h now includes uv.h when
+ using that endpoint. Because of various transitive includes in uv.h,
+ including windows.h on Windows, uv.h must be included before other system
+ headers. Therefore, sockaddr.h must always be included first */
+#include "src/core/lib/iomgr/sockaddr.h"
+
#include <inttypes.h>
#include <util/generic/string.h>
#include "y_absl/strings/str_format.h"
-#include "test/core/util/mock_endpoint.h"
-
-#include <grpc/support/alloc.h>
-#include <grpc/support/string_util.h>
-#include "src/core/lib/iomgr/sockaddr.h"
-
-typedef struct mock_endpoint {
- grpc_endpoint base;
- gpr_mu mu;
- void (*on_write)(grpc_slice slice);
- grpc_slice_buffer read_buffer;
- grpc_slice_buffer* on_read_out;
- grpc_closure* on_read;
- grpc_resource_user* resource_user;
-} mock_endpoint;
-
-static void me_read(grpc_endpoint* ep, grpc_slice_buffer* slices,
- grpc_closure* cb, bool /*urgent*/) {
- mock_endpoint* m = reinterpret_cast<mock_endpoint*>(ep);
- gpr_mu_lock(&m->mu);
- if (m->read_buffer.count > 0) {
- grpc_slice_buffer_swap(&m->read_buffer, slices);
- grpc_core::ExecCtx::Run(DEBUG_LOCATION, cb, GRPC_ERROR_NONE);
- } else {
- m->on_read = cb;
- m->on_read_out = slices;
- }
- gpr_mu_unlock(&m->mu);
-}
-
-static void me_write(grpc_endpoint* ep, grpc_slice_buffer* slices,
- grpc_closure* cb, void* /*arg*/) {
- mock_endpoint* m = reinterpret_cast<mock_endpoint*>(ep);
- for (size_t i = 0; i < slices->count; i++) {
- m->on_write(slices->slices[i]);
- }
- grpc_core::ExecCtx::Run(DEBUG_LOCATION, cb, GRPC_ERROR_NONE);
-}
-
-static void me_add_to_pollset(grpc_endpoint* /*ep*/,
- grpc_pollset* /*pollset*/) {}
-
-static void me_add_to_pollset_set(grpc_endpoint* /*ep*/,
- grpc_pollset_set* /*pollset*/) {}
-
-static void me_delete_from_pollset_set(grpc_endpoint* /*ep*/,
- grpc_pollset_set* /*pollset*/) {}
-
-static void me_shutdown(grpc_endpoint* ep, grpc_error* why) {
- mock_endpoint* m = reinterpret_cast<mock_endpoint*>(ep);
- gpr_mu_lock(&m->mu);
- if (m->on_read) {
- grpc_core::ExecCtx::Run(DEBUG_LOCATION, m->on_read,
- GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
- "Endpoint Shutdown", &why, 1));
- m->on_read = nullptr;
- }
- gpr_mu_unlock(&m->mu);
- grpc_resource_user_shutdown(m->resource_user);
- GRPC_ERROR_UNREF(why);
-}
-
-static void me_destroy(grpc_endpoint* ep) {
- mock_endpoint* m = reinterpret_cast<mock_endpoint*>(ep);
- grpc_slice_buffer_destroy(&m->read_buffer);
- grpc_resource_user_unref(m->resource_user);
- gpr_mu_destroy(&m->mu);
- gpr_free(m);
-}
-
+#include "test/core/util/mock_endpoint.h"
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/string_util.h>
+#include "src/core/lib/iomgr/sockaddr.h"
+
+typedef struct mock_endpoint {
+ grpc_endpoint base;
+ gpr_mu mu;
+ void (*on_write)(grpc_slice slice);
+ grpc_slice_buffer read_buffer;
+ grpc_slice_buffer* on_read_out;
+ grpc_closure* on_read;
+ grpc_resource_user* resource_user;
+} mock_endpoint;
+
+static void me_read(grpc_endpoint* ep, grpc_slice_buffer* slices,
+ grpc_closure* cb, bool /*urgent*/) {
+ mock_endpoint* m = reinterpret_cast<mock_endpoint*>(ep);
+ gpr_mu_lock(&m->mu);
+ if (m->read_buffer.count > 0) {
+ grpc_slice_buffer_swap(&m->read_buffer, slices);
+ grpc_core::ExecCtx::Run(DEBUG_LOCATION, cb, GRPC_ERROR_NONE);
+ } else {
+ m->on_read = cb;
+ m->on_read_out = slices;
+ }
+ gpr_mu_unlock(&m->mu);
+}
+
+static void me_write(grpc_endpoint* ep, grpc_slice_buffer* slices,
+ grpc_closure* cb, void* /*arg*/) {
+ mock_endpoint* m = reinterpret_cast<mock_endpoint*>(ep);
+ for (size_t i = 0; i < slices->count; i++) {
+ m->on_write(slices->slices[i]);
+ }
+ grpc_core::ExecCtx::Run(DEBUG_LOCATION, cb, GRPC_ERROR_NONE);
+}
+
+static void me_add_to_pollset(grpc_endpoint* /*ep*/,
+ grpc_pollset* /*pollset*/) {}
+
+static void me_add_to_pollset_set(grpc_endpoint* /*ep*/,
+ grpc_pollset_set* /*pollset*/) {}
+
+static void me_delete_from_pollset_set(grpc_endpoint* /*ep*/,
+ grpc_pollset_set* /*pollset*/) {}
+
+static void me_shutdown(grpc_endpoint* ep, grpc_error* why) {
+ mock_endpoint* m = reinterpret_cast<mock_endpoint*>(ep);
+ gpr_mu_lock(&m->mu);
+ if (m->on_read) {
+ grpc_core::ExecCtx::Run(DEBUG_LOCATION, m->on_read,
+ GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+ "Endpoint Shutdown", &why, 1));
+ m->on_read = nullptr;
+ }
+ gpr_mu_unlock(&m->mu);
+ grpc_resource_user_shutdown(m->resource_user);
+ GRPC_ERROR_UNREF(why);
+}
+
+static void me_destroy(grpc_endpoint* ep) {
+ mock_endpoint* m = reinterpret_cast<mock_endpoint*>(ep);
+ grpc_slice_buffer_destroy(&m->read_buffer);
+ grpc_resource_user_unref(m->resource_user);
+ gpr_mu_destroy(&m->mu);
+ gpr_free(m);
+}
+
static y_absl::string_view me_get_peer(grpc_endpoint* /*ep*/) {
return "fake:mock_endpoint";
-}
-
+}
+
static y_absl::string_view me_get_local_address(grpc_endpoint* /*ep*/) {
return "fake:mock_endpoint";
}
-static grpc_resource_user* me_get_resource_user(grpc_endpoint* ep) {
- mock_endpoint* m = reinterpret_cast<mock_endpoint*>(ep);
- return m->resource_user;
-}
-
-static int me_get_fd(grpc_endpoint* /*ep*/) { return -1; }
-
-static bool me_can_track_err(grpc_endpoint* /*ep*/) { return false; }
-
-static const grpc_endpoint_vtable vtable = {me_read,
- me_write,
- me_add_to_pollset,
- me_add_to_pollset_set,
- me_delete_from_pollset_set,
- me_shutdown,
- me_destroy,
- me_get_resource_user,
- me_get_peer,
+static grpc_resource_user* me_get_resource_user(grpc_endpoint* ep) {
+ mock_endpoint* m = reinterpret_cast<mock_endpoint*>(ep);
+ return m->resource_user;
+}
+
+static int me_get_fd(grpc_endpoint* /*ep*/) { return -1; }
+
+static bool me_can_track_err(grpc_endpoint* /*ep*/) { return false; }
+
+static const grpc_endpoint_vtable vtable = {me_read,
+ me_write,
+ me_add_to_pollset,
+ me_add_to_pollset_set,
+ me_delete_from_pollset_set,
+ me_shutdown,
+ me_destroy,
+ me_get_resource_user,
+ me_get_peer,
me_get_local_address,
- me_get_fd,
- me_can_track_err};
-
-grpc_endpoint* grpc_mock_endpoint_create(void (*on_write)(grpc_slice slice),
- grpc_resource_quota* resource_quota) {
- mock_endpoint* m = static_cast<mock_endpoint*>(gpr_malloc(sizeof(*m)));
- m->base.vtable = &vtable;
+ me_get_fd,
+ me_can_track_err};
+
+grpc_endpoint* grpc_mock_endpoint_create(void (*on_write)(grpc_slice slice),
+ grpc_resource_quota* resource_quota) {
+ mock_endpoint* m = static_cast<mock_endpoint*>(gpr_malloc(sizeof(*m)));
+ m->base.vtable = &vtable;
TString name = y_absl::StrFormat("mock_endpoint_%" PRIxPTR, (intptr_t)m);
m->resource_user = grpc_resource_user_create(resource_quota, name.c_str());
- grpc_slice_buffer_init(&m->read_buffer);
- gpr_mu_init(&m->mu);
- m->on_write = on_write;
- m->on_read = nullptr;
- return &m->base;
-}
-
-void grpc_mock_endpoint_put_read(grpc_endpoint* ep, grpc_slice slice) {
- mock_endpoint* m = reinterpret_cast<mock_endpoint*>(ep);
- gpr_mu_lock(&m->mu);
- if (m->on_read != nullptr) {
- grpc_slice_buffer_add(m->on_read_out, slice);
- grpc_core::ExecCtx::Run(DEBUG_LOCATION, m->on_read, GRPC_ERROR_NONE);
- m->on_read = nullptr;
- } else {
- grpc_slice_buffer_add(&m->read_buffer, slice);
- }
- gpr_mu_unlock(&m->mu);
-}
+ grpc_slice_buffer_init(&m->read_buffer);
+ gpr_mu_init(&m->mu);
+ m->on_write = on_write;
+ m->on_read = nullptr;
+ return &m->base;
+}
+
+void grpc_mock_endpoint_put_read(grpc_endpoint* ep, grpc_slice slice) {
+ mock_endpoint* m = reinterpret_cast<mock_endpoint*>(ep);
+ gpr_mu_lock(&m->mu);
+ if (m->on_read != nullptr) {
+ grpc_slice_buffer_add(m->on_read_out, slice);
+ grpc_core::ExecCtx::Run(DEBUG_LOCATION, m->on_read, GRPC_ERROR_NONE);
+ m->on_read = nullptr;
+ } else {
+ grpc_slice_buffer_add(&m->read_buffer, slice);
+ }
+ gpr_mu_unlock(&m->mu);
+}
diff --git a/contrib/libs/grpc/test/core/util/mock_endpoint.h b/contrib/libs/grpc/test/core/util/mock_endpoint.h
index 1d63c36256..6521d3e8e8 100644
--- a/contrib/libs/grpc/test/core/util/mock_endpoint.h
+++ b/contrib/libs/grpc/test/core/util/mock_endpoint.h
@@ -1,29 +1,29 @@
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#ifndef MOCK_ENDPOINT_H
-#define MOCK_ENDPOINT_H
-
-#include "src/core/lib/iomgr/endpoint.h"
-
-grpc_endpoint* grpc_mock_endpoint_create(void (*on_write)(grpc_slice slice),
- grpc_resource_quota* resource_quota);
-void grpc_mock_endpoint_put_read(grpc_endpoint* mock_endpoint,
- grpc_slice slice);
-
-#endif
+/*
+ *
+ * Copyright 2016 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#ifndef MOCK_ENDPOINT_H
+#define MOCK_ENDPOINT_H
+
+#include "src/core/lib/iomgr/endpoint.h"
+
+grpc_endpoint* grpc_mock_endpoint_create(void (*on_write)(grpc_slice slice),
+ grpc_resource_quota* resource_quota);
+void grpc_mock_endpoint_put_read(grpc_endpoint* mock_endpoint,
+ grpc_slice slice);
+
+#endif
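
A hedged sketch of how the two functions above are typically combined in a test; the helper names and the surrounding function are illustrative, extra includes (exec_ctx.h, slice and resource-quota headers) are omitted, and grpc_init() is assumed to have run:

static void discard_write(grpc_slice /*slice*/) {
  // The mock endpoint hands each written slice to this callback; the writer
  // retains ownership of the slice, so simply ignoring it here is enough.
}

static void mock_endpoint_example() {
  grpc_core::ExecCtx exec_ctx;
  grpc_resource_quota* quota = grpc_resource_quota_create("mock_endpoint_example");
  grpc_endpoint* ep = grpc_mock_endpoint_create(discard_write, quota);
  grpc_resource_quota_unref(quota);
  // Bytes queued here are delivered to the next grpc_endpoint_read() on ep.
  grpc_mock_endpoint_put_read(ep, grpc_slice_from_copied_string("hello"));
  // ... drive reads/writes against ep ...
  grpc_endpoint_destroy(ep);
}
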
diff --git a/contrib/libs/grpc/test/core/util/one_corpus_entry_fuzzer.cc b/contrib/libs/grpc/test/core/util/one_corpus_entry_fuzzer.cc
index 564dacad9a..2f376d6853 100644
--- a/contrib/libs/grpc/test/core/util/one_corpus_entry_fuzzer.cc
+++ b/contrib/libs/grpc/test/core/util/one_corpus_entry_fuzzer.cc
@@ -1,48 +1,48 @@
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include <stdbool.h>
-
-#include <grpc/grpc.h>
-
-#include <grpc/support/log.h>
-#include "src/core/lib/iomgr/exec_ctx.h"
-#include "src/core/lib/iomgr/load_file.h"
-
-extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size);
-
-extern bool squelch;
-extern bool leak_check;
-
-int main(int argc, char** argv) {
- grpc_slice buffer;
- squelch = false;
- leak_check = false;
- /* TODO(yashkt) Calling grpc_init breaks tests. Fix the tests and replace
- * grpc_core::ExecCtx::GlobalInit with grpc_init and GlobalShutdown with
- * grpc_shutdown */
- GPR_ASSERT(argc > 1); /* Make sure that we have a filename argument */
- GPR_ASSERT(
- GRPC_LOG_IF_ERROR("load_file", grpc_load_file(argv[1], 0, &buffer)));
- LLVMFuzzerTestOneInput(GRPC_SLICE_START_PTR(buffer),
- GRPC_SLICE_LENGTH(buffer));
- grpc_core::ExecCtx::GlobalInit();
- grpc_slice_unref(buffer);
- grpc_core::ExecCtx::GlobalShutdown();
- return 0;
-}
+/*
+ *
+ * Copyright 2016 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <stdbool.h>
+
+#include <grpc/grpc.h>
+
+#include <grpc/support/log.h>
+#include "src/core/lib/iomgr/exec_ctx.h"
+#include "src/core/lib/iomgr/load_file.h"
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size);
+
+extern bool squelch;
+extern bool leak_check;
+
+int main(int argc, char** argv) {
+ grpc_slice buffer;
+ squelch = false;
+ leak_check = false;
+ /* TODO(yashkt) Calling grpc_init breaks tests. Fix the tests and replace
+ * grpc_core::ExecCtx::GlobalInit with grpc_init and GlobalShutdown with
+ * grpc_shutdown */
+ GPR_ASSERT(argc > 1); /* Make sure that we have a filename argument */
+ GPR_ASSERT(
+ GRPC_LOG_IF_ERROR("load_file", grpc_load_file(argv[1], 0, &buffer)));
+ LLVMFuzzerTestOneInput(GRPC_SLICE_START_PTR(buffer),
+ GRPC_SLICE_LENGTH(buffer));
+ grpc_core::ExecCtx::GlobalInit();
+ grpc_slice_unref(buffer);
+ grpc_core::ExecCtx::GlobalShutdown();
+ return 0;
+}
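
This runner only supplies main(); the fuzz target it is linked against is expected to define LLVMFuzzerTestOneInput and the squelch/leak_check globals. A skeletal target, shown purely as an assumed shape, looks roughly like:

#include <cstddef>
#include <cstdint>

// Fuzz targets conventionally default these to true; the runner's main()
// flips both to false before invoking the target (a convention assumed here,
// not something this file enforces).
bool squelch = true;
bool leak_check = true;

extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
  // ... feed data/size into the component under test ...
  (void)data;
  (void)size;
  return 0;
}
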
diff --git a/contrib/libs/grpc/test/core/util/parse_hexstring.cc b/contrib/libs/grpc/test/core/util/parse_hexstring.cc
index a4830e87fc..cd64843bd3 100644
--- a/contrib/libs/grpc/test/core/util/parse_hexstring.cc
+++ b/contrib/libs/grpc/test/core/util/parse_hexstring.cc
@@ -1,56 +1,56 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include "test/core/util/parse_hexstring.h"
-#include <grpc/support/log.h>
-
-grpc_slice parse_hexstring(const char* hexstring) {
- size_t nibbles = 0;
- const char* p = nullptr;
- uint8_t* out;
- uint8_t temp;
- grpc_slice slice;
-
- for (p = hexstring; *p; p++) {
- nibbles += (*p >= '0' && *p <= '9') || (*p >= 'a' && *p <= 'f');
- }
-
- GPR_ASSERT((nibbles & 1) == 0);
-
- slice = grpc_slice_malloc(nibbles / 2);
- out = GRPC_SLICE_START_PTR(slice);
-
- nibbles = 0;
- temp = 0;
- for (p = hexstring; *p; p++) {
- if (*p >= '0' && *p <= '9') {
- temp = static_cast<uint8_t>(temp << 4) | static_cast<uint8_t>(*p - '0');
- nibbles++;
- } else if (*p >= 'a' && *p <= 'f') {
- temp =
- static_cast<uint8_t>(temp << 4) | static_cast<uint8_t>(*p - 'a' + 10);
- nibbles++;
- }
- if (nibbles == 2) {
- *out++ = temp;
- nibbles = 0;
- }
- }
-
- return slice;
-}
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include "test/core/util/parse_hexstring.h"
+#include <grpc/support/log.h>
+
+grpc_slice parse_hexstring(const char* hexstring) {
+ size_t nibbles = 0;
+ const char* p = nullptr;
+ uint8_t* out;
+ uint8_t temp;
+ grpc_slice slice;
+
+ for (p = hexstring; *p; p++) {
+ nibbles += (*p >= '0' && *p <= '9') || (*p >= 'a' && *p <= 'f');
+ }
+
+ GPR_ASSERT((nibbles & 1) == 0);
+
+ slice = grpc_slice_malloc(nibbles / 2);
+ out = GRPC_SLICE_START_PTR(slice);
+
+ nibbles = 0;
+ temp = 0;
+ for (p = hexstring; *p; p++) {
+ if (*p >= '0' && *p <= '9') {
+ temp = static_cast<uint8_t>(temp << 4) | static_cast<uint8_t>(*p - '0');
+ nibbles++;
+ } else if (*p >= 'a' && *p <= 'f') {
+ temp =
+ static_cast<uint8_t>(temp << 4) | static_cast<uint8_t>(*p - 'a' + 10);
+ nibbles++;
+ }
+ if (nibbles == 2) {
+ *out++ = temp;
+ nibbles = 0;
+ }
+ }
+
+ return slice;
+}
diff --git a/contrib/libs/grpc/test/core/util/parse_hexstring.h b/contrib/libs/grpc/test/core/util/parse_hexstring.h
index 0c0419461e..b7d54c1711 100644
--- a/contrib/libs/grpc/test/core/util/parse_hexstring.h
+++ b/contrib/libs/grpc/test/core/util/parse_hexstring.h
@@ -1,26 +1,26 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#ifndef GRPC_TEST_CORE_UTIL_PARSE_HEXSTRING_H
-#define GRPC_TEST_CORE_UTIL_PARSE_HEXSTRING_H
-
-#include <grpc/slice.h>
-
-grpc_slice parse_hexstring(const char* hexstring);
-
-#endif /* GRPC_TEST_CORE_UTIL_PARSE_HEXSTRING_H */
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#ifndef GRPC_TEST_CORE_UTIL_PARSE_HEXSTRING_H
+#define GRPC_TEST_CORE_UTIL_PARSE_HEXSTRING_H
+
+#include <grpc/slice.h>
+
+grpc_slice parse_hexstring(const char* hexstring);
+
+#endif /* GRPC_TEST_CORE_UTIL_PARSE_HEXSTRING_H */
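
A small usage example; since the parser above consumes only [0-9a-f] and skips every other character, separators such as spaces are allowed:

#include <grpc/support/log.h>
#include "test/core/util/parse_hexstring.h"

static void parse_hexstring_example() {
  grpc_slice s = parse_hexstring("dead beef");
  GPR_ASSERT(GRPC_SLICE_LENGTH(s) == 4);
  GPR_ASSERT(GRPC_SLICE_START_PTR(s)[0] == 0xde);
  GPR_ASSERT(GRPC_SLICE_START_PTR(s)[3] == 0xef);
  grpc_slice_unref(s);
}
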
diff --git a/contrib/libs/grpc/test/core/util/passthru_endpoint.cc b/contrib/libs/grpc/test/core/util/passthru_endpoint.cc
index f2224260af..a5baafad02 100644
--- a/contrib/libs/grpc/test/core/util/passthru_endpoint.cc
+++ b/contrib/libs/grpc/test/core/util/passthru_endpoint.cc
@@ -1,164 +1,164 @@
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-/* With the addition of a libuv endpoint, sockaddr.h now includes uv.h when
- using that endpoint. Because of various transitive includes in uv.h,
- including windows.h on Windows, uv.h must be included before other system
- headers. Therefore, sockaddr.h must always be included first */
-#include "src/core/lib/iomgr/sockaddr.h"
-
-#include "test/core/util/passthru_endpoint.h"
-
-#include <inttypes.h>
-#include <string.h>
-
+/*
+ *
+ * Copyright 2016 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+/* With the addition of a libuv endpoint, sockaddr.h now includes uv.h when
+ using that endpoint. Because of various transitive includes in uv.h,
+ including windows.h on Windows, uv.h must be included before other system
+ headers. Therefore, sockaddr.h must always be included first */
+#include "src/core/lib/iomgr/sockaddr.h"
+
+#include "test/core/util/passthru_endpoint.h"
+
+#include <inttypes.h>
+#include <string.h>
+
#include <util/generic/string.h>
#include "y_absl/strings/str_format.h"
-#include <grpc/support/alloc.h>
-#include <grpc/support/string_util.h>
-#include "src/core/lib/iomgr/sockaddr.h"
-
-#include "src/core/lib/slice/slice_internal.h"
-
-typedef struct passthru_endpoint passthru_endpoint;
-
-typedef struct {
- grpc_endpoint base;
- passthru_endpoint* parent;
- grpc_slice_buffer read_buffer;
- grpc_slice_buffer* on_read_out;
- grpc_closure* on_read;
- grpc_resource_user* resource_user;
-} half;
-
-struct passthru_endpoint {
- gpr_mu mu;
- int halves;
- grpc_passthru_endpoint_stats* stats;
- bool shutdown;
- half client;
- half server;
-};
-
-static void me_read(grpc_endpoint* ep, grpc_slice_buffer* slices,
- grpc_closure* cb, bool /*urgent*/) {
- half* m = reinterpret_cast<half*>(ep);
- gpr_mu_lock(&m->parent->mu);
- if (m->parent->shutdown) {
- grpc_core::ExecCtx::Run(
- DEBUG_LOCATION, cb,
- GRPC_ERROR_CREATE_FROM_STATIC_STRING("Already shutdown"));
- } else if (m->read_buffer.count > 0) {
- grpc_slice_buffer_swap(&m->read_buffer, slices);
- grpc_core::ExecCtx::Run(DEBUG_LOCATION, cb, GRPC_ERROR_NONE);
- } else {
- m->on_read = cb;
- m->on_read_out = slices;
- }
- gpr_mu_unlock(&m->parent->mu);
-}
-
-static half* other_half(half* h) {
- if (h == &h->parent->client) return &h->parent->server;
- return &h->parent->client;
-}
-
-static void me_write(grpc_endpoint* ep, grpc_slice_buffer* slices,
- grpc_closure* cb, void* /*arg*/) {
- half* m = other_half(reinterpret_cast<half*>(ep));
- gpr_mu_lock(&m->parent->mu);
- grpc_error* error = GRPC_ERROR_NONE;
- gpr_atm_no_barrier_fetch_add(&m->parent->stats->num_writes, (gpr_atm)1);
- if (m->parent->shutdown) {
- error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Endpoint already shutdown");
- } else if (m->on_read != nullptr) {
- for (size_t i = 0; i < slices->count; i++) {
- grpc_slice_buffer_add(m->on_read_out, grpc_slice_copy(slices->slices[i]));
- }
- grpc_core::ExecCtx::Run(DEBUG_LOCATION, m->on_read, GRPC_ERROR_NONE);
- m->on_read = nullptr;
- } else {
- for (size_t i = 0; i < slices->count; i++) {
- grpc_slice_buffer_add(&m->read_buffer,
- grpc_slice_copy(slices->slices[i]));
- }
- }
- gpr_mu_unlock(&m->parent->mu);
- grpc_core::ExecCtx::Run(DEBUG_LOCATION, cb, error);
-}
-
-static void me_add_to_pollset(grpc_endpoint* /*ep*/,
- grpc_pollset* /*pollset*/) {}
-
-static void me_add_to_pollset_set(grpc_endpoint* /*ep*/,
- grpc_pollset_set* /*pollset*/) {}
-
-static void me_delete_from_pollset_set(grpc_endpoint* /*ep*/,
- grpc_pollset_set* /*pollset*/) {}
-
-static void me_shutdown(grpc_endpoint* ep, grpc_error* why) {
- half* m = reinterpret_cast<half*>(ep);
- gpr_mu_lock(&m->parent->mu);
- m->parent->shutdown = true;
- if (m->on_read) {
- grpc_core::ExecCtx::Run(
- DEBUG_LOCATION, m->on_read,
- GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING("Shutdown", &why, 1));
- m->on_read = nullptr;
- }
- m = other_half(m);
- if (m->on_read) {
- grpc_core::ExecCtx::Run(
- DEBUG_LOCATION, m->on_read,
- GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING("Shutdown", &why, 1));
- m->on_read = nullptr;
- }
- gpr_mu_unlock(&m->parent->mu);
- grpc_resource_user_shutdown(m->resource_user);
- GRPC_ERROR_UNREF(why);
-}
-
-static void me_destroy(grpc_endpoint* ep) {
- passthru_endpoint* p = (reinterpret_cast<half*>(ep))->parent;
- gpr_mu_lock(&p->mu);
- if (0 == --p->halves) {
- gpr_mu_unlock(&p->mu);
- gpr_mu_destroy(&p->mu);
- grpc_passthru_endpoint_stats_destroy(p->stats);
- grpc_slice_buffer_destroy_internal(&p->client.read_buffer);
- grpc_slice_buffer_destroy_internal(&p->server.read_buffer);
- grpc_resource_user_unref(p->client.resource_user);
- grpc_resource_user_unref(p->server.resource_user);
- gpr_free(p);
- } else {
- gpr_mu_unlock(&p->mu);
- }
-}
-
+#include <grpc/support/alloc.h>
+#include <grpc/support/string_util.h>
+#include "src/core/lib/iomgr/sockaddr.h"
+
+#include "src/core/lib/slice/slice_internal.h"
+
+typedef struct passthru_endpoint passthru_endpoint;
+
+typedef struct {
+ grpc_endpoint base;
+ passthru_endpoint* parent;
+ grpc_slice_buffer read_buffer;
+ grpc_slice_buffer* on_read_out;
+ grpc_closure* on_read;
+ grpc_resource_user* resource_user;
+} half;
+
+struct passthru_endpoint {
+ gpr_mu mu;
+ int halves;
+ grpc_passthru_endpoint_stats* stats;
+ bool shutdown;
+ half client;
+ half server;
+};
+
+static void me_read(grpc_endpoint* ep, grpc_slice_buffer* slices,
+ grpc_closure* cb, bool /*urgent*/) {
+ half* m = reinterpret_cast<half*>(ep);
+ gpr_mu_lock(&m->parent->mu);
+ if (m->parent->shutdown) {
+ grpc_core::ExecCtx::Run(
+ DEBUG_LOCATION, cb,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Already shutdown"));
+ } else if (m->read_buffer.count > 0) {
+ grpc_slice_buffer_swap(&m->read_buffer, slices);
+ grpc_core::ExecCtx::Run(DEBUG_LOCATION, cb, GRPC_ERROR_NONE);
+ } else {
+ m->on_read = cb;
+ m->on_read_out = slices;
+ }
+ gpr_mu_unlock(&m->parent->mu);
+}
+
+static half* other_half(half* h) {
+ if (h == &h->parent->client) return &h->parent->server;
+ return &h->parent->client;
+}
+
+static void me_write(grpc_endpoint* ep, grpc_slice_buffer* slices,
+ grpc_closure* cb, void* /*arg*/) {
+ half* m = other_half(reinterpret_cast<half*>(ep));
+ gpr_mu_lock(&m->parent->mu);
+ grpc_error* error = GRPC_ERROR_NONE;
+ gpr_atm_no_barrier_fetch_add(&m->parent->stats->num_writes, (gpr_atm)1);
+ if (m->parent->shutdown) {
+ error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Endpoint already shutdown");
+ } else if (m->on_read != nullptr) {
+ for (size_t i = 0; i < slices->count; i++) {
+ grpc_slice_buffer_add(m->on_read_out, grpc_slice_copy(slices->slices[i]));
+ }
+ grpc_core::ExecCtx::Run(DEBUG_LOCATION, m->on_read, GRPC_ERROR_NONE);
+ m->on_read = nullptr;
+ } else {
+ for (size_t i = 0; i < slices->count; i++) {
+ grpc_slice_buffer_add(&m->read_buffer,
+ grpc_slice_copy(slices->slices[i]));
+ }
+ }
+ gpr_mu_unlock(&m->parent->mu);
+ grpc_core::ExecCtx::Run(DEBUG_LOCATION, cb, error);
+}
+
+static void me_add_to_pollset(grpc_endpoint* /*ep*/,
+ grpc_pollset* /*pollset*/) {}
+
+static void me_add_to_pollset_set(grpc_endpoint* /*ep*/,
+ grpc_pollset_set* /*pollset*/) {}
+
+static void me_delete_from_pollset_set(grpc_endpoint* /*ep*/,
+ grpc_pollset_set* /*pollset*/) {}
+
+static void me_shutdown(grpc_endpoint* ep, grpc_error* why) {
+ half* m = reinterpret_cast<half*>(ep);
+ gpr_mu_lock(&m->parent->mu);
+ m->parent->shutdown = true;
+ if (m->on_read) {
+ grpc_core::ExecCtx::Run(
+ DEBUG_LOCATION, m->on_read,
+ GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING("Shutdown", &why, 1));
+ m->on_read = nullptr;
+ }
+ m = other_half(m);
+ if (m->on_read) {
+ grpc_core::ExecCtx::Run(
+ DEBUG_LOCATION, m->on_read,
+ GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING("Shutdown", &why, 1));
+ m->on_read = nullptr;
+ }
+ gpr_mu_unlock(&m->parent->mu);
+ grpc_resource_user_shutdown(m->resource_user);
+ GRPC_ERROR_UNREF(why);
+}
+
+static void me_destroy(grpc_endpoint* ep) {
+ passthru_endpoint* p = (reinterpret_cast<half*>(ep))->parent;
+ gpr_mu_lock(&p->mu);
+ if (0 == --p->halves) {
+ gpr_mu_unlock(&p->mu);
+ gpr_mu_destroy(&p->mu);
+ grpc_passthru_endpoint_stats_destroy(p->stats);
+ grpc_slice_buffer_destroy_internal(&p->client.read_buffer);
+ grpc_slice_buffer_destroy_internal(&p->server.read_buffer);
+ grpc_resource_user_unref(p->client.resource_user);
+ grpc_resource_user_unref(p->server.resource_user);
+ gpr_free(p);
+ } else {
+ gpr_mu_unlock(&p->mu);
+ }
+}
+
static y_absl::string_view me_get_peer(grpc_endpoint* ep) {
- passthru_endpoint* p = (reinterpret_cast<half*>(ep))->parent;
- return (reinterpret_cast<half*>(ep)) == &p->client
+ passthru_endpoint* p = (reinterpret_cast<half*>(ep))->parent;
+ return (reinterpret_cast<half*>(ep)) == &p->client
? "fake:mock_client_endpoint"
: "fake:mock_server_endpoint";
-}
-
+}
+
static y_absl::string_view me_get_local_address(grpc_endpoint* ep) {
passthru_endpoint* p = (reinterpret_cast<half*>(ep))->parent;
return (reinterpret_cast<half*>(ep)) == &p->client
@@ -166,74 +166,74 @@ static y_absl::string_view me_get_local_address(grpc_endpoint* ep) {
: "fake:mock_server_endpoint";
}
-static int me_get_fd(grpc_endpoint* /*ep*/) { return -1; }
-
-static bool me_can_track_err(grpc_endpoint* /*ep*/) { return false; }
-
-static grpc_resource_user* me_get_resource_user(grpc_endpoint* ep) {
- half* m = reinterpret_cast<half*>(ep);
- return m->resource_user;
-}
-
-static const grpc_endpoint_vtable vtable = {
- me_read,
- me_write,
- me_add_to_pollset,
- me_add_to_pollset_set,
- me_delete_from_pollset_set,
- me_shutdown,
- me_destroy,
- me_get_resource_user,
- me_get_peer,
+static int me_get_fd(grpc_endpoint* /*ep*/) { return -1; }
+
+static bool me_can_track_err(grpc_endpoint* /*ep*/) { return false; }
+
+static grpc_resource_user* me_get_resource_user(grpc_endpoint* ep) {
+ half* m = reinterpret_cast<half*>(ep);
+ return m->resource_user;
+}
+
+static const grpc_endpoint_vtable vtable = {
+ me_read,
+ me_write,
+ me_add_to_pollset,
+ me_add_to_pollset_set,
+ me_delete_from_pollset_set,
+ me_shutdown,
+ me_destroy,
+ me_get_resource_user,
+ me_get_peer,
me_get_local_address,
- me_get_fd,
- me_can_track_err,
-};
-
-static void half_init(half* m, passthru_endpoint* parent,
- grpc_resource_quota* resource_quota,
- const char* half_name) {
- m->base.vtable = &vtable;
- m->parent = parent;
- grpc_slice_buffer_init(&m->read_buffer);
- m->on_read = nullptr;
+ me_get_fd,
+ me_can_track_err,
+};
+
+static void half_init(half* m, passthru_endpoint* parent,
+ grpc_resource_quota* resource_quota,
+ const char* half_name) {
+ m->base.vtable = &vtable;
+ m->parent = parent;
+ grpc_slice_buffer_init(&m->read_buffer);
+ m->on_read = nullptr;
TString name = y_absl::StrFormat("passthru_endpoint_%s_%" PRIxPTR,
half_name, (intptr_t)parent);
m->resource_user = grpc_resource_user_create(resource_quota, name.c_str());
-}
-
-void grpc_passthru_endpoint_create(grpc_endpoint** client,
- grpc_endpoint** server,
- grpc_resource_quota* resource_quota,
- grpc_passthru_endpoint_stats* stats) {
- passthru_endpoint* m =
- static_cast<passthru_endpoint*>(gpr_malloc(sizeof(*m)));
- m->halves = 2;
- m->shutdown = 0;
- if (stats == nullptr) {
- m->stats = grpc_passthru_endpoint_stats_create();
- } else {
- gpr_ref(&stats->refs);
- m->stats = stats;
- }
- half_init(&m->client, m, resource_quota, "client");
- half_init(&m->server, m, resource_quota, "server");
- gpr_mu_init(&m->mu);
- *client = &m->client.base;
- *server = &m->server.base;
-}
-
-grpc_passthru_endpoint_stats* grpc_passthru_endpoint_stats_create() {
- grpc_passthru_endpoint_stats* stats =
- static_cast<grpc_passthru_endpoint_stats*>(
- gpr_malloc(sizeof(grpc_passthru_endpoint_stats)));
- memset(stats, 0, sizeof(*stats));
- gpr_ref_init(&stats->refs, 1);
- return stats;
-}
-
-void grpc_passthru_endpoint_stats_destroy(grpc_passthru_endpoint_stats* stats) {
- if (gpr_unref(&stats->refs)) {
- gpr_free(stats);
- }
-}
+}
+
+void grpc_passthru_endpoint_create(grpc_endpoint** client,
+ grpc_endpoint** server,
+ grpc_resource_quota* resource_quota,
+ grpc_passthru_endpoint_stats* stats) {
+ passthru_endpoint* m =
+ static_cast<passthru_endpoint*>(gpr_malloc(sizeof(*m)));
+ m->halves = 2;
+ m->shutdown = 0;
+ if (stats == nullptr) {
+ m->stats = grpc_passthru_endpoint_stats_create();
+ } else {
+ gpr_ref(&stats->refs);
+ m->stats = stats;
+ }
+ half_init(&m->client, m, resource_quota, "client");
+ half_init(&m->server, m, resource_quota, "server");
+ gpr_mu_init(&m->mu);
+ *client = &m->client.base;
+ *server = &m->server.base;
+}
+
+grpc_passthru_endpoint_stats* grpc_passthru_endpoint_stats_create() {
+ grpc_passthru_endpoint_stats* stats =
+ static_cast<grpc_passthru_endpoint_stats*>(
+ gpr_malloc(sizeof(grpc_passthru_endpoint_stats)));
+ memset(stats, 0, sizeof(*stats));
+ gpr_ref_init(&stats->refs, 1);
+ return stats;
+}
+
+void grpc_passthru_endpoint_stats_destroy(grpc_passthru_endpoint_stats* stats) {
+ if (gpr_unref(&stats->refs)) {
+ gpr_free(stats);
+ }
+}
diff --git a/contrib/libs/grpc/test/core/util/passthru_endpoint.h b/contrib/libs/grpc/test/core/util/passthru_endpoint.h
index 039e5e0aa1..a46c775505 100644
--- a/contrib/libs/grpc/test/core/util/passthru_endpoint.h
+++ b/contrib/libs/grpc/test/core/util/passthru_endpoint.h
@@ -1,43 +1,43 @@
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#ifndef MOCK_ENDPOINT_H
-#define MOCK_ENDPOINT_H
-
-#include <grpc/support/atm.h>
-
-#include "src/core/lib/iomgr/endpoint.h"
-
-/* The struct is refcounted, always use grpc_passthru_endpoint_stats_create and
- * grpc_passthru_endpoint_stats_destroy, rather then embedding it in your
- * objects by value. */
-typedef struct {
- gpr_refcount refs;
- gpr_atm num_writes;
-} grpc_passthru_endpoint_stats;
-
-void grpc_passthru_endpoint_create(grpc_endpoint** client,
- grpc_endpoint** server,
- grpc_resource_quota* resource_quota,
- grpc_passthru_endpoint_stats* stats);
-
-grpc_passthru_endpoint_stats* grpc_passthru_endpoint_stats_create();
-
-void grpc_passthru_endpoint_stats_destroy(grpc_passthru_endpoint_stats* stats);
-
-#endif
+/*
+ *
+ * Copyright 2016 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#ifndef GRPC_TEST_CORE_UTIL_PASSTHRU_ENDPOINT_H
+#define GRPC_TEST_CORE_UTIL_PASSTHRU_ENDPOINT_H
+
+#include <grpc/support/atm.h>
+
+#include "src/core/lib/iomgr/endpoint.h"
+
+/* The struct is refcounted; always use grpc_passthru_endpoint_stats_create and
+ * grpc_passthru_endpoint_stats_destroy rather than embedding it in your
+ * objects by value. */
+typedef struct {
+ gpr_refcount refs;
+ gpr_atm num_writes;
+} grpc_passthru_endpoint_stats;
+
+void grpc_passthru_endpoint_create(grpc_endpoint** client,
+ grpc_endpoint** server,
+ grpc_resource_quota* resource_quota,
+ grpc_passthru_endpoint_stats* stats);
+
+grpc_passthru_endpoint_stats* grpc_passthru_endpoint_stats_create();
+
+void grpc_passthru_endpoint_stats_destroy(grpc_passthru_endpoint_stats* stats);
+
+#endif
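
A sketch of wiring the two halves together in a test; the function name is illustrative, extra includes are omitted, and an initialized gRPC runtime with an ExecCtx on the stack is assumed:

#include "test/core/util/passthru_endpoint.h"

static void passthru_endpoint_example() {
  grpc_core::ExecCtx exec_ctx;
  grpc_endpoint* client = nullptr;
  grpc_endpoint* server = nullptr;
  grpc_resource_quota* quota = grpc_resource_quota_create("passthru_example");
  // Passing nullptr for stats lets the endpoint allocate its own refcounted block.
  grpc_passthru_endpoint_create(&client, &server, quota, nullptr);
  grpc_resource_quota_unref(quota);
  // Writes on `client` surface as reads on `server`, and vice versa.
  grpc_endpoint_destroy(client);
  grpc_endpoint_destroy(server);
}
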
diff --git a/contrib/libs/grpc/test/core/util/port.cc b/contrib/libs/grpc/test/core/util/port.cc
index 936929fd66..5a34b6026f 100644
--- a/contrib/libs/grpc/test/core/util/port.cc
+++ b/contrib/libs/grpc/test/core/util/port.cc
@@ -1,143 +1,143 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include "src/core/lib/iomgr/port.h"
-#include "test/core/util/test_config.h"
-#if defined(GRPC_TEST_PICK_PORT)
-
-#include "test/core/util/port.h"
-
-#include <stdbool.h>
-#include <stdio.h>
-#include <string.h>
-
-#include <grpc/grpc.h>
-#include <grpc/support/alloc.h>
-#include <grpc/support/log.h>
-#include <grpc/support/string_util.h>
-
-#include "src/core/lib/http/httpcli.h"
-#include "src/core/lib/iomgr/resolve_address.h"
-#include "src/core/lib/iomgr/sockaddr_utils.h"
-#include "test/core/util/port_server_client.h"
-
-static int* chosen_ports = nullptr;
-static size_t num_chosen_ports = 0;
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include "src/core/lib/iomgr/port.h"
+#include "test/core/util/test_config.h"
+#if defined(GRPC_TEST_PICK_PORT)
+
+#include "test/core/util/port.h"
+
+#include <stdbool.h>
+#include <stdio.h>
+#include <string.h>
+
+#include <grpc/grpc.h>
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/string_util.h>
+
+#include "src/core/lib/http/httpcli.h"
+#include "src/core/lib/iomgr/resolve_address.h"
+#include "src/core/lib/iomgr/sockaddr_utils.h"
+#include "test/core/util/port_server_client.h"
+
+static int* chosen_ports = nullptr;
+static size_t num_chosen_ports = 0;
static grpc_core::Mutex* g_default_port_picker_mu;
static gpr_once g_default_port_picker_init = GPR_ONCE_INIT;
-
+
static void init_default_port_picker() {
g_default_port_picker_mu = new grpc_core::Mutex();
}
static int free_chosen_port_locked(int port) {
- size_t i;
- int found = 0;
- size_t found_at = 0;
- /* Find the port and erase it from the list, then tell the server it can be
- freed. */
- for (i = 0; i < num_chosen_ports; i++) {
- if (chosen_ports[i] == port) {
- GPR_ASSERT(found == 0);
- found = 1;
- found_at = i;
- }
- }
- if (found) {
- chosen_ports[found_at] = chosen_ports[num_chosen_ports - 1];
- num_chosen_ports--;
- grpc_free_port_using_server(port);
- }
- return found;
-}
-
-static void free_chosen_ports(void) {
+ size_t i;
+ int found = 0;
+ size_t found_at = 0;
+ /* Find the port and erase it from the list, then tell the server it can be
+ freed. */
+ for (i = 0; i < num_chosen_ports; i++) {
+ if (chosen_ports[i] == port) {
+ GPR_ASSERT(found == 0);
+ found = 1;
+ found_at = i;
+ }
+ }
+ if (found) {
+ chosen_ports[found_at] = chosen_ports[num_chosen_ports - 1];
+ num_chosen_ports--;
+ grpc_free_port_using_server(port);
+ }
+ return found;
+}
+
+static void free_chosen_ports(void) {
grpc_core::MutexLock lock(g_default_port_picker_mu);
- size_t i;
- grpc_init();
- for (i = 0; i < num_chosen_ports; i++) {
- grpc_free_port_using_server(chosen_ports[i]);
- }
- grpc_shutdown_blocking();
- gpr_free(chosen_ports);
-}
-
+ size_t i;
+ grpc_init();
+ for (i = 0; i < num_chosen_ports; i++) {
+ grpc_free_port_using_server(chosen_ports[i]);
+ }
+ grpc_shutdown_blocking();
+ gpr_free(chosen_ports);
+}
+
static void chose_port_locked(int port) {
- if (chosen_ports == nullptr) {
- atexit(free_chosen_ports);
- }
- num_chosen_ports++;
- chosen_ports = static_cast<int*>(
- gpr_realloc(chosen_ports, sizeof(int) * num_chosen_ports));
- chosen_ports[num_chosen_ports - 1] = port;
-}
-
-static int grpc_pick_unused_port_impl(void) {
+ if (chosen_ports == nullptr) {
+ atexit(free_chosen_ports);
+ }
+ num_chosen_ports++;
+ chosen_ports = static_cast<int*>(
+ gpr_realloc(chosen_ports, sizeof(int) * num_chosen_ports));
+ chosen_ports[num_chosen_ports - 1] = port;
+}
+
+static int grpc_pick_unused_port_impl(void) {
gpr_once_init(&g_default_port_picker_init, init_default_port_picker);
grpc_core::MutexLock lock(g_default_port_picker_mu);
- int port = grpc_pick_port_using_server();
- if (port != 0) {
+ int port = grpc_pick_port_using_server();
+ if (port != 0) {
chose_port_locked(port);
- }
-
- return port;
-}
-
-static int grpc_pick_unused_port_or_die_impl(void) {
- int port = grpc_pick_unused_port();
- if (port == 0) {
- fprintf(stderr,
- "gRPC tests require a helper port server to allocate ports used \n"
- "during the test.\n\n"
- "This server is not currently running.\n\n"
- "To start it, run tools/run_tests/start_port_server.py\n\n");
- exit(1);
- }
- return port;
-}
-
-static void grpc_recycle_unused_port_impl(int port) {
+ }
+
+ return port;
+}
+
+static int grpc_pick_unused_port_or_die_impl(void) {
+ int port = grpc_pick_unused_port();
+ if (port == 0) {
+ fprintf(stderr,
+ "gRPC tests require a helper port server to allocate ports used \n"
+ "during the test.\n\n"
+ "This server is not currently running.\n\n"
+ "To start it, run tools/run_tests/start_port_server.py\n\n");
+ exit(1);
+ }
+ return port;
+}
+
+static void grpc_recycle_unused_port_impl(int port) {
gpr_once_init(&g_default_port_picker_init, init_default_port_picker);
grpc_core::MutexLock lock(g_default_port_picker_mu);
GPR_ASSERT(free_chosen_port_locked(port));
-}
-
-static grpc_pick_port_functions g_pick_port_functions = {
- grpc_pick_unused_port_impl, grpc_pick_unused_port_or_die_impl,
- grpc_recycle_unused_port_impl};
-
-int grpc_pick_unused_port(void) {
- return g_pick_port_functions.pick_unused_port_fn();
-}
-
-int grpc_pick_unused_port_or_die(void) {
- return g_pick_port_functions.pick_unused_port_or_die_fn();
-}
-
-void grpc_recycle_unused_port(int port) {
- g_pick_port_functions.recycle_unused_port_fn(port);
-}
-
-void grpc_set_pick_port_functions(grpc_pick_port_functions functions) {
- GPR_ASSERT(functions.pick_unused_port_fn != nullptr);
- GPR_ASSERT(functions.pick_unused_port_or_die_fn != nullptr);
- GPR_ASSERT(functions.recycle_unused_port_fn != nullptr);
- g_pick_port_functions = functions;
-}
-
-#endif /* GRPC_TEST_PICK_PORT */
+}
+
+static grpc_pick_port_functions g_pick_port_functions = {
+ grpc_pick_unused_port_impl, grpc_pick_unused_port_or_die_impl,
+ grpc_recycle_unused_port_impl};
+
+int grpc_pick_unused_port(void) {
+ return g_pick_port_functions.pick_unused_port_fn();
+}
+
+int grpc_pick_unused_port_or_die(void) {
+ return g_pick_port_functions.pick_unused_port_or_die_fn();
+}
+
+void grpc_recycle_unused_port(int port) {
+ g_pick_port_functions.recycle_unused_port_fn(port);
+}
+
+void grpc_set_pick_port_functions(grpc_pick_port_functions functions) {
+ GPR_ASSERT(functions.pick_unused_port_fn != nullptr);
+ GPR_ASSERT(functions.pick_unused_port_or_die_fn != nullptr);
+ GPR_ASSERT(functions.recycle_unused_port_fn != nullptr);
+ g_pick_port_functions = functions;
+}
+
+#endif /* GRPC_TEST_PICK_PORT */
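Editor's note (illustrative, not part of the diff): port.cc above is the default, port-server-backed picker. A typical test pairs the pick and recycle calls so long-running jobs hand ports back to the shared server; this sketch assumes a build where GRPC_TEST_PICK_PORT is defined and the helper port server is running.

    #include <stdio.h>
    #include "test/core/util/port.h"

    int main() {
      // Reserve a port from the helper port server (aborts if it is not running).
      int port = grpc_pick_unused_port_or_die();
      printf("test will listen on port %d\n", port);
      // ... start and exercise a test server bound to `port` here ...
      // Hand the port back so the server's pool is not exhausted by long jobs.
      grpc_recycle_unused_port(port);
      return 0;
    }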
diff --git a/contrib/libs/grpc/test/core/util/port.h b/contrib/libs/grpc/test/core/util/port.h
index cf79445537..3a4cf4467a 100644
--- a/contrib/libs/grpc/test/core/util/port.h
+++ b/contrib/libs/grpc/test/core/util/port.h
@@ -1,44 +1,44 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#ifndef GRPC_TEST_CORE_UTIL_PORT_H
-#define GRPC_TEST_CORE_UTIL_PORT_H
-
-typedef struct grpc_pick_port_functions {
- int (*pick_unused_port_fn)(void);
- int (*pick_unused_port_or_die_fn)(void);
- void (*recycle_unused_port_fn)(int port);
-} grpc_pick_port_functions;
-
-/* pick a port number that is currently unused by either tcp or udp. return
- 0 on failure. */
-int grpc_pick_unused_port(void);
-/* pick a port number that is currently unused by either tcp or udp. abort
- on failure. */
-int grpc_pick_unused_port_or_die(void);
-
-/* Return a port which was previously returned by grpc_pick_unused_port().
- * Implementations of grpc_pick_unused_port() backed by a portserver may limit
- * the total number of ports available; this lets a binary return its allocated
- * ports back to the server if it is going to allocate a large number. */
-void grpc_recycle_unused_port(int port);
-
-/** Request the family of pick_port functions in \a functions be used. */
-void grpc_set_pick_port_functions(grpc_pick_port_functions functions);
-
-#endif /* GRPC_TEST_CORE_UTIL_PORT_H */
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#ifndef GRPC_TEST_CORE_UTIL_PORT_H
+#define GRPC_TEST_CORE_UTIL_PORT_H
+
+typedef struct grpc_pick_port_functions {
+ int (*pick_unused_port_fn)(void);
+ int (*pick_unused_port_or_die_fn)(void);
+ void (*recycle_unused_port_fn)(int port);
+} grpc_pick_port_functions;
+
+/* pick a port number that is currently unused by either tcp or udp. return
+ 0 on failure. */
+int grpc_pick_unused_port(void);
+/* pick a port number that is currently unused by either tcp or udp. abort
+ on failure. */
+int grpc_pick_unused_port_or_die(void);
+
+/* Return a port which was previously returned by grpc_pick_unused_port().
+ * Implementations of grpc_pick_unused_port() backed by a portserver may limit
+ * the total number of ports available; this lets a binary return its allocated
+ * ports back to the server if it is going to allocate a large number. */
+void grpc_recycle_unused_port(int port);
+
+/** Request the family of pick_port functions in \a functions be used. */
+void grpc_set_pick_port_functions(grpc_pick_port_functions functions);
+
+#endif /* GRPC_TEST_CORE_UTIL_PORT_H */
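Editor's note (illustrative, not part of the diff): the header above also lets a binary swap in its own picker via grpc_set_pick_port_functions(). A minimal sketch, assuming the default port.cc implementation is linked in and a hypothetical fixed port 12345 is acceptable for the environment:

    #include <stdio.h>
    #include "test/core/util/port.h"

    // Hypothetical fixed-port picker, for illustration only.
    static int fixed_pick(void) { return 12345; }
    static int fixed_pick_or_die(void) { return 12345; }
    static void fixed_recycle(int /*port*/) {}

    int main() {
      grpc_pick_port_functions fns;
      fns.pick_unused_port_fn = fixed_pick;
      fns.pick_unused_port_or_die_fn = fixed_pick_or_die;
      fns.recycle_unused_port_fn = fixed_recycle;
      grpc_set_pick_port_functions(fns);
      printf("picked %d\n", grpc_pick_unused_port());  // dispatches to the override
      return 0;
    }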
diff --git a/contrib/libs/grpc/test/core/util/port_isolated_runtime_environment.cc b/contrib/libs/grpc/test/core/util/port_isolated_runtime_environment.cc
index 7d0598cd81..5e043e7f97 100644
--- a/contrib/libs/grpc/test/core/util/port_isolated_runtime_environment.cc
+++ b/contrib/libs/grpc/test/core/util/port_isolated_runtime_environment.cc
@@ -1,56 +1,56 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-/* When individual tests run in an isolated runtime environment (e.g. each test
- * runs in a separate container) the framework takes a round-robin pick of a
- * port within certain range. There is no need to recycle ports.
- */
-#include <grpc/support/atm.h>
-#include <grpc/support/log.h>
-#include <grpc/support/time.h>
-#include <stdlib.h>
-#include "src/core/lib/iomgr/port.h"
-#include "test/core/util/test_config.h"
-#if defined(GRPC_PORT_ISOLATED_RUNTIME)
-
-#include "test/core/util/port.h"
-
-#define MIN_PORT 1025
-#define MAX_PORT 32766
-
-static int get_random_port_offset() {
- srand(gpr_now(GPR_CLOCK_REALTIME).tv_nsec);
- double rnd = static_cast<double>(rand()) /
- (static_cast<double>(RAND_MAX) + 1.0); // values from [0,1)
- return static_cast<int>(rnd * (MAX_PORT - MIN_PORT + 1));
-}
-
-static int s_initial_offset = get_random_port_offset();
-static gpr_atm s_pick_counter = 0;
-
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+/* When individual tests run in an isolated runtime environment (e.g. each test
+ * runs in a separate container) the framework takes a round-robin pick of a
+ * port within certain range. There is no need to recycle ports.
+ */
+#include <grpc/support/atm.h>
+#include <grpc/support/log.h>
+#include <grpc/support/time.h>
+#include <stdlib.h>
+#include "src/core/lib/iomgr/port.h"
+#include "test/core/util/test_config.h"
+#if defined(GRPC_PORT_ISOLATED_RUNTIME)
+
+#include "test/core/util/port.h"
+
+#define MIN_PORT 1025
+#define MAX_PORT 32766
+
+static int get_random_port_offset() {
+ srand(gpr_now(GPR_CLOCK_REALTIME).tv_nsec);
+ double rnd = static_cast<double>(rand()) /
+ (static_cast<double>(RAND_MAX) + 1.0); // values from [0,1)
+ return static_cast<int>(rnd * (MAX_PORT - MIN_PORT + 1));
+}
+
+static int s_initial_offset = get_random_port_offset();
+static gpr_atm s_pick_counter = 0;
+
static int grpc_pick_unused_port_or_die_impl(void) {
- int orig_counter_val =
- static_cast<int>(gpr_atm_full_fetch_add(&s_pick_counter, 1));
- GPR_ASSERT(orig_counter_val < (MAX_PORT - MIN_PORT + 1));
- return MIN_PORT +
- (s_initial_offset + orig_counter_val) % (MAX_PORT - MIN_PORT + 1);
-}
-
+ int orig_counter_val =
+ static_cast<int>(gpr_atm_full_fetch_add(&s_pick_counter, 1));
+ GPR_ASSERT(orig_counter_val < (MAX_PORT - MIN_PORT + 1));
+ return MIN_PORT +
+ (s_initial_offset + orig_counter_val) % (MAX_PORT - MIN_PORT + 1);
+}
+
int grpc_pick_unused_port_or_die(void) {
while (true) {
int port = grpc_pick_unused_port_or_die_impl();
@@ -64,6 +64,6 @@ int grpc_pick_unused_port_or_die(void) {
}
}
-void grpc_recycle_unused_port(int port) { (void)port; }
-
-#endif /* GRPC_PORT_ISOLATED_RUNTIME */
+void grpc_recycle_unused_port(int port) { (void)port; }
+
+#endif /* GRPC_PORT_ISOLATED_RUNTIME */
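Editor's note (illustrative, not part of the diff): the isolated-runtime picker above never contacts a server; it walks the range [1025, 32766] round-robin from a random starting offset. A standalone sketch of the same scheme using std::atomic instead of gpr_atm:

    #include <atomic>
    #include <cassert>
    #include <cstdio>
    #include <cstdlib>
    #include <ctime>

    namespace {
    constexpr int kMinPort = 1025;
    constexpr int kMaxPort = 32766;
    constexpr int kRange = kMaxPort - kMinPort + 1;

    int RandomOffset() {
      std::srand(static_cast<unsigned>(std::time(nullptr)));
      return std::rand() % kRange;  // uniform enough for a test helper
    }

    const int kInitialOffset = RandomOffset();
    std::atomic<int> g_pick_counter{0};

    int PickPort() {
      int n = g_pick_counter.fetch_add(1);
      assert(n < kRange);  // one process must not pick more ports than exist
      return kMinPort + (kInitialOffset + n) % kRange;
    }
    }  // namespace

    int main() {
      for (int i = 0; i < 5; ++i) std::printf("pick %d -> %d\n", i, PickPort());
      return 0;
    }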
diff --git a/contrib/libs/grpc/test/core/util/port_server_client.cc b/contrib/libs/grpc/test/core/util/port_server_client.cc
index 3f27a1687d..973068dcda 100644
--- a/contrib/libs/grpc/test/core/util/port_server_client.cc
+++ b/contrib/libs/grpc/test/core/util/port_server_client.cc
@@ -1,89 +1,89 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include <grpc/support/port_platform.h>
-#include "test/core/util/test_config.h"
-
-#ifdef GRPC_TEST_PICK_PORT
-#include "test/core/util/port_server_client.h"
-
-#include <math.h>
-#include <string.h>
-
-#include <grpc/grpc.h>
-#include <grpc/support/alloc.h>
-#include <grpc/support/log.h>
-#include <grpc/support/string_util.h>
-#include <grpc/support/sync.h>
-#include <grpc/support/time.h>
-
-#include "src/core/lib/http/httpcli.h"
-
-typedef struct freereq {
- gpr_mu* mu = nullptr;
- grpc_polling_entity pops = {};
- int done = 0;
-} freereq;
-
-static void destroy_pops_and_shutdown(void* p, grpc_error* /*error*/) {
- grpc_pollset* pollset =
- grpc_polling_entity_pollset(static_cast<grpc_polling_entity*>(p));
- grpc_pollset_destroy(pollset);
- gpr_free(pollset);
-}
-
-static void freed_port_from_server(void* arg, grpc_error* /*error*/) {
- freereq* pr = static_cast<freereq*>(arg);
- gpr_mu_lock(pr->mu);
- pr->done = 1;
- GRPC_LOG_IF_ERROR(
- "pollset_kick",
- grpc_pollset_kick(grpc_polling_entity_pollset(&pr->pops), nullptr));
- gpr_mu_unlock(pr->mu);
-}
-
-void grpc_free_port_using_server(int port) {
- grpc_httpcli_context context;
- grpc_httpcli_request req;
- grpc_httpcli_response rsp;
- freereq pr;
- char* path;
- grpc_closure* shutdown_closure;
-
- grpc_init();
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpc/support/port_platform.h>
+#include "test/core/util/test_config.h"
+
+#ifdef GRPC_TEST_PICK_PORT
+#include "test/core/util/port_server_client.h"
+
+#include <math.h>
+#include <string.h>
+
+#include <grpc/grpc.h>
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/string_util.h>
+#include <grpc/support/sync.h>
+#include <grpc/support/time.h>
+
+#include "src/core/lib/http/httpcli.h"
+
+typedef struct freereq {
+ gpr_mu* mu = nullptr;
+ grpc_polling_entity pops = {};
+ int done = 0;
+} freereq;
+
+static void destroy_pops_and_shutdown(void* p, grpc_error* /*error*/) {
+ grpc_pollset* pollset =
+ grpc_polling_entity_pollset(static_cast<grpc_polling_entity*>(p));
+ grpc_pollset_destroy(pollset);
+ gpr_free(pollset);
+}
+
+static void freed_port_from_server(void* arg, grpc_error* /*error*/) {
+ freereq* pr = static_cast<freereq*>(arg);
+ gpr_mu_lock(pr->mu);
+ pr->done = 1;
+ GRPC_LOG_IF_ERROR(
+ "pollset_kick",
+ grpc_pollset_kick(grpc_polling_entity_pollset(&pr->pops), nullptr));
+ gpr_mu_unlock(pr->mu);
+}
+
+void grpc_free_port_using_server(int port) {
+ grpc_httpcli_context context;
+ grpc_httpcli_request req;
+ grpc_httpcli_response rsp;
+ freereq pr;
+ char* path;
+ grpc_closure* shutdown_closure;
+
+ grpc_init();
{
grpc_core::ExecCtx exec_ctx;
-
+
pr = {};
memset(&req, 0, sizeof(req));
rsp = {};
-
+
grpc_pollset* pollset =
static_cast<grpc_pollset*>(gpr_zalloc(grpc_pollset_size()));
grpc_pollset_init(pollset, &pr.mu);
pr.pops = grpc_polling_entity_create_from_pollset(pollset);
shutdown_closure = GRPC_CLOSURE_CREATE(destroy_pops_and_shutdown, &pr.pops,
grpc_schedule_on_exec_ctx);
-
+
req.host = const_cast<char*>(GRPC_PORT_SERVER_ADDRESS);
gpr_asprintf(&path, "/drop/%d", port);
req.http.path = path;
-
+
grpc_httpcli_context_init(&context);
grpc_resource_quota* resource_quota =
grpc_resource_quota_create("port_server_client/free");
@@ -104,153 +104,153 @@ void grpc_free_port_using_server(int port) {
grpc_core::ExecCtx::Get()->Now() + GPR_MS_PER_SEC))) {
pr.done = 1;
}
- }
+ }
gpr_mu_unlock(pr.mu);
-
+
grpc_httpcli_context_destroy(&context);
grpc_pollset_shutdown(grpc_polling_entity_pollset(&pr.pops),
shutdown_closure);
-
+
gpr_free(path);
grpc_http_response_destroy(&rsp);
}
- grpc_shutdown();
-}
-
-typedef struct portreq {
- gpr_mu* mu = nullptr;
- grpc_polling_entity pops = {};
- int port = 0;
- int retries = 0;
- char* server = nullptr;
- grpc_httpcli_context* ctx = nullptr;
- grpc_httpcli_response response = {};
-} portreq;
-
-static void got_port_from_server(void* arg, grpc_error* error) {
- size_t i;
- int port = 0;
- portreq* pr = static_cast<portreq*>(arg);
- int failed = 0;
- grpc_httpcli_response* response = &pr->response;
-
- if (error != GRPC_ERROR_NONE) {
- failed = 1;
- const char* msg = grpc_error_string(error);
- gpr_log(GPR_DEBUG, "failed port pick from server: retrying [%s]", msg);
-
- } else if (response->status != 200) {
- failed = 1;
- gpr_log(GPR_DEBUG, "failed port pick from server: status=%d",
- response->status);
- }
-
- if (failed) {
- grpc_httpcli_request req;
- memset(&req, 0, sizeof(req));
- if (pr->retries >= 5) {
- gpr_mu_lock(pr->mu);
- pr->port = 0;
- GRPC_LOG_IF_ERROR(
- "pollset_kick",
- grpc_pollset_kick(grpc_polling_entity_pollset(&pr->pops), nullptr));
- gpr_mu_unlock(pr->mu);
- return;
- }
- GPR_ASSERT(pr->retries < 10);
- gpr_sleep_until(gpr_time_add(
- gpr_now(GPR_CLOCK_REALTIME),
- gpr_time_from_millis(
- static_cast<int64_t>(
- 1000.0 * (1 + pow(1.3, pr->retries) * rand() / RAND_MAX)),
- GPR_TIMESPAN)));
- pr->retries++;
- req.host = pr->server;
- req.http.path = const_cast<char*>("/get");
- grpc_http_response_destroy(&pr->response);
- pr->response = {};
- grpc_resource_quota* resource_quota =
- grpc_resource_quota_create("port_server_client/pick_retry");
- grpc_httpcli_get(pr->ctx, &pr->pops, resource_quota, &req,
- grpc_core::ExecCtx::Get()->Now() + 30 * GPR_MS_PER_SEC,
- GRPC_CLOSURE_CREATE(got_port_from_server, pr,
- grpc_schedule_on_exec_ctx),
- &pr->response);
- grpc_resource_quota_unref_internal(resource_quota);
- return;
- }
- GPR_ASSERT(response);
- GPR_ASSERT(response->status == 200);
- for (i = 0; i < response->body_length; i++) {
- GPR_ASSERT(response->body[i] >= '0' && response->body[i] <= '9');
- port = port * 10 + response->body[i] - '0';
- }
- GPR_ASSERT(port > 1024);
- gpr_mu_lock(pr->mu);
- pr->port = port;
- GRPC_LOG_IF_ERROR(
- "pollset_kick",
- grpc_pollset_kick(grpc_polling_entity_pollset(&pr->pops), nullptr));
- gpr_mu_unlock(pr->mu);
-}
-
-int grpc_pick_port_using_server(void) {
- grpc_httpcli_context context;
- grpc_httpcli_request req;
- portreq pr;
- grpc_closure* shutdown_closure;
-
- grpc_init();
- {
- grpc_core::ExecCtx exec_ctx;
- pr = {};
- memset(&req, 0, sizeof(req));
- grpc_pollset* pollset =
- static_cast<grpc_pollset*>(gpr_zalloc(grpc_pollset_size()));
- grpc_pollset_init(pollset, &pr.mu);
- pr.pops = grpc_polling_entity_create_from_pollset(pollset);
- shutdown_closure = GRPC_CLOSURE_CREATE(destroy_pops_and_shutdown, &pr.pops,
- grpc_schedule_on_exec_ctx);
- pr.port = -1;
- pr.server = const_cast<char*>(GRPC_PORT_SERVER_ADDRESS);
- pr.ctx = &context;
-
- req.host = const_cast<char*>(GRPC_PORT_SERVER_ADDRESS);
- req.http.path = const_cast<char*>("/get");
-
- grpc_httpcli_context_init(&context);
- grpc_resource_quota* resource_quota =
- grpc_resource_quota_create("port_server_client/pick");
- grpc_httpcli_get(&context, &pr.pops, resource_quota, &req,
- grpc_core::ExecCtx::Get()->Now() + 30 * GPR_MS_PER_SEC,
- GRPC_CLOSURE_CREATE(got_port_from_server, &pr,
- grpc_schedule_on_exec_ctx),
- &pr.response);
- grpc_resource_quota_unref_internal(resource_quota);
- grpc_core::ExecCtx::Get()->Flush();
- gpr_mu_lock(pr.mu);
- while (pr.port == -1) {
- grpc_pollset_worker* worker = nullptr;
- if (!GRPC_LOG_IF_ERROR(
- "pollset_work",
- grpc_pollset_work(
- grpc_polling_entity_pollset(&pr.pops), &worker,
- grpc_core::ExecCtx::Get()->Now() + GPR_MS_PER_SEC))) {
- pr.port = 0;
- }
- }
- gpr_mu_unlock(pr.mu);
-
- grpc_http_response_destroy(&pr.response);
- grpc_httpcli_context_destroy(&context);
- grpc_pollset_shutdown(grpc_polling_entity_pollset(&pr.pops),
- shutdown_closure);
-
- grpc_core::ExecCtx::Get()->Flush();
- }
- grpc_shutdown();
-
- return pr.port;
-}
-
-#endif // GRPC_TEST_PICK_PORT
+ grpc_shutdown();
+}
+
+typedef struct portreq {
+ gpr_mu* mu = nullptr;
+ grpc_polling_entity pops = {};
+ int port = 0;
+ int retries = 0;
+ char* server = nullptr;
+ grpc_httpcli_context* ctx = nullptr;
+ grpc_httpcli_response response = {};
+} portreq;
+
+static void got_port_from_server(void* arg, grpc_error* error) {
+ size_t i;
+ int port = 0;
+ portreq* pr = static_cast<portreq*>(arg);
+ int failed = 0;
+ grpc_httpcli_response* response = &pr->response;
+
+ if (error != GRPC_ERROR_NONE) {
+ failed = 1;
+ const char* msg = grpc_error_string(error);
+ gpr_log(GPR_DEBUG, "failed port pick from server: retrying [%s]", msg);
+
+ } else if (response->status != 200) {
+ failed = 1;
+ gpr_log(GPR_DEBUG, "failed port pick from server: status=%d",
+ response->status);
+ }
+
+ if (failed) {
+ grpc_httpcli_request req;
+ memset(&req, 0, sizeof(req));
+ if (pr->retries >= 5) {
+ gpr_mu_lock(pr->mu);
+ pr->port = 0;
+ GRPC_LOG_IF_ERROR(
+ "pollset_kick",
+ grpc_pollset_kick(grpc_polling_entity_pollset(&pr->pops), nullptr));
+ gpr_mu_unlock(pr->mu);
+ return;
+ }
+ GPR_ASSERT(pr->retries < 10);
+ gpr_sleep_until(gpr_time_add(
+ gpr_now(GPR_CLOCK_REALTIME),
+ gpr_time_from_millis(
+ static_cast<int64_t>(
+ 1000.0 * (1 + pow(1.3, pr->retries) * rand() / RAND_MAX)),
+ GPR_TIMESPAN)));
+ pr->retries++;
+ req.host = pr->server;
+ req.http.path = const_cast<char*>("/get");
+ grpc_http_response_destroy(&pr->response);
+ pr->response = {};
+ grpc_resource_quota* resource_quota =
+ grpc_resource_quota_create("port_server_client/pick_retry");
+ grpc_httpcli_get(pr->ctx, &pr->pops, resource_quota, &req,
+ grpc_core::ExecCtx::Get()->Now() + 30 * GPR_MS_PER_SEC,
+ GRPC_CLOSURE_CREATE(got_port_from_server, pr,
+ grpc_schedule_on_exec_ctx),
+ &pr->response);
+ grpc_resource_quota_unref_internal(resource_quota);
+ return;
+ }
+ GPR_ASSERT(response);
+ GPR_ASSERT(response->status == 200);
+ for (i = 0; i < response->body_length; i++) {
+ GPR_ASSERT(response->body[i] >= '0' && response->body[i] <= '9');
+ port = port * 10 + response->body[i] - '0';
+ }
+ GPR_ASSERT(port > 1024);
+ gpr_mu_lock(pr->mu);
+ pr->port = port;
+ GRPC_LOG_IF_ERROR(
+ "pollset_kick",
+ grpc_pollset_kick(grpc_polling_entity_pollset(&pr->pops), nullptr));
+ gpr_mu_unlock(pr->mu);
+}
+
+int grpc_pick_port_using_server(void) {
+ grpc_httpcli_context context;
+ grpc_httpcli_request req;
+ portreq pr;
+ grpc_closure* shutdown_closure;
+
+ grpc_init();
+ {
+ grpc_core::ExecCtx exec_ctx;
+ pr = {};
+ memset(&req, 0, sizeof(req));
+ grpc_pollset* pollset =
+ static_cast<grpc_pollset*>(gpr_zalloc(grpc_pollset_size()));
+ grpc_pollset_init(pollset, &pr.mu);
+ pr.pops = grpc_polling_entity_create_from_pollset(pollset);
+ shutdown_closure = GRPC_CLOSURE_CREATE(destroy_pops_and_shutdown, &pr.pops,
+ grpc_schedule_on_exec_ctx);
+ pr.port = -1;
+ pr.server = const_cast<char*>(GRPC_PORT_SERVER_ADDRESS);
+ pr.ctx = &context;
+
+ req.host = const_cast<char*>(GRPC_PORT_SERVER_ADDRESS);
+ req.http.path = const_cast<char*>("/get");
+
+ grpc_httpcli_context_init(&context);
+ grpc_resource_quota* resource_quota =
+ grpc_resource_quota_create("port_server_client/pick");
+ grpc_httpcli_get(&context, &pr.pops, resource_quota, &req,
+ grpc_core::ExecCtx::Get()->Now() + 30 * GPR_MS_PER_SEC,
+ GRPC_CLOSURE_CREATE(got_port_from_server, &pr,
+ grpc_schedule_on_exec_ctx),
+ &pr.response);
+ grpc_resource_quota_unref_internal(resource_quota);
+ grpc_core::ExecCtx::Get()->Flush();
+ gpr_mu_lock(pr.mu);
+ while (pr.port == -1) {
+ grpc_pollset_worker* worker = nullptr;
+ if (!GRPC_LOG_IF_ERROR(
+ "pollset_work",
+ grpc_pollset_work(
+ grpc_polling_entity_pollset(&pr.pops), &worker,
+ grpc_core::ExecCtx::Get()->Now() + GPR_MS_PER_SEC))) {
+ pr.port = 0;
+ }
+ }
+ gpr_mu_unlock(pr.mu);
+
+ grpc_http_response_destroy(&pr.response);
+ grpc_httpcli_context_destroy(&context);
+ grpc_pollset_shutdown(grpc_polling_entity_pollset(&pr.pops),
+ shutdown_closure);
+
+ grpc_core::ExecCtx::Get()->Flush();
+ }
+ grpc_shutdown();
+
+ return pr.port;
+}
+
+#endif // GRPC_TEST_PICK_PORT
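Editor's note (illustrative, not part of the diff): the client above speaks plain HTTP to the port server: GET /get returns a port as ASCII decimal digits, and GET /drop/<port> releases it. On failure it retries up to five times, sleeping 1000 * (1 + 1.3^retries * U) milliseconds with U drawn from rand()/RAND_MAX, then gives up and reports port 0. A standalone sketch of that backoff schedule:

    #include <cmath>
    #include <cstdio>
    #include <cstdlib>

    int main() {
      for (int r = 0; r < 5; ++r) {
        double u = static_cast<double>(std::rand()) / RAND_MAX;  // jitter in [0, 1]
        double delay_ms = 1000.0 * (1 + std::pow(1.3, r) * u);   // same formula as above
        std::printf("retry %d: sleep %.0f ms\n", r + 1, delay_ms);
      }
      return 0;
    }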
diff --git a/contrib/libs/grpc/test/core/util/port_server_client.h b/contrib/libs/grpc/test/core/util/port_server_client.h
index 6eac403714..86dd7018ff 100644
--- a/contrib/libs/grpc/test/core/util/port_server_client.h
+++ b/contrib/libs/grpc/test/core/util/port_server_client.h
@@ -1,30 +1,30 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#ifndef GRPC_TEST_CORE_UTIL_PORT_SERVER_CLIENT_H
-#define GRPC_TEST_CORE_UTIL_PORT_SERVER_CLIENT_H
-
-// C interface to port_server.py
-
-// must be synchronized with tools/run_tests/python_utils/start_port_server.py
-#define GRPC_PORT_SERVER_ADDRESS "localhost:32766"
-
-int grpc_pick_port_using_server(void);
-void grpc_free_port_using_server(int port);
-
-#endif // GRPC_TEST_CORE_UTIL_PORT_SERVER_CLIENT_H
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#ifndef GRPC_TEST_CORE_UTIL_PORT_SERVER_CLIENT_H
+#define GRPC_TEST_CORE_UTIL_PORT_SERVER_CLIENT_H
+
+// C interface to port_server.py
+
+// must be synchronized with tools/run_tests/python_utils/start_port_server.py
+#define GRPC_PORT_SERVER_ADDRESS "localhost:32766"
+
+int grpc_pick_port_using_server(void);
+void grpc_free_port_using_server(int port);
+
+#endif // GRPC_TEST_CORE_UTIL_PORT_SERVER_CLIENT_H
diff --git a/contrib/libs/grpc/test/core/util/reconnect_server.cc b/contrib/libs/grpc/test/core/util/reconnect_server.cc
index 951953a69c..070ab3fa0a 100644
--- a/contrib/libs/grpc/test/core/util/reconnect_server.cc
+++ b/contrib/libs/grpc/test/core/util/reconnect_server.cc
@@ -1,75 +1,75 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include "test/core/util/reconnect_server.h"
-
-#include <grpc/grpc.h>
-#include <grpc/support/alloc.h>
-#include <grpc/support/log.h>
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include "test/core/util/reconnect_server.h"
+
+#include <grpc/grpc.h>
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
-#include <grpc/support/sync.h>
-#include <grpc/support/time.h>
-#include <string.h>
-
+#include <grpc/support/sync.h>
+#include <grpc/support/time.h>
+#include <string.h>
+
#include "y_absl/strings/string_view.h"
-#include "src/core/lib/iomgr/endpoint.h"
-#include "src/core/lib/iomgr/sockaddr.h"
-#include "src/core/lib/iomgr/tcp_server.h"
-#include "test/core/util/port.h"
-#include "test/core/util/test_tcp_server.h"
-
-static void pretty_print_backoffs(reconnect_server* server) {
- gpr_timespec diff;
- int i = 1;
- double expected_backoff = 1000.0, backoff;
- timestamp_list* head = server->head;
- gpr_log(GPR_INFO, "reconnect server: new connection");
- for (head = server->head; head && head->next; head = head->next, i++) {
- diff = gpr_time_sub(head->next->timestamp, head->timestamp);
- backoff = gpr_time_to_millis(diff);
- gpr_log(GPR_INFO,
- "retry %2d:backoff %6.2fs,expected backoff %6.2fs, jitter %4.2f%%",
- i, backoff / 1000.0, expected_backoff / 1000.0,
- (backoff - expected_backoff) * 100.0 / expected_backoff);
- expected_backoff *= 1.6;
- int max_reconnect_backoff_ms = 120 * 1000;
- if (server->max_reconnect_backoff_ms > 0) {
- max_reconnect_backoff_ms = server->max_reconnect_backoff_ms;
- }
- if (expected_backoff > max_reconnect_backoff_ms) {
- expected_backoff = max_reconnect_backoff_ms;
- }
- }
-}
-
-static void on_connect(void* arg, grpc_endpoint* tcp,
- grpc_pollset* /*accepting_pollset*/,
- grpc_tcp_server_acceptor* acceptor) {
- gpr_free(acceptor);
+#include "src/core/lib/iomgr/endpoint.h"
+#include "src/core/lib/iomgr/sockaddr.h"
+#include "src/core/lib/iomgr/tcp_server.h"
+#include "test/core/util/port.h"
+#include "test/core/util/test_tcp_server.h"
+
+static void pretty_print_backoffs(reconnect_server* server) {
+ gpr_timespec diff;
+ int i = 1;
+ double expected_backoff = 1000.0, backoff;
+ timestamp_list* head = server->head;
+ gpr_log(GPR_INFO, "reconnect server: new connection");
+ for (head = server->head; head && head->next; head = head->next, i++) {
+ diff = gpr_time_sub(head->next->timestamp, head->timestamp);
+ backoff = gpr_time_to_millis(diff);
+ gpr_log(GPR_INFO,
+ "retry %2d:backoff %6.2fs,expected backoff %6.2fs, jitter %4.2f%%",
+ i, backoff / 1000.0, expected_backoff / 1000.0,
+ (backoff - expected_backoff) * 100.0 / expected_backoff);
+ expected_backoff *= 1.6;
+ int max_reconnect_backoff_ms = 120 * 1000;
+ if (server->max_reconnect_backoff_ms > 0) {
+ max_reconnect_backoff_ms = server->max_reconnect_backoff_ms;
+ }
+ if (expected_backoff > max_reconnect_backoff_ms) {
+ expected_backoff = max_reconnect_backoff_ms;
+ }
+ }
+}
+
+static void on_connect(void* arg, grpc_endpoint* tcp,
+ grpc_pollset* /*accepting_pollset*/,
+ grpc_tcp_server_acceptor* acceptor) {
+ gpr_free(acceptor);
y_absl::string_view peer;
int last_colon;
- reconnect_server* server = static_cast<reconnect_server*>(arg);
- gpr_timespec now = gpr_now(GPR_CLOCK_REALTIME);
- timestamp_list* new_tail;
- peer = grpc_endpoint_get_peer(tcp);
- grpc_endpoint_shutdown(tcp,
- GRPC_ERROR_CREATE_FROM_STATIC_STRING("Connected"));
- grpc_endpoint_destroy(tcp);
+ reconnect_server* server = static_cast<reconnect_server*>(arg);
+ gpr_timespec now = gpr_now(GPR_CLOCK_REALTIME);
+ timestamp_list* new_tail;
+ peer = grpc_endpoint_get_peer(tcp);
+ grpc_endpoint_shutdown(tcp,
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Connected"));
+ grpc_endpoint_destroy(tcp);
last_colon = peer.rfind(':');
if (server->peer == nullptr) {
server->peer = new TString(peer);
@@ -80,50 +80,50 @@ static void on_connect(void* arg, grpc_endpoint* tcp,
*server->peer) != 0) {
gpr_log(GPR_ERROR, "mismatched peer! %s vs %s", server->peer->c_str(),
TString(peer).c_str());
- }
- }
- new_tail = static_cast<timestamp_list*>(gpr_malloc(sizeof(timestamp_list)));
- new_tail->timestamp = now;
- new_tail->next = nullptr;
- if (server->tail == nullptr) {
- server->head = new_tail;
- server->tail = new_tail;
- } else {
- server->tail->next = new_tail;
- server->tail = new_tail;
- }
- pretty_print_backoffs(server);
-}
-
-void reconnect_server_init(reconnect_server* server) {
- test_tcp_server_init(&server->tcp_server, on_connect, server);
- server->head = nullptr;
- server->tail = nullptr;
- server->peer = nullptr;
- server->max_reconnect_backoff_ms = 0;
-}
-
-void reconnect_server_start(reconnect_server* server, int port) {
- test_tcp_server_start(&server->tcp_server, port);
-}
-
-void reconnect_server_poll(reconnect_server* server, int seconds) {
- test_tcp_server_poll(&server->tcp_server, 1000 * seconds);
-}
-
-void reconnect_server_clear_timestamps(reconnect_server* server) {
- timestamp_list* new_head = server->head;
- while (server->head) {
- new_head = server->head->next;
- gpr_free(server->head);
- server->head = new_head;
- }
- server->tail = nullptr;
+ }
+ }
+ new_tail = static_cast<timestamp_list*>(gpr_malloc(sizeof(timestamp_list)));
+ new_tail->timestamp = now;
+ new_tail->next = nullptr;
+ if (server->tail == nullptr) {
+ server->head = new_tail;
+ server->tail = new_tail;
+ } else {
+ server->tail->next = new_tail;
+ server->tail = new_tail;
+ }
+ pretty_print_backoffs(server);
+}
+
+void reconnect_server_init(reconnect_server* server) {
+ test_tcp_server_init(&server->tcp_server, on_connect, server);
+ server->head = nullptr;
+ server->tail = nullptr;
+ server->peer = nullptr;
+ server->max_reconnect_backoff_ms = 0;
+}
+
+void reconnect_server_start(reconnect_server* server, int port) {
+ test_tcp_server_start(&server->tcp_server, port);
+}
+
+void reconnect_server_poll(reconnect_server* server, int seconds) {
+ test_tcp_server_poll(&server->tcp_server, 1000 * seconds);
+}
+
+void reconnect_server_clear_timestamps(reconnect_server* server) {
+ timestamp_list* new_head = server->head;
+ while (server->head) {
+ new_head = server->head->next;
+ gpr_free(server->head);
+ server->head = new_head;
+ }
+ server->tail = nullptr;
delete server->peer;
- server->peer = nullptr;
-}
-
-void reconnect_server_destroy(reconnect_server* server) {
- reconnect_server_clear_timestamps(server);
- test_tcp_server_destroy(&server->tcp_server);
-}
+ server->peer = nullptr;
+}
+
+void reconnect_server_destroy(reconnect_server* server) {
+ reconnect_server_clear_timestamps(server);
+ test_tcp_server_destroy(&server->tcp_server);
+}
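Editor's note (illustrative, not part of the diff): pretty_print_backoffs() above compares the observed gap between reconnect attempts with an expected schedule that starts at 1s, grows by 1.6x per retry, and is capped at 120s (or server->max_reconnect_backoff_ms when set). The expected sequence, computed standalone:

    #include <algorithm>
    #include <cstdio>

    int main() {
      double expected_ms = 1000.0;
      const double cap_ms = 120.0 * 1000.0;  // default cap used above
      for (int retry = 1; retry <= 12; ++retry) {
        std::printf("retry %2d: expected backoff %6.2f s\n", retry, expected_ms / 1000.0);
        expected_ms = std::min(expected_ms * 1.6, cap_ms);
      }
      return 0;
    }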
diff --git a/contrib/libs/grpc/test/core/util/reconnect_server.h b/contrib/libs/grpc/test/core/util/reconnect_server.h
index 56873630b9..5fd03821f7 100644
--- a/contrib/libs/grpc/test/core/util/reconnect_server.h
+++ b/contrib/libs/grpc/test/core/util/reconnect_server.h
@@ -1,45 +1,45 @@
-/*
- *
- * Copyright 2015-2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#ifndef GRPC_TEST_CORE_UTIL_RECONNECT_SERVER_H
-#define GRPC_TEST_CORE_UTIL_RECONNECT_SERVER_H
-
-#include <grpc/support/sync.h>
-#include <grpc/support/time.h>
-#include "test/core/util/test_tcp_server.h"
-
-typedef struct timestamp_list {
- gpr_timespec timestamp;
- struct timestamp_list* next;
-} timestamp_list;
-
-typedef struct reconnect_server {
- test_tcp_server tcp_server;
- timestamp_list* head;
- timestamp_list* tail;
+/*
+ *
+ * Copyright 2015-2016 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#ifndef GRPC_TEST_CORE_UTIL_RECONNECT_SERVER_H
+#define GRPC_TEST_CORE_UTIL_RECONNECT_SERVER_H
+
+#include <grpc/support/sync.h>
+#include <grpc/support/time.h>
+#include "test/core/util/test_tcp_server.h"
+
+typedef struct timestamp_list {
+ gpr_timespec timestamp;
+ struct timestamp_list* next;
+} timestamp_list;
+
+typedef struct reconnect_server {
+ test_tcp_server tcp_server;
+ timestamp_list* head;
+ timestamp_list* tail;
TString* peer;
- int max_reconnect_backoff_ms;
-} reconnect_server;
-
-void reconnect_server_init(reconnect_server* server);
-void reconnect_server_start(reconnect_server* server, int port);
-void reconnect_server_poll(reconnect_server* server, int seconds);
-void reconnect_server_destroy(reconnect_server* server);
-void reconnect_server_clear_timestamps(reconnect_server* server);
-
-#endif /* GRPC_TEST_CORE_UTIL_RECONNECT_SERVER_H */
+ int max_reconnect_backoff_ms;
+} reconnect_server;
+
+void reconnect_server_init(reconnect_server* server);
+void reconnect_server_start(reconnect_server* server, int port);
+void reconnect_server_poll(reconnect_server* server, int seconds);
+void reconnect_server_destroy(reconnect_server* server);
+void reconnect_server_clear_timestamps(reconnect_server* server);
+
+#endif /* GRPC_TEST_CORE_UTIL_RECONNECT_SERVER_H */
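Editor's note (illustrative, not part of the diff): a rough lifecycle sketch for the reconnect_server API declared above. It assumes the usual grpc_init()/grpc_shutdown() bracketing done by the test harness and is not a complete test:

    #include <cstdio>
    #include <grpc/grpc.h>
    #include "test/core/util/port.h"
    #include "test/core/util/reconnect_server.h"

    int main() {
      grpc_init();
      reconnect_server server;
      reconnect_server_init(&server);
      reconnect_server_start(&server, grpc_pick_unused_port_or_die());
      // Accept (and immediately drop) connections for ~5s, recording timestamps.
      reconnect_server_poll(&server, 5);
      int attempts = 0;
      for (timestamp_list* t = server.head; t != nullptr; t = t->next) ++attempts;
      std::printf("observed %d connection attempts\n", attempts);
      reconnect_server_destroy(&server);
      grpc_shutdown();
      return 0;
    }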
diff --git a/contrib/libs/grpc/test/core/util/run_with_poller.sh b/contrib/libs/grpc/test/core/util/run_with_poller.sh
index ad68e34b46..382a63e8ae 100755
--- a/contrib/libs/grpc/test/core/util/run_with_poller.sh
+++ b/contrib/libs/grpc/test/core/util/run_with_poller.sh
@@ -1,19 +1,19 @@
-#!/bin/sh
-# Copyright 2017 gRPC authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-set -ex
-export GRPC_POLL_STRATEGY=$1
-shift
-"$@"
+#!/bin/sh
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -ex
+export GRPC_POLL_STRATEGY=$1
+shift
+"$@"
diff --git a/contrib/libs/grpc/test/core/util/slice_splitter.cc b/contrib/libs/grpc/test/core/util/slice_splitter.cc
index 44e44c1e0b..1f81d03d96 100644
--- a/contrib/libs/grpc/test/core/util/slice_splitter.cc
+++ b/contrib/libs/grpc/test/core/util/slice_splitter.cc
@@ -1,126 +1,126 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include "test/core/util/slice_splitter.h"
-
-#include <string.h>
-
-#include <grpc/support/alloc.h>
-
-#include "src/core/lib/gpr/useful.h"
-
-const char* grpc_slice_split_mode_name(grpc_slice_split_mode mode) {
- switch (mode) {
- case GRPC_SLICE_SPLIT_IDENTITY:
- return "identity";
- case GRPC_SLICE_SPLIT_MERGE_ALL:
- return "merge_all";
- case GRPC_SLICE_SPLIT_ONE_BYTE:
- return "one_byte";
- }
- return "error";
-}
-
-void grpc_split_slices(grpc_slice_split_mode mode, grpc_slice* src_slices,
- size_t src_slice_count, grpc_slice** dst_slices,
- size_t* dst_slice_count) {
- size_t i, j;
- size_t length;
-
- switch (mode) {
- case GRPC_SLICE_SPLIT_IDENTITY:
- *dst_slice_count = src_slice_count;
- *dst_slices = static_cast<grpc_slice*>(
- gpr_malloc(sizeof(grpc_slice) * src_slice_count));
- for (i = 0; i < src_slice_count; i++) {
- (*dst_slices)[i] = src_slices[i];
- grpc_slice_ref((*dst_slices)[i]);
- }
- break;
- case GRPC_SLICE_SPLIT_MERGE_ALL:
- *dst_slice_count = 1;
- length = 0;
- for (i = 0; i < src_slice_count; i++) {
- length += GRPC_SLICE_LENGTH(src_slices[i]);
- }
- *dst_slices = static_cast<grpc_slice*>(gpr_malloc(sizeof(grpc_slice)));
- **dst_slices = grpc_slice_malloc(length);
- length = 0;
- for (i = 0; i < src_slice_count; i++) {
- memcpy(GRPC_SLICE_START_PTR(**dst_slices) + length,
- GRPC_SLICE_START_PTR(src_slices[i]),
- GRPC_SLICE_LENGTH(src_slices[i]));
- length += GRPC_SLICE_LENGTH(src_slices[i]);
- }
- break;
- case GRPC_SLICE_SPLIT_ONE_BYTE:
- length = 0;
- for (i = 0; i < src_slice_count; i++) {
- length += GRPC_SLICE_LENGTH(src_slices[i]);
- }
- *dst_slice_count = length;
- *dst_slices =
- static_cast<grpc_slice*>(gpr_malloc(sizeof(grpc_slice) * length));
- length = 0;
- for (i = 0; i < src_slice_count; i++) {
- for (j = 0; j < GRPC_SLICE_LENGTH(src_slices[i]); j++) {
- (*dst_slices)[length] = grpc_slice_sub(src_slices[i], j, j + 1);
- length++;
- }
- }
- break;
- }
-}
-
-void grpc_split_slices_to_buffer(grpc_slice_split_mode mode,
- grpc_slice* src_slices, size_t src_slice_count,
- grpc_slice_buffer* dst) {
- grpc_slice* slices;
- size_t nslices;
- size_t i;
- grpc_split_slices(mode, src_slices, src_slice_count, &slices, &nslices);
- for (i = 0; i < nslices; i++) {
- /* add indexed to avoid re-merging split slices */
- grpc_slice_buffer_add_indexed(dst, slices[i]);
- }
- gpr_free(slices);
-}
-
-void grpc_split_slice_buffer(grpc_slice_split_mode mode, grpc_slice_buffer* src,
- grpc_slice_buffer* dst) {
- grpc_split_slices_to_buffer(mode, src->slices, src->count, dst);
-}
-
-grpc_slice grpc_slice_merge(grpc_slice* slices, size_t nslices) {
- uint8_t* out = nullptr;
- size_t length = 0;
- size_t capacity = 0;
- size_t i;
-
- for (i = 0; i < nslices; i++) {
- if (GRPC_SLICE_LENGTH(slices[i]) + length > capacity) {
- capacity = GPR_MAX(capacity * 2, GRPC_SLICE_LENGTH(slices[i]) + length);
- out = static_cast<uint8_t*>(gpr_realloc(out, capacity));
- }
- memcpy(out + length, GRPC_SLICE_START_PTR(slices[i]),
- GRPC_SLICE_LENGTH(slices[i]));
- length += GRPC_SLICE_LENGTH(slices[i]);
- }
-
- return grpc_slice_new(out, length, gpr_free);
-}
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include "test/core/util/slice_splitter.h"
+
+#include <string.h>
+
+#include <grpc/support/alloc.h>
+
+#include "src/core/lib/gpr/useful.h"
+
+const char* grpc_slice_split_mode_name(grpc_slice_split_mode mode) {
+ switch (mode) {
+ case GRPC_SLICE_SPLIT_IDENTITY:
+ return "identity";
+ case GRPC_SLICE_SPLIT_MERGE_ALL:
+ return "merge_all";
+ case GRPC_SLICE_SPLIT_ONE_BYTE:
+ return "one_byte";
+ }
+ return "error";
+}
+
+void grpc_split_slices(grpc_slice_split_mode mode, grpc_slice* src_slices,
+ size_t src_slice_count, grpc_slice** dst_slices,
+ size_t* dst_slice_count) {
+ size_t i, j;
+ size_t length;
+
+ switch (mode) {
+ case GRPC_SLICE_SPLIT_IDENTITY:
+ *dst_slice_count = src_slice_count;
+ *dst_slices = static_cast<grpc_slice*>(
+ gpr_malloc(sizeof(grpc_slice) * src_slice_count));
+ for (i = 0; i < src_slice_count; i++) {
+ (*dst_slices)[i] = src_slices[i];
+ grpc_slice_ref((*dst_slices)[i]);
+ }
+ break;
+ case GRPC_SLICE_SPLIT_MERGE_ALL:
+ *dst_slice_count = 1;
+ length = 0;
+ for (i = 0; i < src_slice_count; i++) {
+ length += GRPC_SLICE_LENGTH(src_slices[i]);
+ }
+ *dst_slices = static_cast<grpc_slice*>(gpr_malloc(sizeof(grpc_slice)));
+ **dst_slices = grpc_slice_malloc(length);
+ length = 0;
+ for (i = 0; i < src_slice_count; i++) {
+ memcpy(GRPC_SLICE_START_PTR(**dst_slices) + length,
+ GRPC_SLICE_START_PTR(src_slices[i]),
+ GRPC_SLICE_LENGTH(src_slices[i]));
+ length += GRPC_SLICE_LENGTH(src_slices[i]);
+ }
+ break;
+ case GRPC_SLICE_SPLIT_ONE_BYTE:
+ length = 0;
+ for (i = 0; i < src_slice_count; i++) {
+ length += GRPC_SLICE_LENGTH(src_slices[i]);
+ }
+ *dst_slice_count = length;
+ *dst_slices =
+ static_cast<grpc_slice*>(gpr_malloc(sizeof(grpc_slice) * length));
+ length = 0;
+ for (i = 0; i < src_slice_count; i++) {
+ for (j = 0; j < GRPC_SLICE_LENGTH(src_slices[i]); j++) {
+ (*dst_slices)[length] = grpc_slice_sub(src_slices[i], j, j + 1);
+ length++;
+ }
+ }
+ break;
+ }
+}
+
+void grpc_split_slices_to_buffer(grpc_slice_split_mode mode,
+ grpc_slice* src_slices, size_t src_slice_count,
+ grpc_slice_buffer* dst) {
+ grpc_slice* slices;
+ size_t nslices;
+ size_t i;
+ grpc_split_slices(mode, src_slices, src_slice_count, &slices, &nslices);
+ for (i = 0; i < nslices; i++) {
+ /* add indexed to avoid re-merging split slices */
+ grpc_slice_buffer_add_indexed(dst, slices[i]);
+ }
+ gpr_free(slices);
+}
+
+void grpc_split_slice_buffer(grpc_slice_split_mode mode, grpc_slice_buffer* src,
+ grpc_slice_buffer* dst) {
+ grpc_split_slices_to_buffer(mode, src->slices, src->count, dst);
+}
+
+grpc_slice grpc_slice_merge(grpc_slice* slices, size_t nslices) {
+ uint8_t* out = nullptr;
+ size_t length = 0;
+ size_t capacity = 0;
+ size_t i;
+
+ for (i = 0; i < nslices; i++) {
+ if (GRPC_SLICE_LENGTH(slices[i]) + length > capacity) {
+ capacity = GPR_MAX(capacity * 2, GRPC_SLICE_LENGTH(slices[i]) + length);
+ out = static_cast<uint8_t*>(gpr_realloc(out, capacity));
+ }
+ memcpy(out + length, GRPC_SLICE_START_PTR(slices[i]),
+ GRPC_SLICE_LENGTH(slices[i]));
+ length += GRPC_SLICE_LENGTH(slices[i]);
+ }
+
+ return grpc_slice_new(out, length, gpr_free);
+}
diff --git a/contrib/libs/grpc/test/core/util/slice_splitter.h b/contrib/libs/grpc/test/core/util/slice_splitter.h
index 3e698dffae..65b9f6f7df 100644
--- a/contrib/libs/grpc/test/core/util/slice_splitter.h
+++ b/contrib/libs/grpc/test/core/util/slice_splitter.h
@@ -1,53 +1,53 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#ifndef GRPC_TEST_CORE_UTIL_SLICE_SPLITTER_H
-#define GRPC_TEST_CORE_UTIL_SLICE_SPLITTER_H
-
-/* utility function to split/merge slices together to help create test
- cases */
-
-#include <grpc/slice.h>
-#include <grpc/slice_buffer.h>
-
-typedef enum {
- /* merge all input slices into a single slice */
- GRPC_SLICE_SPLIT_MERGE_ALL,
- /* leave slices as is */
- GRPC_SLICE_SPLIT_IDENTITY,
- /* split slices into one byte chunks */
- GRPC_SLICE_SPLIT_ONE_BYTE
-} grpc_slice_split_mode;
-
-/* allocates *dst_slices; caller must unref all slices in dst_slices then free
- it */
-void grpc_split_slices(grpc_slice_split_mode mode, grpc_slice* src_slices,
- size_t src_slice_count, grpc_slice** dst_slices,
- size_t* dst_slice_count);
-
-void grpc_split_slices_to_buffer(grpc_slice_split_mode mode,
- grpc_slice* src_slices, size_t src_slice_count,
- grpc_slice_buffer* dst);
-void grpc_split_slice_buffer(grpc_slice_split_mode mode, grpc_slice_buffer* src,
- grpc_slice_buffer* dst);
-
-grpc_slice grpc_slice_merge(grpc_slice* slices, size_t nslices);
-
-const char* grpc_slice_split_mode_name(grpc_slice_split_mode mode);
-
-#endif /* GRPC_TEST_CORE_UTIL_SLICE_SPLITTER_H */
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#ifndef GRPC_TEST_CORE_UTIL_SLICE_SPLITTER_H
+#define GRPC_TEST_CORE_UTIL_SLICE_SPLITTER_H
+
+/* utility function to split/merge slices together to help create test
+ cases */
+
+#include <grpc/slice.h>
+#include <grpc/slice_buffer.h>
+
+typedef enum {
+ /* merge all input slices into a single slice */
+ GRPC_SLICE_SPLIT_MERGE_ALL,
+ /* leave slices as is */
+ GRPC_SLICE_SPLIT_IDENTITY,
+ /* split slices into one byte chunks */
+ GRPC_SLICE_SPLIT_ONE_BYTE
+} grpc_slice_split_mode;
+
+/* allocates *dst_slices; caller must unref all slices in dst_slices then free
+ it */
+void grpc_split_slices(grpc_slice_split_mode mode, grpc_slice* src_slices,
+ size_t src_slice_count, grpc_slice** dst_slices,
+ size_t* dst_slice_count);
+
+void grpc_split_slices_to_buffer(grpc_slice_split_mode mode,
+ grpc_slice* src_slices, size_t src_slice_count,
+ grpc_slice_buffer* dst);
+void grpc_split_slice_buffer(grpc_slice_split_mode mode, grpc_slice_buffer* src,
+ grpc_slice_buffer* dst);
+
+grpc_slice grpc_slice_merge(grpc_slice* slices, size_t nslices);
+
+const char* grpc_slice_split_mode_name(grpc_slice_split_mode mode);
+
+#endif /* GRPC_TEST_CORE_UTIL_SLICE_SPLITTER_H */
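Editor's note (illustrative, not part of the diff): a minimal sketch of the split contract described above; the caller unrefs every output slice and then frees the array itself:

    #include <cstdio>
    #include <grpc/grpc.h>
    #include <grpc/slice.h>
    #include <grpc/support/alloc.h>
    #include "test/core/util/slice_splitter.h"

    int main() {
      grpc_init();
      grpc_slice src[] = {grpc_slice_from_copied_string("hello "),
                          grpc_slice_from_copied_string("world")};
      grpc_slice* out = nullptr;
      size_t out_count = 0;
      grpc_split_slices(GRPC_SLICE_SPLIT_ONE_BYTE, src, 2, &out, &out_count);
      std::printf("%s produced %zu one-byte slices\n",
                  grpc_slice_split_mode_name(GRPC_SLICE_SPLIT_ONE_BYTE), out_count);
      for (size_t i = 0; i < out_count; ++i) grpc_slice_unref(out[i]);
      gpr_free(out);  // per the header: unref all slices, then free the array
      for (grpc_slice& s : src) grpc_slice_unref(s);
      grpc_shutdown();
      return 0;
    }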
diff --git a/contrib/libs/grpc/test/core/util/subprocess.h b/contrib/libs/grpc/test/core/util/subprocess.h
index 3163086bba..c7fe9af435 100644
--- a/contrib/libs/grpc/test/core/util/subprocess.h
+++ b/contrib/libs/grpc/test/core/util/subprocess.h
@@ -1,36 +1,36 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#ifndef GRPC_TEST_CORE_UTIL_SUBPROCESS_H
-#define GRPC_TEST_CORE_UTIL_SUBPROCESS_H
-
-#include <grpc/support/port_platform.h>
-
-typedef struct gpr_subprocess gpr_subprocess;
-
-/** .exe on windows, empty on unices */
-const char* gpr_subprocess_binary_extension();
-
-gpr_subprocess* gpr_subprocess_create(int argc, const char** argv);
-/** if subprocess has not been joined, kill it */
-void gpr_subprocess_destroy(gpr_subprocess* p);
-/** returns exit status; can be called at most once */
-int gpr_subprocess_join(gpr_subprocess* p);
-void gpr_subprocess_interrupt(gpr_subprocess* p);
-
-#endif /* GRPC_TEST_CORE_UTIL_SUBPROCESS_H */
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#ifndef GRPC_TEST_CORE_UTIL_SUBPROCESS_H
+#define GRPC_TEST_CORE_UTIL_SUBPROCESS_H
+
+#include <grpc/support/port_platform.h>
+
+typedef struct gpr_subprocess gpr_subprocess;
+
+/** .exe on windows, empty on unices */
+const char* gpr_subprocess_binary_extension();
+
+gpr_subprocess* gpr_subprocess_create(int argc, const char** argv);
+/** if subprocess has not been joined, kill it */
+void gpr_subprocess_destroy(gpr_subprocess* p);
+/** returns exit status; can be called at most once */
+int gpr_subprocess_join(gpr_subprocess* p);
+void gpr_subprocess_interrupt(gpr_subprocess* p);
+
+#endif /* GRPC_TEST_CORE_UTIL_SUBPROCESS_H */
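Editor's note (illustrative, not part of the diff): typical use of the gpr_subprocess API declared above. The helper binary name and flag are hypothetical; gpr_subprocess_binary_extension() keeps the path portable to Windows:

    #include <cstdio>
    #include <grpc/support/alloc.h>
    #include <grpc/support/string_util.h>
    #include "test/core/util/subprocess.h"

    int main() {
      char* path;
      gpr_asprintf(&path, "./my_helper%s", gpr_subprocess_binary_extension());
      const char* args[] = {path, "--selftest"};  // hypothetical helper and flag
      gpr_subprocess* p = gpr_subprocess_create(2, args);
      if (p == nullptr) {
        std::fprintf(stderr, "failed to launch %s\n", path);
        gpr_free(path);
        return 1;
      }
      int status = gpr_subprocess_join(p);  // exit status; call at most once
      std::fprintf(stderr, "%s exited with status %d\n", path, status);
      gpr_subprocess_destroy(p);
      gpr_free(path);
      return 0;
    }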
diff --git a/contrib/libs/grpc/test/core/util/subprocess_posix.cc b/contrib/libs/grpc/test/core/util/subprocess_posix.cc
index 263de85fbb..ab288d777f 100644
--- a/contrib/libs/grpc/test/core/util/subprocess_posix.cc
+++ b/contrib/libs/grpc/test/core/util/subprocess_posix.cc
@@ -1,100 +1,100 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include <grpc/support/port_platform.h>
-
-#ifdef GPR_POSIX_SUBPROCESS
-
-#include <assert.h>
-#include <errno.h>
-#include <signal.h>
-#include <stdbool.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <sys/types.h>
-#include <sys/wait.h>
-#include <unistd.h>
-
-#include <grpc/support/alloc.h>
-#include <grpc/support/log.h>
-
-#include "test/core/util/subprocess.h"
-
-struct gpr_subprocess {
- int pid;
- bool joined;
-};
-
-const char* gpr_subprocess_binary_extension() { return ""; }
-
-gpr_subprocess* gpr_subprocess_create(int argc, const char** argv) {
- gpr_subprocess* r;
- int pid;
- char** exec_args;
-
- pid = fork();
- if (pid == -1) {
- return nullptr;
- } else if (pid == 0) {
- exec_args = static_cast<char**>(
- gpr_malloc((static_cast<size_t>(argc) + 1) * sizeof(char*)));
- memcpy(exec_args, argv, static_cast<size_t>(argc) * sizeof(char*));
- exec_args[argc] = nullptr;
- execv(exec_args[0], exec_args);
- /* if we reach here, an error has occurred */
- gpr_log(GPR_ERROR, "execv '%s' failed: %s", exec_args[0], strerror(errno));
- _exit(1);
- return nullptr;
- } else {
- r = static_cast<gpr_subprocess*>(gpr_zalloc(sizeof(gpr_subprocess)));
- r->pid = pid;
- return r;
- }
-}
-
-void gpr_subprocess_destroy(gpr_subprocess* p) {
- if (!p->joined) {
- kill(p->pid, SIGKILL);
- gpr_subprocess_join(p);
- }
- gpr_free(p);
-}
-
-int gpr_subprocess_join(gpr_subprocess* p) {
- int status;
-retry:
- if (waitpid(p->pid, &status, 0) == -1) {
- if (errno == EINTR) {
- goto retry;
- }
- gpr_log(GPR_ERROR, "waitpid failed for pid %d: %s", p->pid,
- strerror(errno));
- return -1;
- }
- p->joined = true;
- return status;
-}
-
-void gpr_subprocess_interrupt(gpr_subprocess* p) {
- if (!p->joined) {
- kill(p->pid, SIGINT);
- }
-}
-
-#endif /* GPR_POSIX_SUBPROCESS */
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpc/support/port_platform.h>
+
+#ifdef GPR_POSIX_SUBPROCESS
+
+#include <assert.h>
+#include <errno.h>
+#include <signal.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+
+#include "test/core/util/subprocess.h"
+
+struct gpr_subprocess {
+ int pid;
+ bool joined;
+};
+
+const char* gpr_subprocess_binary_extension() { return ""; }
+
+gpr_subprocess* gpr_subprocess_create(int argc, const char** argv) {
+ gpr_subprocess* r;
+ int pid;
+ char** exec_args;
+
+ pid = fork();
+ if (pid == -1) {
+ return nullptr;
+ } else if (pid == 0) {
+ exec_args = static_cast<char**>(
+ gpr_malloc((static_cast<size_t>(argc) + 1) * sizeof(char*)));
+ memcpy(exec_args, argv, static_cast<size_t>(argc) * sizeof(char*));
+ exec_args[argc] = nullptr;
+ execv(exec_args[0], exec_args);
+ /* if we reach here, an error has occurred */
+ gpr_log(GPR_ERROR, "execv '%s' failed: %s", exec_args[0], strerror(errno));
+ _exit(1);
+ return nullptr;
+ } else {
+ r = static_cast<gpr_subprocess*>(gpr_zalloc(sizeof(gpr_subprocess)));
+ r->pid = pid;
+ return r;
+ }
+}
+
+void gpr_subprocess_destroy(gpr_subprocess* p) {
+ if (!p->joined) {
+ kill(p->pid, SIGKILL);
+ gpr_subprocess_join(p);
+ }
+ gpr_free(p);
+}
+
+int gpr_subprocess_join(gpr_subprocess* p) {
+ int status;
+retry:
+ if (waitpid(p->pid, &status, 0) == -1) {
+ if (errno == EINTR) {
+ goto retry;
+ }
+ gpr_log(GPR_ERROR, "waitpid failed for pid %d: %s", p->pid,
+ strerror(errno));
+ return -1;
+ }
+ p->joined = true;
+ return status;
+}
+
+void gpr_subprocess_interrupt(gpr_subprocess* p) {
+ if (!p->joined) {
+ kill(p->pid, SIGINT);
+ }
+}
+
+#endif /* GPR_POSIX_SUBPROCESS */
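On POSIX, gpr_subprocess_join() returns the raw waitpid() status rather than the exit code itself, so a caller that cares about the exact exit code decodes it with the standard macros. Illustrative fragment only, assuming a subprocess p created as in the header above:

    #include <sys/wait.h>

    int status = gpr_subprocess_join(p);
    if (status >= 0 && WIFEXITED(status)) {
      gpr_log(GPR_INFO, "child exit code: %d", WEXITSTATUS(status));
    } else if (status >= 0 && WIFSIGNALED(status)) {
      // e.g. SIGINT after gpr_subprocess_interrupt(), if the child does not handle it
      gpr_log(GPR_INFO, "child killed by signal: %d", WTERMSIG(status));
    }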
diff --git a/contrib/libs/grpc/test/core/util/subprocess_windows.cc b/contrib/libs/grpc/test/core/util/subprocess_windows.cc
index 4ca8dc3507..d3295244ea 100644
--- a/contrib/libs/grpc/test/core/util/subprocess_windows.cc
+++ b/contrib/libs/grpc/test/core/util/subprocess_windows.cc
@@ -1,126 +1,126 @@
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include <grpc/support/port_platform.h>
-
-#ifdef GPR_WINDOWS_SUBPROCESS
-
-#include <string.h>
-#include <tchar.h>
-#include <windows.h>
-
-#include <grpc/support/alloc.h>
-#include <grpc/support/log.h>
-#include "src/core/lib/gpr/string.h"
-#include "src/core/lib/gpr/string_windows.h"
-#include "test/core/util/subprocess.h"
-
-struct gpr_subprocess {
- PROCESS_INFORMATION pi;
- int joined;
- int interrupted;
-};
-
-const char* gpr_subprocess_binary_extension() { return ".exe"; }
-
-gpr_subprocess* gpr_subprocess_create(int argc, const char** argv) {
- gpr_subprocess* r;
-
- STARTUPINFO si;
- PROCESS_INFORMATION pi;
-
- char* args = gpr_strjoin_sep(argv, (size_t)argc, " ", NULL);
- TCHAR* args_tchar;
-
- args_tchar = gpr_char_to_tchar(args);
- gpr_free(args);
-
- memset(&si, 0, sizeof(si));
- si.cb = sizeof(si);
- memset(&pi, 0, sizeof(pi));
-
- if (!CreateProcess(NULL, args_tchar, NULL, NULL, FALSE,
- CREATE_NEW_PROCESS_GROUP, NULL, NULL, &si, &pi)) {
- gpr_free(args_tchar);
- return NULL;
- }
- gpr_free(args_tchar);
-
- r = (gpr_subprocess*)gpr_malloc(sizeof(gpr_subprocess));
- memset(r, 0, sizeof(*r));
- r->pi = pi;
- return r;
-}
-
-void gpr_subprocess_destroy(gpr_subprocess* p) {
- if (p) {
- if (!p->joined) {
- gpr_subprocess_interrupt(p);
- gpr_subprocess_join(p);
- }
- if (p->pi.hProcess) {
- CloseHandle(p->pi.hProcess);
- }
- if (p->pi.hThread) {
- CloseHandle(p->pi.hThread);
- }
- gpr_free(p);
- }
-}
-
-int gpr_subprocess_join(gpr_subprocess* p) {
- DWORD dwExitCode;
- if (GetExitCodeProcess(p->pi.hProcess, &dwExitCode)) {
- if (dwExitCode == STILL_ACTIVE) {
- if (WaitForSingleObject(p->pi.hProcess, INFINITE) == WAIT_OBJECT_0) {
- p->joined = 1;
- goto getExitCode;
- }
- return -1; // failed to join
- } else {
- goto getExitCode;
- }
- } else {
- return -1; // failed to get exit code
- }
-
-getExitCode:
- if (p->interrupted) {
- return 0;
- }
- if (GetExitCodeProcess(p->pi.hProcess, &dwExitCode)) {
- return (int)dwExitCode;
- } else {
- return -1; // failed to get exit code
- }
-}
-
-void gpr_subprocess_interrupt(gpr_subprocess* p) {
- DWORD dwExitCode;
- if (GetExitCodeProcess(p->pi.hProcess, &dwExitCode)) {
- if (dwExitCode == STILL_ACTIVE) {
- gpr_log(GPR_INFO, "sending ctrl-break");
- GenerateConsoleCtrlEvent(CTRL_BREAK_EVENT, p->pi.dwProcessId);
- p->joined = 1;
- p->interrupted = 1;
- }
- }
- return;
-}
-
-#endif /* GPR_WINDOWS_SUBPROCESS */
+/*
+ *
+ * Copyright 2016 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpc/support/port_platform.h>
+
+#ifdef GPR_WINDOWS_SUBPROCESS
+
+#include <string.h>
+#include <tchar.h>
+#include <windows.h>
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include "src/core/lib/gpr/string.h"
+#include "src/core/lib/gpr/string_windows.h"
+#include "test/core/util/subprocess.h"
+
+struct gpr_subprocess {
+ PROCESS_INFORMATION pi;
+ int joined;
+ int interrupted;
+};
+
+const char* gpr_subprocess_binary_extension() { return ".exe"; }
+
+gpr_subprocess* gpr_subprocess_create(int argc, const char** argv) {
+ gpr_subprocess* r;
+
+ STARTUPINFO si;
+ PROCESS_INFORMATION pi;
+
+ char* args = gpr_strjoin_sep(argv, (size_t)argc, " ", NULL);
+ TCHAR* args_tchar;
+
+ args_tchar = gpr_char_to_tchar(args);
+ gpr_free(args);
+
+ memset(&si, 0, sizeof(si));
+ si.cb = sizeof(si);
+ memset(&pi, 0, sizeof(pi));
+
+ if (!CreateProcess(NULL, args_tchar, NULL, NULL, FALSE,
+ CREATE_NEW_PROCESS_GROUP, NULL, NULL, &si, &pi)) {
+ gpr_free(args_tchar);
+ return NULL;
+ }
+ gpr_free(args_tchar);
+
+ r = (gpr_subprocess*)gpr_malloc(sizeof(gpr_subprocess));
+ memset(r, 0, sizeof(*r));
+ r->pi = pi;
+ return r;
+}
+
+void gpr_subprocess_destroy(gpr_subprocess* p) {
+ if (p) {
+ if (!p->joined) {
+ gpr_subprocess_interrupt(p);
+ gpr_subprocess_join(p);
+ }
+ if (p->pi.hProcess) {
+ CloseHandle(p->pi.hProcess);
+ }
+ if (p->pi.hThread) {
+ CloseHandle(p->pi.hThread);
+ }
+ gpr_free(p);
+ }
+}
+
+int gpr_subprocess_join(gpr_subprocess* p) {
+ DWORD dwExitCode;
+ if (GetExitCodeProcess(p->pi.hProcess, &dwExitCode)) {
+ if (dwExitCode == STILL_ACTIVE) {
+ if (WaitForSingleObject(p->pi.hProcess, INFINITE) == WAIT_OBJECT_0) {
+ p->joined = 1;
+ goto getExitCode;
+ }
+ return -1; // failed to join
+ } else {
+ goto getExitCode;
+ }
+ } else {
+ return -1; // failed to get exit code
+ }
+
+getExitCode:
+ if (p->interrupted) {
+ return 0;
+ }
+ if (GetExitCodeProcess(p->pi.hProcess, &dwExitCode)) {
+ return (int)dwExitCode;
+ } else {
+ return -1; // failed to get exit code
+ }
+}
+
+void gpr_subprocess_interrupt(gpr_subprocess* p) {
+ DWORD dwExitCode;
+ if (GetExitCodeProcess(p->pi.hProcess, &dwExitCode)) {
+ if (dwExitCode == STILL_ACTIVE) {
+ gpr_log(GPR_INFO, "sending ctrl-break");
+ GenerateConsoleCtrlEvent(CTRL_BREAK_EVENT, p->pi.dwProcessId);
+ p->joined = 1;
+ p->interrupted = 1;
+ }
+ }
+ return;
+}
+
+#endif /* GPR_WINDOWS_SUBPROCESS */
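The Windows interrupt path above works because CreateProcess passes CREATE_NEW_PROCESS_GROUP, which lets GenerateConsoleCtrlEvent target the child's process group; note that join() then reports 0 for an interrupted child rather than its real exit code. A small portability sketch of the interrupt-then-wait pattern (illustrative only; p comes from gpr_subprocess_create as above):

    gpr_subprocess_interrupt(p);          // SIGINT on POSIX, CTRL_BREAK on Windows
    int status = gpr_subprocess_join(p);  // 0 on Windows when the child was interrupted
    gpr_subprocess_destroy(p);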
diff --git a/contrib/libs/grpc/test/core/util/test_config.cc b/contrib/libs/grpc/test/core/util/test_config.cc
index 9f3e935a3a..9e57a486b2 100644
--- a/contrib/libs/grpc/test/core/util/test_config.cc
+++ b/contrib/libs/grpc/test/core/util/test_config.cc
@@ -1,111 +1,111 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include "test/core/util/test_config.h"
-
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include "test/core/util/test_config.h"
+
#include <grpc/impl/codegen/gpr_types.h>
-#include <inttypes.h>
-#include <signal.h>
-#include <stdbool.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-
+#include <inttypes.h>
+#include <signal.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
#include <grpc/grpc.h>
-#include <grpc/support/alloc.h>
-#include <grpc/support/log.h>
-
-#include "src/core/lib/gpr/string.h"
-#include "src/core/lib/gpr/useful.h"
-#include "src/core/lib/surface/init.h"
-
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+
+#include "src/core/lib/gpr/string.h"
+#include "src/core/lib/gpr/useful.h"
+#include "src/core/lib/surface/init.h"
+
#include "y_absl/debugging/failure_signal_handler.h"
#include "y_absl/debugging/symbolize.h"
-int64_t g_fixture_slowdown_factor = 1;
-int64_t g_poller_slowdown_factor = 1;
-
-#if GPR_GETPID_IN_UNISTD_H
-#include <unistd.h>
-static unsigned seed(void) { return static_cast<unsigned>(getpid()); }
-#endif
-
-#if GPR_GETPID_IN_PROCESS_H
-#include <process.h>
-static unsigned seed(void) { return (unsigned)_getpid(); }
-#endif
-
-#if GPR_WINDOWS_CRASH_HANDLER
-#include <windows.h>
-
-#include <tchar.h>
-
-// disable warning 4091 - dbghelp.h is broken for msvc2015
-#pragma warning(disable : 4091)
-#define DBGHELP_TRANSLATE_TCHAR
-#include <dbghelp.h>
-
-#ifdef _MSC_VER
-#pragma comment(lib, "dbghelp.lib")
-#endif
-
+int64_t g_fixture_slowdown_factor = 1;
+int64_t g_poller_slowdown_factor = 1;
+
+#if GPR_GETPID_IN_UNISTD_H
+#include <unistd.h>
+static unsigned seed(void) { return static_cast<unsigned>(getpid()); }
+#endif
+
+#if GPR_GETPID_IN_PROCESS_H
+#include <process.h>
+static unsigned seed(void) { return (unsigned)_getpid(); }
+#endif
+
+#if GPR_WINDOWS_CRASH_HANDLER
+#include <windows.h>
+
+#include <tchar.h>
+
+// disable warning 4091 - dbghelp.h is broken for msvc2015
+#pragma warning(disable : 4091)
+#define DBGHELP_TRANSLATE_TCHAR
+#include <dbghelp.h>
+
+#ifdef _MSC_VER
+#pragma comment(lib, "dbghelp.lib")
+#endif
+
static void print_stack_from_context(HANDLE thread, CONTEXT c) {
- STACKFRAME s; // in/out stackframe
- memset(&s, 0, sizeof(s));
- DWORD imageType;
-#ifdef _M_IX86
- // normally, call ImageNtHeader() and use machine info from PE header
- imageType = IMAGE_FILE_MACHINE_I386;
- s.AddrPC.Offset = c.Eip;
- s.AddrPC.Mode = AddrModeFlat;
- s.AddrFrame.Offset = c.Ebp;
- s.AddrFrame.Mode = AddrModeFlat;
- s.AddrStack.Offset = c.Esp;
- s.AddrStack.Mode = AddrModeFlat;
-#elif _M_X64
- imageType = IMAGE_FILE_MACHINE_AMD64;
- s.AddrPC.Offset = c.Rip;
- s.AddrPC.Mode = AddrModeFlat;
- s.AddrFrame.Offset = c.Rbp;
- s.AddrFrame.Mode = AddrModeFlat;
- s.AddrStack.Offset = c.Rsp;
- s.AddrStack.Mode = AddrModeFlat;
-#elif _M_IA64
- imageType = IMAGE_FILE_MACHINE_IA64;
- s.AddrPC.Offset = c.StIIP;
- s.AddrPC.Mode = AddrModeFlat;
- s.AddrFrame.Offset = c.IntSp;
- s.AddrFrame.Mode = AddrModeFlat;
- s.AddrBStore.Offset = c.RsBSP;
- s.AddrBStore.Mode = AddrModeFlat;
- s.AddrStack.Offset = c.IntSp;
- s.AddrStack.Mode = AddrModeFlat;
-#else
-#error "Platform not supported!"
-#endif
-
- HANDLE process = GetCurrentProcess();
-
- SYMBOL_INFOW* symbol =
- (SYMBOL_INFOW*)calloc(sizeof(SYMBOL_INFOW) + 256 * sizeof(wchar_t), 1);
- symbol->MaxNameLen = 255;
- symbol->SizeOfStruct = sizeof(SYMBOL_INFOW);
-
+ STACKFRAME s; // in/out stackframe
+ memset(&s, 0, sizeof(s));
+ DWORD imageType;
+#ifdef _M_IX86
+ // normally, call ImageNtHeader() and use machine info from PE header
+ imageType = IMAGE_FILE_MACHINE_I386;
+ s.AddrPC.Offset = c.Eip;
+ s.AddrPC.Mode = AddrModeFlat;
+ s.AddrFrame.Offset = c.Ebp;
+ s.AddrFrame.Mode = AddrModeFlat;
+ s.AddrStack.Offset = c.Esp;
+ s.AddrStack.Mode = AddrModeFlat;
+#elif _M_X64
+ imageType = IMAGE_FILE_MACHINE_AMD64;
+ s.AddrPC.Offset = c.Rip;
+ s.AddrPC.Mode = AddrModeFlat;
+ s.AddrFrame.Offset = c.Rbp;
+ s.AddrFrame.Mode = AddrModeFlat;
+ s.AddrStack.Offset = c.Rsp;
+ s.AddrStack.Mode = AddrModeFlat;
+#elif _M_IA64
+ imageType = IMAGE_FILE_MACHINE_IA64;
+ s.AddrPC.Offset = c.StIIP;
+ s.AddrPC.Mode = AddrModeFlat;
+ s.AddrFrame.Offset = c.IntSp;
+ s.AddrFrame.Mode = AddrModeFlat;
+ s.AddrBStore.Offset = c.RsBSP;
+ s.AddrBStore.Mode = AddrModeFlat;
+ s.AddrStack.Offset = c.IntSp;
+ s.AddrStack.Mode = AddrModeFlat;
+#else
+#error "Platform not supported!"
+#endif
+
+ HANDLE process = GetCurrentProcess();
+
+ SYMBOL_INFOW* symbol =
+ (SYMBOL_INFOW*)calloc(sizeof(SYMBOL_INFOW) + 256 * sizeof(wchar_t), 1);
+ symbol->MaxNameLen = 255;
+ symbol->SizeOfStruct = sizeof(SYMBOL_INFOW);
+
const unsigned short MAX_CALLERS_SHOWN =
8192; // avoid flooding the stderr if stacktrace is way too long
for (int frame = 0; frame < MAX_CALLERS_SHOWN &&
@@ -133,265 +133,265 @@ static void print_stack_from_context(HANDLE thread, CONTEXT c) {
fwprintf(stderr, L"*** %d: %016I64X %ls - %016I64X (%ls:%d)\n", frame,
(DWORD64)(s.AddrPC.Offset), symbol_name, symbol_address, file_name,
line_number);
- fflush(stderr);
- }
-
- free(symbol);
-}
-
+ fflush(stderr);
+ }
+
+ free(symbol);
+}
+
static void print_current_stack() {
CONTEXT context;
RtlCaptureContext(&context);
print_stack_from_context(GetCurrentThread(), context);
}
-static LONG crash_handler(struct _EXCEPTION_POINTERS* ex_info) {
- fprintf(stderr, "Exception handler called, dumping information\n");
- bool try_to_print_stack = true;
- PEXCEPTION_RECORD exrec = ex_info->ExceptionRecord;
- while (exrec) {
- DWORD code = exrec->ExceptionCode;
- DWORD flgs = exrec->ExceptionFlags;
- PVOID addr = exrec->ExceptionAddress;
- if (code == EXCEPTION_STACK_OVERFLOW) try_to_print_stack = false;
- fprintf(stderr, "code: %x - flags: %d - address: %p\n", code, flgs, addr);
- exrec = exrec->ExceptionRecord;
- }
- if (try_to_print_stack) {
+static LONG crash_handler(struct _EXCEPTION_POINTERS* ex_info) {
+ fprintf(stderr, "Exception handler called, dumping information\n");
+ bool try_to_print_stack = true;
+ PEXCEPTION_RECORD exrec = ex_info->ExceptionRecord;
+ while (exrec) {
+ DWORD code = exrec->ExceptionCode;
+ DWORD flgs = exrec->ExceptionFlags;
+ PVOID addr = exrec->ExceptionAddress;
+ if (code == EXCEPTION_STACK_OVERFLOW) try_to_print_stack = false;
+ fprintf(stderr, "code: %x - flags: %d - address: %p\n", code, flgs, addr);
+ exrec = exrec->ExceptionRecord;
+ }
+ if (try_to_print_stack) {
print_stack_from_context(GetCurrentThread(), *ex_info->ContextRecord);
- }
- if (IsDebuggerPresent()) {
- __debugbreak();
- } else {
- _exit(1);
- }
- return EXCEPTION_EXECUTE_HANDLER;
-}
-
-static void abort_handler(int sig) {
- fprintf(stderr, "Abort handler called.\n");
- print_current_stack();
- if (IsDebuggerPresent()) {
- __debugbreak();
- } else {
- _exit(1);
- }
-}
-
-static void install_crash_handler() {
- if (!SymInitialize(GetCurrentProcess(), NULL, TRUE)) {
- fprintf(stderr, "SymInitialize failed: %d\n", GetLastError());
- }
- SetUnhandledExceptionFilter((LPTOP_LEVEL_EXCEPTION_FILTER)crash_handler);
- _set_abort_behavior(0, _WRITE_ABORT_MSG);
- _set_abort_behavior(0, _CALL_REPORTFAULT);
- signal(SIGABRT, abort_handler);
-}
-#elif GPR_POSIX_CRASH_HANDLER
-#include <errno.h>
-#include <execinfo.h>
-#include <stdio.h>
-#include <string.h>
-
-#define SIGNAL_NAMES_LENGTH 32
-
-static const char* const signal_names[] = {
- nullptr, "SIGHUP", "SIGINT", "SIGQUIT", "SIGILL", "SIGTRAP",
- "SIGABRT", "SIGBUS", "SIGFPE", "SIGKILL", "SIGUSR1", "SIGSEGV",
- "SIGUSR2", "SIGPIPE", "SIGALRM", "SIGTERM", "SIGSTKFLT", "SIGCHLD",
- "SIGCONT", "SIGSTOP", "SIGTSTP", "SIGTTIN", "SIGTTOU", "SIGURG",
- "SIGXCPU", "SIGXFSZ", "SIGVTALRM", "SIGPROF", "SIGWINCH", "SIGIO",
- "SIGPWR", "SIGSYS"};
-
-static char g_alt_stack[GPR_MAX(MINSIGSTKSZ, 65536)];
-
-#define MAX_FRAMES 32
-
-/* signal safe output */
-static void output_string(const char* string) {
- size_t len = strlen(string);
- ssize_t r;
-
- do {
- r = write(STDERR_FILENO, string, len);
- } while (r == -1 && errno == EINTR);
-}
-
-static void output_num(long num) {
- char buf[GPR_LTOA_MIN_BUFSIZE];
- gpr_ltoa(num, buf);
- output_string(buf);
-}
-
-static void crash_handler(int signum, siginfo_t* /*info*/, void* /*data*/) {
- void* addrlist[MAX_FRAMES + 1];
- int addrlen;
-
- output_string("\n\n\n*******************************\nCaught signal ");
- if (signum > 0 && signum < SIGNAL_NAMES_LENGTH) {
- output_string(signal_names[signum]);
- } else {
- output_num(signum);
- }
- output_string("\n");
-
- addrlen = backtrace(addrlist, GPR_ARRAY_SIZE(addrlist));
-
- if (addrlen == 0) {
- output_string(" no backtrace\n");
- } else {
- backtrace_symbols_fd(addrlist, addrlen, STDERR_FILENO);
- }
-
- /* try to get a core dump for SIGTERM */
- if (signum == SIGTERM) signum = SIGQUIT;
- raise(signum);
-}
-
-static void install_crash_handler() {
- stack_t ss;
- struct sigaction sa;
-
- memset(&ss, 0, sizeof(ss));
- memset(&sa, 0, sizeof(sa));
- ss.ss_size = sizeof(g_alt_stack);
- ss.ss_sp = g_alt_stack;
- GPR_ASSERT(sigaltstack(&ss, nullptr) == 0);
- sa.sa_flags = static_cast<int>(SA_SIGINFO | SA_ONSTACK | SA_RESETHAND);
- sa.sa_sigaction = crash_handler;
- GPR_ASSERT(sigaction(SIGILL, &sa, nullptr) == 0);
- GPR_ASSERT(sigaction(SIGABRT, &sa, nullptr) == 0);
- GPR_ASSERT(sigaction(SIGBUS, &sa, nullptr) == 0);
- GPR_ASSERT(sigaction(SIGSEGV, &sa, nullptr) == 0);
- GPR_ASSERT(sigaction(SIGTERM, &sa, nullptr) == 0);
- GPR_ASSERT(sigaction(SIGQUIT, &sa, nullptr) == 0);
-}
-#else
-static void install_crash_handler() {}
-#endif
-
-bool BuiltUnderValgrind() {
-#ifdef RUNNING_ON_VALGRIND
- return true;
-#else
- return false;
-#endif
-}
-
-bool BuiltUnderTsan() {
-#if defined(__has_feature)
-#if __has_feature(thread_sanitizer)
- return true;
-#else
- return false;
-#endif
-#else
-#ifdef THREAD_SANITIZER
- return true;
-#else
- return false;
-#endif
-#endif
-}
-
-bool BuiltUnderAsan() {
-#if defined(__has_feature)
-#if __has_feature(address_sanitizer)
- return true;
-#else
- return false;
-#endif
-#else
-#ifdef ADDRESS_SANITIZER
- return true;
-#else
- return false;
-#endif
-#endif
-}
-
-bool BuiltUnderMsan() {
-#if defined(__has_feature)
-#if __has_feature(memory_sanitizer)
- return true;
-#else
- return false;
-#endif
-#else
-#ifdef MEMORY_SANITIZER
- return true;
-#else
- return false;
-#endif
-#endif
-}
-
-bool BuiltUnderUbsan() {
-#ifdef GRPC_UBSAN
- return true;
-#else
- return false;
-#endif
-}
-
-int64_t grpc_test_sanitizer_slowdown_factor() {
- int64_t sanitizer_multiplier = 1;
- if (BuiltUnderValgrind()) {
- sanitizer_multiplier = 20;
- } else if (BuiltUnderTsan()) {
- sanitizer_multiplier = 5;
- } else if (BuiltUnderAsan()) {
- sanitizer_multiplier = 3;
- } else if (BuiltUnderMsan()) {
- sanitizer_multiplier = 4;
- } else if (BuiltUnderUbsan()) {
- sanitizer_multiplier = 5;
- }
- return sanitizer_multiplier;
-}
-
-int64_t grpc_test_slowdown_factor() {
- return grpc_test_sanitizer_slowdown_factor() * g_fixture_slowdown_factor *
- g_poller_slowdown_factor;
-}
-
-gpr_timespec grpc_timeout_seconds_to_deadline(int64_t time_s) {
- return gpr_time_add(
- gpr_now(GPR_CLOCK_MONOTONIC),
- gpr_time_from_millis(
- grpc_test_slowdown_factor() * static_cast<int64_t>(1e3) * time_s,
- GPR_TIMESPAN));
-}
-
-gpr_timespec grpc_timeout_milliseconds_to_deadline(int64_t time_ms) {
- return gpr_time_add(
- gpr_now(GPR_CLOCK_MONOTONIC),
- gpr_time_from_micros(
- grpc_test_slowdown_factor() * static_cast<int64_t>(1e3) * time_ms,
- GPR_TIMESPAN));
-}
-
+ }
+ if (IsDebuggerPresent()) {
+ __debugbreak();
+ } else {
+ _exit(1);
+ }
+ return EXCEPTION_EXECUTE_HANDLER;
+}
+
+static void abort_handler(int sig) {
+ fprintf(stderr, "Abort handler called.\n");
+ print_current_stack();
+ if (IsDebuggerPresent()) {
+ __debugbreak();
+ } else {
+ _exit(1);
+ }
+}
+
+static void install_crash_handler() {
+ if (!SymInitialize(GetCurrentProcess(), NULL, TRUE)) {
+ fprintf(stderr, "SymInitialize failed: %d\n", GetLastError());
+ }
+ SetUnhandledExceptionFilter((LPTOP_LEVEL_EXCEPTION_FILTER)crash_handler);
+ _set_abort_behavior(0, _WRITE_ABORT_MSG);
+ _set_abort_behavior(0, _CALL_REPORTFAULT);
+ signal(SIGABRT, abort_handler);
+}
+#elif GPR_POSIX_CRASH_HANDLER
+#include <errno.h>
+#include <execinfo.h>
+#include <stdio.h>
+#include <string.h>
+
+#define SIGNAL_NAMES_LENGTH 32
+
+static const char* const signal_names[] = {
+ nullptr, "SIGHUP", "SIGINT", "SIGQUIT", "SIGILL", "SIGTRAP",
+ "SIGABRT", "SIGBUS", "SIGFPE", "SIGKILL", "SIGUSR1", "SIGSEGV",
+ "SIGUSR2", "SIGPIPE", "SIGALRM", "SIGTERM", "SIGSTKFLT", "SIGCHLD",
+ "SIGCONT", "SIGSTOP", "SIGTSTP", "SIGTTIN", "SIGTTOU", "SIGURG",
+ "SIGXCPU", "SIGXFSZ", "SIGVTALRM", "SIGPROF", "SIGWINCH", "SIGIO",
+ "SIGPWR", "SIGSYS"};
+
+static char g_alt_stack[GPR_MAX(MINSIGSTKSZ, 65536)];
+
+#define MAX_FRAMES 32
+
+/* signal safe output */
+static void output_string(const char* string) {
+ size_t len = strlen(string);
+ ssize_t r;
+
+ do {
+ r = write(STDERR_FILENO, string, len);
+ } while (r == -1 && errno == EINTR);
+}
+
+static void output_num(long num) {
+ char buf[GPR_LTOA_MIN_BUFSIZE];
+ gpr_ltoa(num, buf);
+ output_string(buf);
+}
+
+static void crash_handler(int signum, siginfo_t* /*info*/, void* /*data*/) {
+ void* addrlist[MAX_FRAMES + 1];
+ int addrlen;
+
+ output_string("\n\n\n*******************************\nCaught signal ");
+ if (signum > 0 && signum < SIGNAL_NAMES_LENGTH) {
+ output_string(signal_names[signum]);
+ } else {
+ output_num(signum);
+ }
+ output_string("\n");
+
+ addrlen = backtrace(addrlist, GPR_ARRAY_SIZE(addrlist));
+
+ if (addrlen == 0) {
+ output_string(" no backtrace\n");
+ } else {
+ backtrace_symbols_fd(addrlist, addrlen, STDERR_FILENO);
+ }
+
+ /* try to get a core dump for SIGTERM */
+ if (signum == SIGTERM) signum = SIGQUIT;
+ raise(signum);
+}
+
+static void install_crash_handler() {
+ stack_t ss;
+ struct sigaction sa;
+
+ memset(&ss, 0, sizeof(ss));
+ memset(&sa, 0, sizeof(sa));
+ ss.ss_size = sizeof(g_alt_stack);
+ ss.ss_sp = g_alt_stack;
+ GPR_ASSERT(sigaltstack(&ss, nullptr) == 0);
+ sa.sa_flags = static_cast<int>(SA_SIGINFO | SA_ONSTACK | SA_RESETHAND);
+ sa.sa_sigaction = crash_handler;
+ GPR_ASSERT(sigaction(SIGILL, &sa, nullptr) == 0);
+ GPR_ASSERT(sigaction(SIGABRT, &sa, nullptr) == 0);
+ GPR_ASSERT(sigaction(SIGBUS, &sa, nullptr) == 0);
+ GPR_ASSERT(sigaction(SIGSEGV, &sa, nullptr) == 0);
+ GPR_ASSERT(sigaction(SIGTERM, &sa, nullptr) == 0);
+ GPR_ASSERT(sigaction(SIGQUIT, &sa, nullptr) == 0);
+}
+#else
+static void install_crash_handler() {}
+#endif
+
+bool BuiltUnderValgrind() {
+#ifdef RUNNING_ON_VALGRIND
+ return true;
+#else
+ return false;
+#endif
+}
+
+bool BuiltUnderTsan() {
+#if defined(__has_feature)
+#if __has_feature(thread_sanitizer)
+ return true;
+#else
+ return false;
+#endif
+#else
+#ifdef THREAD_SANITIZER
+ return true;
+#else
+ return false;
+#endif
+#endif
+}
+
+bool BuiltUnderAsan() {
+#if defined(__has_feature)
+#if __has_feature(address_sanitizer)
+ return true;
+#else
+ return false;
+#endif
+#else
+#ifdef ADDRESS_SANITIZER
+ return true;
+#else
+ return false;
+#endif
+#endif
+}
+
+bool BuiltUnderMsan() {
+#if defined(__has_feature)
+#if __has_feature(memory_sanitizer)
+ return true;
+#else
+ return false;
+#endif
+#else
+#ifdef MEMORY_SANITIZER
+ return true;
+#else
+ return false;
+#endif
+#endif
+}
+
+bool BuiltUnderUbsan() {
+#ifdef GRPC_UBSAN
+ return true;
+#else
+ return false;
+#endif
+}
+
+int64_t grpc_test_sanitizer_slowdown_factor() {
+ int64_t sanitizer_multiplier = 1;
+ if (BuiltUnderValgrind()) {
+ sanitizer_multiplier = 20;
+ } else if (BuiltUnderTsan()) {
+ sanitizer_multiplier = 5;
+ } else if (BuiltUnderAsan()) {
+ sanitizer_multiplier = 3;
+ } else if (BuiltUnderMsan()) {
+ sanitizer_multiplier = 4;
+ } else if (BuiltUnderUbsan()) {
+ sanitizer_multiplier = 5;
+ }
+ return sanitizer_multiplier;
+}
+
+int64_t grpc_test_slowdown_factor() {
+ return grpc_test_sanitizer_slowdown_factor() * g_fixture_slowdown_factor *
+ g_poller_slowdown_factor;
+}
+
+gpr_timespec grpc_timeout_seconds_to_deadline(int64_t time_s) {
+ return gpr_time_add(
+ gpr_now(GPR_CLOCK_MONOTONIC),
+ gpr_time_from_millis(
+ grpc_test_slowdown_factor() * static_cast<int64_t>(1e3) * time_s,
+ GPR_TIMESPAN));
+}
+
+gpr_timespec grpc_timeout_milliseconds_to_deadline(int64_t time_ms) {
+ return gpr_time_add(
+ gpr_now(GPR_CLOCK_MONOTONIC),
+ gpr_time_from_micros(
+ grpc_test_slowdown_factor() * static_cast<int64_t>(1e3) * time_ms,
+ GPR_TIMESPAN));
+}
+
void grpc_test_init(int argc, char** argv) {
#if GPR_WINDOWS
// Windows cannot use y_absl::InitializeSymbolizer until it fixes mysterious
// SymInitialize failure using Bazel RBE on Windows
// https://github.com/grpc/grpc/issues/24178
- install_crash_handler();
+ install_crash_handler();
#else
y_absl::InitializeSymbolizer(argv[0]);
y_absl::FailureSignalHandlerOptions options;
y_absl::InstallFailureSignalHandler(options);
#endif
- gpr_log(GPR_DEBUG,
- "test slowdown factor: sanitizer=%" PRId64 ", fixture=%" PRId64
- ", poller=%" PRId64 ", total=%" PRId64,
- grpc_test_sanitizer_slowdown_factor(), g_fixture_slowdown_factor,
- g_poller_slowdown_factor, grpc_test_slowdown_factor());
- /* seed rng with pid, so we don't end up with the same random numbers as a
- concurrently running test binary */
- srand(seed());
-}
-
+ gpr_log(GPR_DEBUG,
+ "test slowdown factor: sanitizer=%" PRId64 ", fixture=%" PRId64
+ ", poller=%" PRId64 ", total=%" PRId64,
+ grpc_test_sanitizer_slowdown_factor(), g_fixture_slowdown_factor,
+ g_poller_slowdown_factor, grpc_test_slowdown_factor());
+ /* seed rng with pid, so we don't end up with the same random numbers as a
+ concurrently running test binary */
+ srand(seed());
+}
+
bool grpc_wait_until_shutdown(int64_t time_s) {
gpr_timespec deadline = grpc_timeout_seconds_to_deadline(time_s);
while (grpc_is_initialized()) {
@@ -405,13 +405,13 @@ bool grpc_wait_until_shutdown(int64_t time_s) {
return true;
}
-namespace grpc {
-namespace testing {
-
-TestEnvironment::TestEnvironment(int argc, char** argv) {
- grpc_test_init(argc, argv);
-}
-
+namespace grpc {
+namespace testing {
+
+TestEnvironment::TestEnvironment(int argc, char** argv) {
+ grpc_test_init(argc, argv);
+}
+
TestEnvironment::~TestEnvironment() {
// This will wait until gRPC shutdown has actually happened to make sure
// no gRPC resources (such as thread) are active. (timeout = 10s)
@@ -429,7 +429,7 @@ TestEnvironment::~TestEnvironment() {
}
gpr_log(GPR_INFO, "TestEnvironment ends");
}
-
+
TestGrpcScope::TestGrpcScope() { grpc_init(); }
TestGrpcScope::~TestGrpcScope() {
@@ -439,5 +439,5 @@ TestGrpcScope::~TestGrpcScope() {
}
}
-} // namespace testing
-} // namespace grpc
+} // namespace testing
+} // namespace grpc
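As an illustration of how the slowdown-scaled deadlines defined above are consumed, here is a minimal sketch of waiting on a completion queue in a test; the queue cq is assumed to exist and is not part of this change.

    // 5 "logical" seconds, automatically stretched under valgrind/TSAN/ASAN/MSAN/UBSAN.
    gpr_timespec deadline = grpc_timeout_seconds_to_deadline(5);
    grpc_event ev = grpc_completion_queue_next(cq, deadline, nullptr);
    GPR_ASSERT(ev.type == GRPC_OP_COMPLETE);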
diff --git a/contrib/libs/grpc/test/core/util/test_config.h b/contrib/libs/grpc/test/core/util/test_config.h
index 63ad2a51ba..6ac43de266 100644
--- a/contrib/libs/grpc/test/core/util/test_config.h
+++ b/contrib/libs/grpc/test/core/util/test_config.h
@@ -1,42 +1,42 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#ifndef GRPC_TEST_CORE_UTIL_TEST_CONFIG_H
-#define GRPC_TEST_CORE_UTIL_TEST_CONFIG_H
-
-#include <grpc/support/time.h>
-
-extern int64_t g_fixture_slowdown_factor;
-extern int64_t g_poller_slowdown_factor;
-
-/* Returns an appropriate scaling factor for timeouts. */
-int64_t grpc_test_slowdown_factor();
-
-/* Converts a given timeout (in seconds) to a deadline. */
-gpr_timespec grpc_timeout_seconds_to_deadline(int64_t time_s);
-
-/* Converts a given timeout (in milliseconds) to a deadline. */
-gpr_timespec grpc_timeout_milliseconds_to_deadline(int64_t time_ms);
-
-#if !defined(GRPC_TEST_CUSTOM_PICK_PORT) && !defined(GRPC_PORT_ISOLATED_RUNTIME)
-#define GRPC_TEST_PICK_PORT
-#endif
-
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#ifndef GRPC_TEST_CORE_UTIL_TEST_CONFIG_H
+#define GRPC_TEST_CORE_UTIL_TEST_CONFIG_H
+
+#include <grpc/support/time.h>
+
+extern int64_t g_fixture_slowdown_factor;
+extern int64_t g_poller_slowdown_factor;
+
+/* Returns an appropriate scaling factor for timeouts. */
+int64_t grpc_test_slowdown_factor();
+
+/* Converts a given timeout (in seconds) to a deadline. */
+gpr_timespec grpc_timeout_seconds_to_deadline(int64_t time_s);
+
+/* Converts a given timeout (in milliseconds) to a deadline. */
+gpr_timespec grpc_timeout_milliseconds_to_deadline(int64_t time_ms);
+
+#if !defined(GRPC_TEST_CUSTOM_PICK_PORT) && !defined(GRPC_PORT_ISOLATED_RUNTIME)
+#define GRPC_TEST_PICK_PORT
+#endif
+
// Returns whether this is built under ThreadSanitizer
bool BuiltUnderTsan();
@@ -49,24 +49,24 @@ bool BuiltUnderMsan();
// Returns whether this is built under UndefinedBehaviorSanitizer
bool BuiltUnderUbsan();
-// Prefer TestEnvironment below.
-void grpc_test_init(int argc, char** argv);
-
+// Prefer TestEnvironment below.
+void grpc_test_init(int argc, char** argv);
+
// Wait until gRPC is fully shut down.
// Returns if grpc is shutdown
bool grpc_wait_until_shutdown(int64_t time_s);
-namespace grpc {
-namespace testing {
-
-// A TestEnvironment object should be alive in the main function of a test. It
-// provides test init and shutdown inside.
-class TestEnvironment {
- public:
- TestEnvironment(int argc, char** argv);
- ~TestEnvironment();
-};
-
+namespace grpc {
+namespace testing {
+
+// A TestEnvironment object should be alive in the main function of a test. It
+// provides test init and shutdown inside.
+class TestEnvironment {
+ public:
+ TestEnvironment(int argc, char** argv);
+ ~TestEnvironment();
+};
+
// A TestGrpcScope makes sure that
// - when it's created, gRPC will be initialized
// - when it's destroyed, gRPC will shutdown and it waits until shutdown
@@ -76,7 +76,7 @@ class TestGrpcScope {
~TestGrpcScope();
};
-} // namespace testing
-} // namespace grpc
-
-#endif /* GRPC_TEST_CORE_UTIL_TEST_CONFIG_H */
+} // namespace testing
+} // namespace grpc
+
+#endif /* GRPC_TEST_CORE_UTIL_TEST_CONFIG_H */
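A typical entry point built on this header looks roughly like the following; the body of the test is elided.

    int main(int argc, char** argv) {
      grpc::testing::TestEnvironment env(argc, argv);  // crash handlers, rng seed, slowdown log
      {
        grpc::testing::TestGrpcScope grpc_scope;  // grpc_init() here, waits for shutdown on scope exit
        // ... exercise the code under test ...
      }
      return 0;
    }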
diff --git a/contrib/libs/grpc/test/core/util/test_lb_policies.cc b/contrib/libs/grpc/test/core/util/test_lb_policies.cc
index e163d7b9cb..c1c1e9c56a 100644
--- a/contrib/libs/grpc/test/core/util/test_lb_policies.cc
+++ b/contrib/libs/grpc/test/core/util/test_lb_policies.cc
@@ -1,84 +1,84 @@
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include "test/core/util/test_lb_policies.h"
-
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include "test/core/util/test_lb_policies.h"
+
#include <util/generic/string.h>
-
-#include <grpc/support/log.h>
-
-#include "src/core/ext/filters/client_channel/lb_policy.h"
-#include "src/core/ext/filters/client_channel/lb_policy_registry.h"
-#include "src/core/lib/channel/channel_args.h"
-#include "src/core/lib/channel/channelz.h"
-#include "src/core/lib/debug/trace.h"
-#include "src/core/lib/gprpp/memory.h"
-#include "src/core/lib/gprpp/orphanable.h"
-#include "src/core/lib/gprpp/ref_counted_ptr.h"
-#include "src/core/lib/iomgr/closure.h"
-#include "src/core/lib/iomgr/combiner.h"
-#include "src/core/lib/iomgr/error.h"
-#include "src/core/lib/iomgr/pollset_set.h"
-#include "src/core/lib/json/json.h"
-#include "src/core/lib/transport/connectivity_state.h"
-
-namespace grpc_core {
-
-namespace {
-
-//
-// ForwardingLoadBalancingPolicy
-//
-
-// A minimal forwarding class to avoid implementing a standalone test LB.
-class ForwardingLoadBalancingPolicy : public LoadBalancingPolicy {
- public:
- ForwardingLoadBalancingPolicy(
- std::unique_ptr<ChannelControlHelper> delegating_helper, Args args,
+
+#include <grpc/support/log.h>
+
+#include "src/core/ext/filters/client_channel/lb_policy.h"
+#include "src/core/ext/filters/client_channel/lb_policy_registry.h"
+#include "src/core/lib/channel/channel_args.h"
+#include "src/core/lib/channel/channelz.h"
+#include "src/core/lib/debug/trace.h"
+#include "src/core/lib/gprpp/memory.h"
+#include "src/core/lib/gprpp/orphanable.h"
+#include "src/core/lib/gprpp/ref_counted_ptr.h"
+#include "src/core/lib/iomgr/closure.h"
+#include "src/core/lib/iomgr/combiner.h"
+#include "src/core/lib/iomgr/error.h"
+#include "src/core/lib/iomgr/pollset_set.h"
+#include "src/core/lib/json/json.h"
+#include "src/core/lib/transport/connectivity_state.h"
+
+namespace grpc_core {
+
+namespace {
+
+//
+// ForwardingLoadBalancingPolicy
+//
+
+// A minimal forwarding class to avoid implementing a standalone test LB.
+class ForwardingLoadBalancingPolicy : public LoadBalancingPolicy {
+ public:
+ ForwardingLoadBalancingPolicy(
+ std::unique_ptr<ChannelControlHelper> delegating_helper, Args args,
const TString& delegate_policy_name, intptr_t initial_refcount = 1)
- : LoadBalancingPolicy(std::move(args), initial_refcount) {
- Args delegate_args;
+ : LoadBalancingPolicy(std::move(args), initial_refcount) {
+ Args delegate_args;
delegate_args.work_serializer = work_serializer();
- delegate_args.channel_control_helper = std::move(delegating_helper);
- delegate_args.args = args.args;
- delegate_ = LoadBalancingPolicyRegistry::CreateLoadBalancingPolicy(
- delegate_policy_name.c_str(), std::move(delegate_args));
- grpc_pollset_set_add_pollset_set(delegate_->interested_parties(),
- interested_parties());
- }
-
- ~ForwardingLoadBalancingPolicy() override = default;
-
- void UpdateLocked(UpdateArgs args) override {
- delegate_->UpdateLocked(std::move(args));
- }
-
- void ExitIdleLocked() override { delegate_->ExitIdleLocked(); }
-
- void ResetBackoffLocked() override { delegate_->ResetBackoffLocked(); }
-
- private:
- void ShutdownLocked() override { delegate_.reset(); }
-
- OrphanablePtr<LoadBalancingPolicy> delegate_;
-};
-
-//
+ delegate_args.channel_control_helper = std::move(delegating_helper);
+ delegate_args.args = args.args;
+ delegate_ = LoadBalancingPolicyRegistry::CreateLoadBalancingPolicy(
+ delegate_policy_name.c_str(), std::move(delegate_args));
+ grpc_pollset_set_add_pollset_set(delegate_->interested_parties(),
+ interested_parties());
+ }
+
+ ~ForwardingLoadBalancingPolicy() override = default;
+
+ void UpdateLocked(UpdateArgs args) override {
+ delegate_->UpdateLocked(std::move(args));
+ }
+
+ void ExitIdleLocked() override { delegate_->ExitIdleLocked(); }
+
+ void ResetBackoffLocked() override { delegate_->ResetBackoffLocked(); }
+
+ private:
+ void ShutdownLocked() override { delegate_.reset(); }
+
+ OrphanablePtr<LoadBalancingPolicy> delegate_;
+};
+
+//
// CopyMetadataToVector()
//
@@ -191,117 +191,117 @@ class TestPickArgsLbFactory : public LoadBalancingPolicyFactory {
};
//
-// InterceptRecvTrailingMetadataLoadBalancingPolicy
-//
-
-constexpr char kInterceptRecvTrailingMetadataLbPolicyName[] =
- "intercept_trailing_metadata_lb";
-
-class InterceptRecvTrailingMetadataLoadBalancingPolicy
- : public ForwardingLoadBalancingPolicy {
- public:
- InterceptRecvTrailingMetadataLoadBalancingPolicy(
+// InterceptRecvTrailingMetadataLoadBalancingPolicy
+//
+
+constexpr char kInterceptRecvTrailingMetadataLbPolicyName[] =
+ "intercept_trailing_metadata_lb";
+
+class InterceptRecvTrailingMetadataLoadBalancingPolicy
+ : public ForwardingLoadBalancingPolicy {
+ public:
+ InterceptRecvTrailingMetadataLoadBalancingPolicy(
Args args, InterceptRecvTrailingMetadataCallback cb)
- : ForwardingLoadBalancingPolicy(
+ : ForwardingLoadBalancingPolicy(
y_absl::make_unique<Helper>(
- RefCountedPtr<InterceptRecvTrailingMetadataLoadBalancingPolicy>(
- this),
+ RefCountedPtr<InterceptRecvTrailingMetadataLoadBalancingPolicy>(
+ this),
std::move(cb)),
- std::move(args),
- /*delegate_lb_policy_name=*/"pick_first",
- /*initial_refcount=*/2) {}
-
- ~InterceptRecvTrailingMetadataLoadBalancingPolicy() override = default;
-
- const char* name() const override {
- return kInterceptRecvTrailingMetadataLbPolicyName;
- }
-
- private:
- class Picker : public SubchannelPicker {
- public:
+ std::move(args),
+ /*delegate_lb_policy_name=*/"pick_first",
+ /*initial_refcount=*/2) {}
+
+ ~InterceptRecvTrailingMetadataLoadBalancingPolicy() override = default;
+
+ const char* name() const override {
+ return kInterceptRecvTrailingMetadataLbPolicyName;
+ }
+
+ private:
+ class Picker : public SubchannelPicker {
+ public:
Picker(std::unique_ptr<SubchannelPicker> delegate_picker,
InterceptRecvTrailingMetadataCallback cb)
: delegate_picker_(std::move(delegate_picker)), cb_(std::move(cb)) {}
-
- PickResult Pick(PickArgs args) override {
- // Do pick.
- PickResult result = delegate_picker_->Pick(args);
- // Intercept trailing metadata.
- if (result.type == PickResult::PICK_COMPLETE &&
- result.subchannel != nullptr) {
- new (args.call_state->Alloc(sizeof(TrailingMetadataHandler)))
+
+ PickResult Pick(PickArgs args) override {
+ // Do pick.
+ PickResult result = delegate_picker_->Pick(args);
+ // Intercept trailing metadata.
+ if (result.type == PickResult::PICK_COMPLETE &&
+ result.subchannel != nullptr) {
+ new (args.call_state->Alloc(sizeof(TrailingMetadataHandler)))
TrailingMetadataHandler(&result, cb_);
- }
- return result;
- }
-
- private:
- std::unique_ptr<SubchannelPicker> delegate_picker_;
- InterceptRecvTrailingMetadataCallback cb_;
- };
-
- class Helper : public ChannelControlHelper {
- public:
- Helper(
- RefCountedPtr<InterceptRecvTrailingMetadataLoadBalancingPolicy> parent,
+ }
+ return result;
+ }
+
+ private:
+ std::unique_ptr<SubchannelPicker> delegate_picker_;
+ InterceptRecvTrailingMetadataCallback cb_;
+ };
+
+ class Helper : public ChannelControlHelper {
+ public:
+ Helper(
+ RefCountedPtr<InterceptRecvTrailingMetadataLoadBalancingPolicy> parent,
InterceptRecvTrailingMetadataCallback cb)
: parent_(std::move(parent)), cb_(std::move(cb)) {}
-
- RefCountedPtr<SubchannelInterface> CreateSubchannel(
+
+ RefCountedPtr<SubchannelInterface> CreateSubchannel(
ServerAddress address, const grpc_channel_args& args) override {
return parent_->channel_control_helper()->CreateSubchannel(
std::move(address), args);
- }
-
+ }
+
void UpdateState(grpc_connectivity_state state, const y_absl::Status& status,
- std::unique_ptr<SubchannelPicker> picker) override {
- parent_->channel_control_helper()->UpdateState(
+ std::unique_ptr<SubchannelPicker> picker) override {
+ parent_->channel_control_helper()->UpdateState(
state, status, y_absl::make_unique<Picker>(std::move(picker), cb_));
- }
-
- void RequestReresolution() override {
- parent_->channel_control_helper()->RequestReresolution();
- }
-
+ }
+
+ void RequestReresolution() override {
+ parent_->channel_control_helper()->RequestReresolution();
+ }
+
void AddTraceEvent(TraceSeverity severity,
y_absl::string_view message) override {
- parent_->channel_control_helper()->AddTraceEvent(severity, message);
- }
-
- private:
- RefCountedPtr<InterceptRecvTrailingMetadataLoadBalancingPolicy> parent_;
- InterceptRecvTrailingMetadataCallback cb_;
- };
-
- class TrailingMetadataHandler {
- public:
- TrailingMetadataHandler(PickResult* result,
+ parent_->channel_control_helper()->AddTraceEvent(severity, message);
+ }
+
+ private:
+ RefCountedPtr<InterceptRecvTrailingMetadataLoadBalancingPolicy> parent_;
+ InterceptRecvTrailingMetadataCallback cb_;
+ };
+
+ class TrailingMetadataHandler {
+ public:
+ TrailingMetadataHandler(PickResult* result,
InterceptRecvTrailingMetadataCallback cb)
: cb_(std::move(cb)) {
- result->recv_trailing_metadata_ready = [this](grpc_error* error,
- MetadataInterface* metadata,
- CallState* call_state) {
- RecordRecvTrailingMetadata(error, metadata, call_state);
- };
- }
-
- private:
- void RecordRecvTrailingMetadata(grpc_error* /*error*/,
- MetadataInterface* recv_trailing_metadata,
- CallState* call_state) {
+ result->recv_trailing_metadata_ready = [this](grpc_error* error,
+ MetadataInterface* metadata,
+ CallState* call_state) {
+ RecordRecvTrailingMetadata(error, metadata, call_state);
+ };
+ }
+
+ private:
+ void RecordRecvTrailingMetadata(grpc_error* /*error*/,
+ MetadataInterface* recv_trailing_metadata,
+ CallState* call_state) {
TrailingMetadataArgsSeen args_seen;
args_seen.backend_metric_data = call_state->GetBackendMetricData();
- GPR_ASSERT(recv_trailing_metadata != nullptr);
+ GPR_ASSERT(recv_trailing_metadata != nullptr);
args_seen.metadata = CopyMetadataToVector(recv_trailing_metadata);
cb_(args_seen);
- this->~TrailingMetadataHandler();
- }
-
- InterceptRecvTrailingMetadataCallback cb_;
- };
-};
-
+ this->~TrailingMetadataHandler();
+ }
+
+ InterceptRecvTrailingMetadataCallback cb_;
+ };
+};
+
class InterceptTrailingConfig : public LoadBalancingPolicy::Config {
public:
const char* name() const override {
@@ -309,30 +309,30 @@ class InterceptTrailingConfig : public LoadBalancingPolicy::Config {
}
};
-class InterceptTrailingFactory : public LoadBalancingPolicyFactory {
- public:
+class InterceptTrailingFactory : public LoadBalancingPolicyFactory {
+ public:
explicit InterceptTrailingFactory(InterceptRecvTrailingMetadataCallback cb)
: cb_(std::move(cb)) {}
-
- OrphanablePtr<LoadBalancingPolicy> CreateLoadBalancingPolicy(
- LoadBalancingPolicy::Args args) const override {
- return MakeOrphanable<InterceptRecvTrailingMetadataLoadBalancingPolicy>(
+
+ OrphanablePtr<LoadBalancingPolicy> CreateLoadBalancingPolicy(
+ LoadBalancingPolicy::Args args) const override {
+ return MakeOrphanable<InterceptRecvTrailingMetadataLoadBalancingPolicy>(
std::move(args), cb_);
- }
-
- const char* name() const override {
- return kInterceptRecvTrailingMetadataLbPolicyName;
- }
-
- RefCountedPtr<LoadBalancingPolicy::Config> ParseLoadBalancingConfig(
+ }
+
+ const char* name() const override {
+ return kInterceptRecvTrailingMetadataLbPolicyName;
+ }
+
+ RefCountedPtr<LoadBalancingPolicy::Config> ParseLoadBalancingConfig(
const Json& /*json*/, grpc_error** /*error*/) const override {
return MakeRefCounted<InterceptTrailingConfig>();
- }
-
- private:
- InterceptRecvTrailingMetadataCallback cb_;
-};
-
+ }
+
+ private:
+ InterceptRecvTrailingMetadataCallback cb_;
+};
+
//
// AddressTestLoadBalancingPolicy
//
@@ -414,22 +414,22 @@ class AddressTestFactory : public LoadBalancingPolicyFactory {
AddressTestCallback cb_;
};
-} // namespace
-
+} // namespace
+
void RegisterTestPickArgsLoadBalancingPolicy(TestPickArgsCallback cb) {
LoadBalancingPolicyRegistry::Builder::RegisterLoadBalancingPolicyFactory(
y_absl::make_unique<TestPickArgsLbFactory>(std::move(cb)));
}
-void RegisterInterceptRecvTrailingMetadataLoadBalancingPolicy(
+void RegisterInterceptRecvTrailingMetadataLoadBalancingPolicy(
InterceptRecvTrailingMetadataCallback cb) {
- LoadBalancingPolicyRegistry::Builder::RegisterLoadBalancingPolicyFactory(
+ LoadBalancingPolicyRegistry::Builder::RegisterLoadBalancingPolicyFactory(
y_absl::make_unique<InterceptTrailingFactory>(std::move(cb)));
-}
-
+}
+
void RegisterAddressTestLoadBalancingPolicy(AddressTestCallback cb) {
LoadBalancingPolicyRegistry::Builder::RegisterLoadBalancingPolicyFactory(
y_absl::make_unique<AddressTestFactory>(std::move(cb)));
}
-} // namespace grpc_core
+} // namespace grpc_core
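For context, a hedged sketch of how a test wires in the intercept_trailing_metadata_lb policy defined above. Registration normally happens before grpc_init(); the service-config JSON in the trailing comment is the usual way to select an LB policy by name, but it is not part of this diff.

    grpc_core::RegisterInterceptRecvTrailingMetadataLoadBalancingPolicy(
        [](const grpc_core::TrailingMetadataArgsSeen& args_seen) {
          // Inspect args_seen.metadata / args_seen.backend_metric_data here,
          // e.g. copy them into test-owned state guarded by a mutex.
        });
    // Then select the policy per channel, e.g. via service config:
    //   "{\"loadBalancingConfig\": [{\"intercept_trailing_metadata_lb\": {}}]}"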
diff --git a/contrib/libs/grpc/test/core/util/test_lb_policies.h b/contrib/libs/grpc/test/core/util/test_lb_policies.h
index a5c02283b6..9e14c707a3 100644
--- a/contrib/libs/grpc/test/core/util/test_lb_policies.h
+++ b/contrib/libs/grpc/test/core/util/test_lb_policies.h
@@ -1,30 +1,30 @@
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#ifndef GRPC_TEST_CORE_UTIL_TEST_LB_POLICIES_H
-#define GRPC_TEST_CORE_UTIL_TEST_LB_POLICIES_H
-
-#include "src/core/ext/filters/client_channel/lb_policy.h"
-
-namespace grpc_core {
-
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#ifndef GRPC_TEST_CORE_UTIL_TEST_LB_POLICIES_H
+#define GRPC_TEST_CORE_UTIL_TEST_LB_POLICIES_H
+
+#include "src/core/ext/filters/client_channel/lb_policy.h"
+
+namespace grpc_core {
+
using MetadataVector = std::vector<std::pair<TString, TString>>;
-
+
struct PickArgsSeen {
TString path;
MetadataVector metadata;
@@ -44,17 +44,17 @@ struct TrailingMetadataArgsSeen {
using InterceptRecvTrailingMetadataCallback =
std::function<void(const TrailingMetadataArgsSeen&)>;
-// Registers an LB policy called "intercept_trailing_metadata_lb" that
+// Registers an LB policy called "intercept_trailing_metadata_lb" that
// invokes cb when trailing metadata is received for each call.
-void RegisterInterceptRecvTrailingMetadataLoadBalancingPolicy(
+void RegisterInterceptRecvTrailingMetadataLoadBalancingPolicy(
InterceptRecvTrailingMetadataCallback cb);
-
+
using AddressTestCallback = std::function<void(const ServerAddress&)>;
// Registers an LB policy called "address_test_lb" that invokes cb for each
// address used to create a subchannel.
void RegisterAddressTestLoadBalancingPolicy(AddressTestCallback cb);
-} // namespace grpc_core
-
-#endif // GRPC_TEST_CORE_UTIL_TEST_LB_POLICIES_H
+} // namespace grpc_core
+
+#endif // GRPC_TEST_CORE_UTIL_TEST_LB_POLICIES_H
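And a matching sketch for the address-test hook declared above; the callback body is purely illustrative.

    grpc_core::RegisterAddressTestLoadBalancingPolicy(
        [](const grpc_core::ServerAddress& address) {
          // Called once per subchannel-creation address; a test would typically
          // record the address into its own list for later assertions.
        });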
diff --git a/contrib/libs/grpc/test/core/util/test_tcp_server.cc b/contrib/libs/grpc/test/core/util/test_tcp_server.cc
index 9550ef6b08..9dd1e2af36 100644
--- a/contrib/libs/grpc/test/core/util/test_tcp_server.cc
+++ b/contrib/libs/grpc/test/core/util/test_tcp_server.cc
@@ -1,116 +1,116 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include "src/core/lib/iomgr/sockaddr.h"
-#include "src/core/lib/iomgr/socket_utils.h"
-
-#include "test/core/util/test_tcp_server.h"
-
-#include <grpc/grpc.h>
-#include <grpc/support/alloc.h>
-#include <grpc/support/log.h>
-#include <grpc/support/sync.h>
-#include <grpc/support/time.h>
-#include <string.h>
-
-#include "src/core/lib/iomgr/endpoint.h"
-#include "src/core/lib/iomgr/resolve_address.h"
-#include "src/core/lib/iomgr/tcp_server.h"
-#include "test/core/util/port.h"
-#include "test/core/util/test_config.h"
-
-static void on_server_destroyed(void* data, grpc_error* /*error*/) {
- test_tcp_server* server = static_cast<test_tcp_server*>(data);
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include "src/core/lib/iomgr/sockaddr.h"
+#include "src/core/lib/iomgr/socket_utils.h"
+
+#include "test/core/util/test_tcp_server.h"
+
+#include <grpc/grpc.h>
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/sync.h>
+#include <grpc/support/time.h>
+#include <string.h>
+
+#include "src/core/lib/iomgr/endpoint.h"
+#include "src/core/lib/iomgr/resolve_address.h"
+#include "src/core/lib/iomgr/tcp_server.h"
+#include "test/core/util/port.h"
+#include "test/core/util/test_config.h"
+
+static void on_server_destroyed(void* data, grpc_error* /*error*/) {
+ test_tcp_server* server = static_cast<test_tcp_server*>(data);
server->shutdown = true;
-}
-
-void test_tcp_server_init(test_tcp_server* server,
- grpc_tcp_server_cb on_connect, void* user_data) {
- grpc_init();
- GRPC_CLOSURE_INIT(&server->shutdown_complete, on_server_destroyed, server,
- grpc_schedule_on_exec_ctx);
+}
+
+void test_tcp_server_init(test_tcp_server* server,
+ grpc_tcp_server_cb on_connect, void* user_data) {
+ grpc_init();
+ GRPC_CLOSURE_INIT(&server->shutdown_complete, on_server_destroyed, server,
+ grpc_schedule_on_exec_ctx);
grpc_pollset* pollset =
static_cast<grpc_pollset*>(gpr_zalloc(grpc_pollset_size()));
grpc_pollset_init(pollset, &server->mu);
server->pollset.push_back(pollset);
- server->on_connect = on_connect;
- server->cb_data = user_data;
-}
-
-void test_tcp_server_start(test_tcp_server* server, int port) {
- grpc_resolved_address resolved_addr;
- grpc_sockaddr_in* addr =
- reinterpret_cast<grpc_sockaddr_in*>(resolved_addr.addr);
- int port_added;
- grpc_core::ExecCtx exec_ctx;
-
- addr->sin_family = GRPC_AF_INET;
- addr->sin_port = grpc_htons(static_cast<uint16_t>(port));
- memset(&addr->sin_addr, 0, sizeof(addr->sin_addr));
-
- grpc_error* error = grpc_tcp_server_create(&server->shutdown_complete,
- nullptr, &server->tcp_server);
- GPR_ASSERT(error == GRPC_ERROR_NONE);
- error =
- grpc_tcp_server_add_port(server->tcp_server, &resolved_addr, &port_added);
- GPR_ASSERT(error == GRPC_ERROR_NONE);
- GPR_ASSERT(port_added == port);
-
+ server->on_connect = on_connect;
+ server->cb_data = user_data;
+}
+
+void test_tcp_server_start(test_tcp_server* server, int port) {
+ grpc_resolved_address resolved_addr;
+ grpc_sockaddr_in* addr =
+ reinterpret_cast<grpc_sockaddr_in*>(resolved_addr.addr);
+ int port_added;
+ grpc_core::ExecCtx exec_ctx;
+
+ addr->sin_family = GRPC_AF_INET;
+ addr->sin_port = grpc_htons(static_cast<uint16_t>(port));
+ memset(&addr->sin_addr, 0, sizeof(addr->sin_addr));
+
+ grpc_error* error = grpc_tcp_server_create(&server->shutdown_complete,
+ nullptr, &server->tcp_server);
+ GPR_ASSERT(error == GRPC_ERROR_NONE);
+ error =
+ grpc_tcp_server_add_port(server->tcp_server, &resolved_addr, &port_added);
+ GPR_ASSERT(error == GRPC_ERROR_NONE);
+ GPR_ASSERT(port_added == port);
+
grpc_tcp_server_start(server->tcp_server, &server->pollset,
- server->on_connect, server->cb_data);
- gpr_log(GPR_INFO, "test tcp server listening on 0.0.0.0:%d", port);
-}
-
-void test_tcp_server_poll(test_tcp_server* server, int milliseconds) {
- grpc_pollset_worker* worker = nullptr;
- grpc_core::ExecCtx exec_ctx;
- grpc_millis deadline = grpc_timespec_to_millis_round_up(
- grpc_timeout_milliseconds_to_deadline(milliseconds));
- gpr_mu_lock(server->mu);
- GRPC_LOG_IF_ERROR("pollset_work",
+ server->on_connect, server->cb_data);
+ gpr_log(GPR_INFO, "test tcp server listening on 0.0.0.0:%d", port);
+}
+
+void test_tcp_server_poll(test_tcp_server* server, int milliseconds) {
+ grpc_pollset_worker* worker = nullptr;
+ grpc_core::ExecCtx exec_ctx;
+ grpc_millis deadline = grpc_timespec_to_millis_round_up(
+ grpc_timeout_milliseconds_to_deadline(milliseconds));
+ gpr_mu_lock(server->mu);
+ GRPC_LOG_IF_ERROR("pollset_work",
grpc_pollset_work(server->pollset[0], &worker, deadline));
- gpr_mu_unlock(server->mu);
-}
-
-static void do_nothing(void* /*arg*/, grpc_error* /*error*/) {}
-static void finish_pollset(void* arg, grpc_error* /*error*/) {
- grpc_pollset_destroy(static_cast<grpc_pollset*>(arg));
-}
-
-void test_tcp_server_destroy(test_tcp_server* server) {
- grpc_core::ExecCtx exec_ctx;
- gpr_timespec shutdown_deadline;
- grpc_closure do_nothing_cb;
- grpc_tcp_server_unref(server->tcp_server);
- GRPC_CLOSURE_INIT(&do_nothing_cb, do_nothing, nullptr,
- grpc_schedule_on_exec_ctx);
- shutdown_deadline = gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
- gpr_time_from_seconds(5, GPR_TIMESPAN));
- grpc_core::ExecCtx::Get()->Flush();
- while (!server->shutdown &&
- gpr_time_cmp(gpr_now(GPR_CLOCK_MONOTONIC), shutdown_deadline) < 0) {
- test_tcp_server_poll(server, 1000);
- }
+ gpr_mu_unlock(server->mu);
+}
+
+static void do_nothing(void* /*arg*/, grpc_error* /*error*/) {}
+static void finish_pollset(void* arg, grpc_error* /*error*/) {
+ grpc_pollset_destroy(static_cast<grpc_pollset*>(arg));
+}
+
+void test_tcp_server_destroy(test_tcp_server* server) {
+ grpc_core::ExecCtx exec_ctx;
+ gpr_timespec shutdown_deadline;
+ grpc_closure do_nothing_cb;
+ grpc_tcp_server_unref(server->tcp_server);
+ GRPC_CLOSURE_INIT(&do_nothing_cb, do_nothing, nullptr,
+ grpc_schedule_on_exec_ctx);
+ shutdown_deadline = gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
+ gpr_time_from_seconds(5, GPR_TIMESPAN));
+ grpc_core::ExecCtx::Get()->Flush();
+ while (!server->shutdown &&
+ gpr_time_cmp(gpr_now(GPR_CLOCK_MONOTONIC), shutdown_deadline) < 0) {
+ test_tcp_server_poll(server, 1000);
+ }
grpc_pollset_shutdown(server->pollset[0],
GRPC_CLOSURE_CREATE(finish_pollset, server->pollset[0],
- grpc_schedule_on_exec_ctx));
- grpc_core::ExecCtx::Get()->Flush();
+ grpc_schedule_on_exec_ctx));
+ grpc_core::ExecCtx::Get()->Flush();
gpr_free(server->pollset[0]);
- grpc_shutdown();
-}
+ grpc_shutdown();
+}
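
The helpers above wire a real TCP listener into a single pollset so a test can accept raw connections. A minimal end-to-end sketch follows; the `grpc_tcp_server_cb` parameter list is assumed from `src/core/lib/iomgr/tcp_server.h` at this revision, and `on_connect`/`run_listener_once` are illustrative names, not code from the tests themselves.

```c++
#include "src/core/lib/iomgr/endpoint.h"
#include "test/core/util/port.h"
#include "test/core/util/test_tcp_server.h"

// Hypothetical connection callback; signature assumed from grpc_tcp_server_cb.
static void on_connect(void* /*arg*/, grpc_endpoint* ep,
                       grpc_pollset* /*accepting_pollset*/,
                       grpc_tcp_server_acceptor* /*acceptor*/) {
  grpc_endpoint_destroy(ep);  // drop the accepted connection immediately
}

static void run_listener_once() {
  test_tcp_server server;  // stack allocation, as required by the header
  const int port = grpc_pick_unused_port_or_die();
  test_tcp_server_init(&server, on_connect, /*user_data=*/nullptr);
  test_tcp_server_start(&server, port);                  // binds 0.0.0.0:<port>
  test_tcp_server_poll(&server, /*milliseconds=*/100);   // pump the pollset once
  test_tcp_server_destroy(&server);  // waits up to 5s for shutdown, frees pollset
}
```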
diff --git a/contrib/libs/grpc/test/core/util/test_tcp_server.h b/contrib/libs/grpc/test/core/util/test_tcp_server.h
index 8b40f3a1be..8765ea9a22 100644
--- a/contrib/libs/grpc/test/core/util/test_tcp_server.h
+++ b/contrib/libs/grpc/test/core/util/test_tcp_server.h
@@ -1,47 +1,47 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#ifndef GRPC_TEST_CORE_UTIL_TEST_TCP_SERVER_H
-#define GRPC_TEST_CORE_UTIL_TEST_TCP_SERVER_H
-
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#ifndef GRPC_TEST_CORE_UTIL_TEST_TCP_SERVER_H
+#define GRPC_TEST_CORE_UTIL_TEST_TCP_SERVER_H
+
#include <vector>
-#include <grpc/support/sync.h>
-#include "src/core/lib/iomgr/tcp_server.h"
-
+#include <grpc/support/sync.h>
+#include "src/core/lib/iomgr/tcp_server.h"
+
// test_tcp_server should be stack-allocated or new'ed, never gpr_malloc'ed
// since it contains C++ objects.
struct test_tcp_server {
grpc_tcp_server* tcp_server = nullptr;
- grpc_closure shutdown_complete;
+ grpc_closure shutdown_complete;
bool shutdown = false;
// mu is filled in by grpc_pollset_init and controls the pollset.
// TODO: Switch this to a Mutex once pollset_init can provide a Mutex
- gpr_mu* mu;
+ gpr_mu* mu;
std::vector<grpc_pollset*> pollset;
- grpc_tcp_server_cb on_connect;
- void* cb_data;
+ grpc_tcp_server_cb on_connect;
+ void* cb_data;
};
-
-void test_tcp_server_init(test_tcp_server* server,
- grpc_tcp_server_cb on_connect, void* user_data);
-void test_tcp_server_start(test_tcp_server* server, int port);
-void test_tcp_server_poll(test_tcp_server* server, int milliseconds);
-void test_tcp_server_destroy(test_tcp_server* server);
-
-#endif /* GRPC_TEST_CORE_UTIL_TEST_TCP_SERVER_H */
+
+void test_tcp_server_init(test_tcp_server* server,
+ grpc_tcp_server_cb on_connect, void* user_data);
+void test_tcp_server_start(test_tcp_server* server, int port);
+void test_tcp_server_poll(test_tcp_server* server, int milliseconds);
+void test_tcp_server_destroy(test_tcp_server* server);
+
+#endif /* GRPC_TEST_CORE_UTIL_TEST_TCP_SERVER_H */
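
As a quick illustration of the allocation rule in the comment above (a sketch, not code from the tests):

```c++
test_tcp_server on_stack;               // OK: C++ constructors run
auto* on_heap = new test_tcp_server();  // OK: operator new runs them too
// Not OK: gpr_malloc(sizeof(test_tcp_server)) would leave the std::vector
// member `pollset` unconstructed, which is undefined behavior.
```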
diff --git a/contrib/libs/grpc/test/core/util/tracer_util.cc b/contrib/libs/grpc/test/core/util/tracer_util.cc
index 6b4dfbdccd..34a132daa7 100644
--- a/contrib/libs/grpc/test/core/util/tracer_util.cc
+++ b/contrib/libs/grpc/test/core/util/tracer_util.cc
@@ -1,31 +1,31 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include "test/core/util/test_config.h"
-
-#include "src/core/lib/debug/trace.h"
-
-namespace grpc_core {
-namespace testing {
-
-void grpc_tracer_enable_flag(grpc_core::TraceFlag* flag) {
- flag->set_enabled(1);
-}
-
-} // namespace testing
-} // namespace grpc_core
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include "test/core/util/test_config.h"
+
+#include "src/core/lib/debug/trace.h"
+
+namespace grpc_core {
+namespace testing {
+
+void grpc_tracer_enable_flag(grpc_core::TraceFlag* flag) {
+ flag->set_enabled(1);
+}
+
+} // namespace testing
+} // namespace grpc_core
diff --git a/contrib/libs/grpc/test/core/util/tracer_util.h b/contrib/libs/grpc/test/core/util/tracer_util.h
index a4ad95ab5c..0b432ffa46 100644
--- a/contrib/libs/grpc/test/core/util/tracer_util.h
+++ b/contrib/libs/grpc/test/core/util/tracer_util.h
@@ -1,32 +1,32 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#ifndef GRPC_TEST_CORE_UTIL_TRACER_UTIL_H
-#define GRPC_TEST_CORE_UTIL_TRACER_UTIL_H
-
-namespace grpc_core {
-class TraceFlag;
-
-namespace testing {
-// enables the TraceFlag passed to it. Used for testing purposes.
-void grpc_tracer_enable_flag(grpc_core::TraceFlag* flag);
-
-} // namespace testing
-} // namespace grpc_core
-
-#endif /* GRPC_TEST_CORE_UTIL_TRACER_UTIL_H */
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#ifndef GRPC_TEST_CORE_UTIL_TRACER_UTIL_H
+#define GRPC_TEST_CORE_UTIL_TRACER_UTIL_H
+
+namespace grpc_core {
+class TraceFlag;
+
+namespace testing {
+// enables the TraceFlag passed to it. Used for testing purposes.
+void grpc_tracer_enable_flag(grpc_core::TraceFlag* flag);
+
+} // namespace testing
+} // namespace grpc_core
+
+#endif /* GRPC_TEST_CORE_UTIL_TRACER_UTIL_H */
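
A short sketch of how the helper is meant to be used from a test. The `TraceFlag(bool, const char*)` constructor and the `enabled()` accessor are assumed from `src/core/lib/debug/trace.h` at this revision, and `test_demo_trace` is a hypothetical flag; real flags live next to the code they trace.

```c++
#include <grpc/support/log.h>

#include "src/core/lib/debug/trace.h"
#include "test/core/util/tracer_util.h"

// Hypothetical flag used only for this illustration.
grpc_core::TraceFlag test_demo_trace(false, "test_demo");

void enable_demo_tracing() {
  grpc_core::testing::grpc_tracer_enable_flag(&test_demo_trace);
  GPR_ASSERT(test_demo_trace.enabled());  // the flag is now on for this process
}
```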
diff --git a/contrib/libs/grpc/test/core/util/trickle_endpoint.cc b/contrib/libs/grpc/test/core/util/trickle_endpoint.cc
index 496e2586ab..28ffb0e3e7 100644
--- a/contrib/libs/grpc/test/core/util/trickle_endpoint.cc
+++ b/contrib/libs/grpc/test/core/util/trickle_endpoint.cc
@@ -1,215 +1,215 @@
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include "src/core/lib/iomgr/sockaddr.h"
-
-#include "test/core/util/passthru_endpoint.h"
-
-#include <inttypes.h>
-#include <string.h>
-
-#include <grpc/support/alloc.h>
-#include <grpc/support/log.h>
-#include <grpc/support/string_util.h>
-
-#include "src/core/lib/gpr/useful.h"
-#include "src/core/lib/slice/slice_internal.h"
-
-#define WRITE_BUFFER_SIZE (2 * 1024 * 1024)
-
-typedef struct {
- grpc_endpoint base;
- double bytes_per_second;
- grpc_endpoint* wrapped;
- gpr_timespec last_write;
-
- gpr_mu mu;
- grpc_slice_buffer write_buffer;
- grpc_slice_buffer writing_buffer;
- grpc_error* error;
- bool writing;
- grpc_closure* write_cb;
-} trickle_endpoint;
-
-static void te_read(grpc_endpoint* ep, grpc_slice_buffer* slices,
- grpc_closure* cb, bool urgent) {
- trickle_endpoint* te = reinterpret_cast<trickle_endpoint*>(ep);
- grpc_endpoint_read(te->wrapped, slices, cb, urgent);
-}
-
-static void maybe_call_write_cb_locked(trickle_endpoint* te) {
- if (te->write_cb != nullptr &&
- (te->error != GRPC_ERROR_NONE ||
- te->write_buffer.length <= WRITE_BUFFER_SIZE)) {
- grpc_core::ExecCtx::Run(DEBUG_LOCATION, te->write_cb,
- GRPC_ERROR_REF(te->error));
- te->write_cb = nullptr;
- }
-}
-
-static void te_write(grpc_endpoint* ep, grpc_slice_buffer* slices,
- grpc_closure* cb, void* /*arg*/) {
- trickle_endpoint* te = reinterpret_cast<trickle_endpoint*>(ep);
- gpr_mu_lock(&te->mu);
- GPR_ASSERT(te->write_cb == nullptr);
- if (te->write_buffer.length == 0) {
- te->last_write = gpr_now(GPR_CLOCK_MONOTONIC);
- }
- for (size_t i = 0; i < slices->count; i++) {
- grpc_slice_buffer_add(&te->write_buffer,
- grpc_slice_copy(slices->slices[i]));
- }
- te->write_cb = cb;
- maybe_call_write_cb_locked(te);
- gpr_mu_unlock(&te->mu);
-}
-
-static void te_add_to_pollset(grpc_endpoint* ep, grpc_pollset* pollset) {
- trickle_endpoint* te = reinterpret_cast<trickle_endpoint*>(ep);
- grpc_endpoint_add_to_pollset(te->wrapped, pollset);
-}
-
-static void te_add_to_pollset_set(grpc_endpoint* ep,
- grpc_pollset_set* pollset_set) {
- trickle_endpoint* te = reinterpret_cast<trickle_endpoint*>(ep);
- grpc_endpoint_add_to_pollset_set(te->wrapped, pollset_set);
-}
-
-static void te_delete_from_pollset_set(grpc_endpoint* ep,
- grpc_pollset_set* pollset_set) {
- trickle_endpoint* te = reinterpret_cast<trickle_endpoint*>(ep);
- grpc_endpoint_delete_from_pollset_set(te->wrapped, pollset_set);
-}
-
-static void te_shutdown(grpc_endpoint* ep, grpc_error* why) {
- trickle_endpoint* te = reinterpret_cast<trickle_endpoint*>(ep);
- gpr_mu_lock(&te->mu);
- if (te->error == GRPC_ERROR_NONE) {
- te->error = GRPC_ERROR_REF(why);
- }
- maybe_call_write_cb_locked(te);
- gpr_mu_unlock(&te->mu);
- grpc_endpoint_shutdown(te->wrapped, why);
-}
-
-static void te_destroy(grpc_endpoint* ep) {
- trickle_endpoint* te = reinterpret_cast<trickle_endpoint*>(ep);
- grpc_endpoint_destroy(te->wrapped);
- gpr_mu_destroy(&te->mu);
- grpc_slice_buffer_destroy_internal(&te->write_buffer);
- grpc_slice_buffer_destroy_internal(&te->writing_buffer);
- GRPC_ERROR_UNREF(te->error);
- gpr_free(te);
-}
-
-static grpc_resource_user* te_get_resource_user(grpc_endpoint* ep) {
- trickle_endpoint* te = reinterpret_cast<trickle_endpoint*>(ep);
- return grpc_endpoint_get_resource_user(te->wrapped);
-}
-
+/*
+ *
+ * Copyright 2016 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include "src/core/lib/iomgr/sockaddr.h"
+
+#include "test/core/util/passthru_endpoint.h"
+
+#include <inttypes.h>
+#include <string.h>
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/string_util.h>
+
+#include "src/core/lib/gpr/useful.h"
+#include "src/core/lib/slice/slice_internal.h"
+
+#define WRITE_BUFFER_SIZE (2 * 1024 * 1024)
+
+typedef struct {
+ grpc_endpoint base;
+ double bytes_per_second;
+ grpc_endpoint* wrapped;
+ gpr_timespec last_write;
+
+ gpr_mu mu;
+ grpc_slice_buffer write_buffer;
+ grpc_slice_buffer writing_buffer;
+ grpc_error* error;
+ bool writing;
+ grpc_closure* write_cb;
+} trickle_endpoint;
+
+static void te_read(grpc_endpoint* ep, grpc_slice_buffer* slices,
+ grpc_closure* cb, bool urgent) {
+ trickle_endpoint* te = reinterpret_cast<trickle_endpoint*>(ep);
+ grpc_endpoint_read(te->wrapped, slices, cb, urgent);
+}
+
+static void maybe_call_write_cb_locked(trickle_endpoint* te) {
+ if (te->write_cb != nullptr &&
+ (te->error != GRPC_ERROR_NONE ||
+ te->write_buffer.length <= WRITE_BUFFER_SIZE)) {
+ grpc_core::ExecCtx::Run(DEBUG_LOCATION, te->write_cb,
+ GRPC_ERROR_REF(te->error));
+ te->write_cb = nullptr;
+ }
+}
+
+static void te_write(grpc_endpoint* ep, grpc_slice_buffer* slices,
+ grpc_closure* cb, void* /*arg*/) {
+ trickle_endpoint* te = reinterpret_cast<trickle_endpoint*>(ep);
+ gpr_mu_lock(&te->mu);
+ GPR_ASSERT(te->write_cb == nullptr);
+ if (te->write_buffer.length == 0) {
+ te->last_write = gpr_now(GPR_CLOCK_MONOTONIC);
+ }
+ for (size_t i = 0; i < slices->count; i++) {
+ grpc_slice_buffer_add(&te->write_buffer,
+ grpc_slice_copy(slices->slices[i]));
+ }
+ te->write_cb = cb;
+ maybe_call_write_cb_locked(te);
+ gpr_mu_unlock(&te->mu);
+}
+
+static void te_add_to_pollset(grpc_endpoint* ep, grpc_pollset* pollset) {
+ trickle_endpoint* te = reinterpret_cast<trickle_endpoint*>(ep);
+ grpc_endpoint_add_to_pollset(te->wrapped, pollset);
+}
+
+static void te_add_to_pollset_set(grpc_endpoint* ep,
+ grpc_pollset_set* pollset_set) {
+ trickle_endpoint* te = reinterpret_cast<trickle_endpoint*>(ep);
+ grpc_endpoint_add_to_pollset_set(te->wrapped, pollset_set);
+}
+
+static void te_delete_from_pollset_set(grpc_endpoint* ep,
+ grpc_pollset_set* pollset_set) {
+ trickle_endpoint* te = reinterpret_cast<trickle_endpoint*>(ep);
+ grpc_endpoint_delete_from_pollset_set(te->wrapped, pollset_set);
+}
+
+static void te_shutdown(grpc_endpoint* ep, grpc_error* why) {
+ trickle_endpoint* te = reinterpret_cast<trickle_endpoint*>(ep);
+ gpr_mu_lock(&te->mu);
+ if (te->error == GRPC_ERROR_NONE) {
+ te->error = GRPC_ERROR_REF(why);
+ }
+ maybe_call_write_cb_locked(te);
+ gpr_mu_unlock(&te->mu);
+ grpc_endpoint_shutdown(te->wrapped, why);
+}
+
+static void te_destroy(grpc_endpoint* ep) {
+ trickle_endpoint* te = reinterpret_cast<trickle_endpoint*>(ep);
+ grpc_endpoint_destroy(te->wrapped);
+ gpr_mu_destroy(&te->mu);
+ grpc_slice_buffer_destroy_internal(&te->write_buffer);
+ grpc_slice_buffer_destroy_internal(&te->writing_buffer);
+ GRPC_ERROR_UNREF(te->error);
+ gpr_free(te);
+}
+
+static grpc_resource_user* te_get_resource_user(grpc_endpoint* ep) {
+ trickle_endpoint* te = reinterpret_cast<trickle_endpoint*>(ep);
+ return grpc_endpoint_get_resource_user(te->wrapped);
+}
+
static y_absl::string_view te_get_peer(grpc_endpoint* ep) {
- trickle_endpoint* te = reinterpret_cast<trickle_endpoint*>(ep);
- return grpc_endpoint_get_peer(te->wrapped);
-}
-
+ trickle_endpoint* te = reinterpret_cast<trickle_endpoint*>(ep);
+ return grpc_endpoint_get_peer(te->wrapped);
+}
+
static y_absl::string_view te_get_local_address(grpc_endpoint* ep) {
trickle_endpoint* te = reinterpret_cast<trickle_endpoint*>(ep);
return grpc_endpoint_get_local_address(te->wrapped);
}
-static int te_get_fd(grpc_endpoint* ep) {
- trickle_endpoint* te = reinterpret_cast<trickle_endpoint*>(ep);
- return grpc_endpoint_get_fd(te->wrapped);
-}
-
-static bool te_can_track_err(grpc_endpoint* /*ep*/) { return false; }
-
-static void te_finish_write(void* arg, grpc_error* /*error*/) {
- trickle_endpoint* te = static_cast<trickle_endpoint*>(arg);
- gpr_mu_lock(&te->mu);
- te->writing = false;
- grpc_slice_buffer_reset_and_unref(&te->writing_buffer);
- gpr_mu_unlock(&te->mu);
-}
-
-static const grpc_endpoint_vtable vtable = {te_read,
- te_write,
- te_add_to_pollset,
- te_add_to_pollset_set,
- te_delete_from_pollset_set,
- te_shutdown,
- te_destroy,
- te_get_resource_user,
- te_get_peer,
+static int te_get_fd(grpc_endpoint* ep) {
+ trickle_endpoint* te = reinterpret_cast<trickle_endpoint*>(ep);
+ return grpc_endpoint_get_fd(te->wrapped);
+}
+
+static bool te_can_track_err(grpc_endpoint* /*ep*/) { return false; }
+
+static void te_finish_write(void* arg, grpc_error* /*error*/) {
+ trickle_endpoint* te = static_cast<trickle_endpoint*>(arg);
+ gpr_mu_lock(&te->mu);
+ te->writing = false;
+ grpc_slice_buffer_reset_and_unref(&te->writing_buffer);
+ gpr_mu_unlock(&te->mu);
+}
+
+static const grpc_endpoint_vtable vtable = {te_read,
+ te_write,
+ te_add_to_pollset,
+ te_add_to_pollset_set,
+ te_delete_from_pollset_set,
+ te_shutdown,
+ te_destroy,
+ te_get_resource_user,
+ te_get_peer,
te_get_local_address,
- te_get_fd,
- te_can_track_err};
-
-grpc_endpoint* grpc_trickle_endpoint_create(grpc_endpoint* wrap,
- double bytes_per_second) {
- trickle_endpoint* te =
- static_cast<trickle_endpoint*>(gpr_malloc(sizeof(*te)));
- te->base.vtable = &vtable;
- te->wrapped = wrap;
- te->bytes_per_second = bytes_per_second;
- te->write_cb = nullptr;
- gpr_mu_init(&te->mu);
- grpc_slice_buffer_init(&te->write_buffer);
- grpc_slice_buffer_init(&te->writing_buffer);
- te->error = GRPC_ERROR_NONE;
- te->writing = false;
- return &te->base;
-}
-
-static double ts2dbl(gpr_timespec s) {
- return static_cast<double>(s.tv_sec) + 1e-9 * static_cast<double>(s.tv_nsec);
-}
-
-size_t grpc_trickle_endpoint_trickle(grpc_endpoint* ep) {
- trickle_endpoint* te = reinterpret_cast<trickle_endpoint*>(ep);
- gpr_mu_lock(&te->mu);
- if (!te->writing && te->write_buffer.length > 0) {
- gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
- double elapsed = ts2dbl(gpr_time_sub(now, te->last_write));
- size_t bytes = static_cast<size_t>(te->bytes_per_second * elapsed);
- // gpr_log(GPR_DEBUG, "%lf elapsed --> %" PRIdPTR " bytes", elapsed, bytes);
- if (bytes > 0) {
- grpc_slice_buffer_move_first(&te->write_buffer,
- GPR_MIN(bytes, te->write_buffer.length),
- &te->writing_buffer);
- te->writing = true;
- te->last_write = now;
- grpc_endpoint_write(
- te->wrapped, &te->writing_buffer,
- GRPC_CLOSURE_CREATE(te_finish_write, te, grpc_schedule_on_exec_ctx),
- nullptr);
- maybe_call_write_cb_locked(te);
- }
- }
- size_t backlog = te->write_buffer.length;
- gpr_mu_unlock(&te->mu);
- return backlog;
-}
-
-size_t grpc_trickle_get_backlog(grpc_endpoint* ep) {
- trickle_endpoint* te = reinterpret_cast<trickle_endpoint*>(ep);
- gpr_mu_lock(&te->mu);
- size_t backlog = te->write_buffer.length;
- gpr_mu_unlock(&te->mu);
- return backlog;
-}
+ te_get_fd,
+ te_can_track_err};
+
+grpc_endpoint* grpc_trickle_endpoint_create(grpc_endpoint* wrap,
+ double bytes_per_second) {
+ trickle_endpoint* te =
+ static_cast<trickle_endpoint*>(gpr_malloc(sizeof(*te)));
+ te->base.vtable = &vtable;
+ te->wrapped = wrap;
+ te->bytes_per_second = bytes_per_second;
+ te->write_cb = nullptr;
+ gpr_mu_init(&te->mu);
+ grpc_slice_buffer_init(&te->write_buffer);
+ grpc_slice_buffer_init(&te->writing_buffer);
+ te->error = GRPC_ERROR_NONE;
+ te->writing = false;
+ return &te->base;
+}
+
+static double ts2dbl(gpr_timespec s) {
+ return static_cast<double>(s.tv_sec) + 1e-9 * static_cast<double>(s.tv_nsec);
+}
+
+size_t grpc_trickle_endpoint_trickle(grpc_endpoint* ep) {
+ trickle_endpoint* te = reinterpret_cast<trickle_endpoint*>(ep);
+ gpr_mu_lock(&te->mu);
+ if (!te->writing && te->write_buffer.length > 0) {
+ gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
+ double elapsed = ts2dbl(gpr_time_sub(now, te->last_write));
+ size_t bytes = static_cast<size_t>(te->bytes_per_second * elapsed);
+ // gpr_log(GPR_DEBUG, "%lf elapsed --> %" PRIdPTR " bytes", elapsed, bytes);
+ if (bytes > 0) {
+ grpc_slice_buffer_move_first(&te->write_buffer,
+ GPR_MIN(bytes, te->write_buffer.length),
+ &te->writing_buffer);
+ te->writing = true;
+ te->last_write = now;
+ grpc_endpoint_write(
+ te->wrapped, &te->writing_buffer,
+ GRPC_CLOSURE_CREATE(te_finish_write, te, grpc_schedule_on_exec_ctx),
+ nullptr);
+ maybe_call_write_cb_locked(te);
+ }
+ }
+ size_t backlog = te->write_buffer.length;
+ gpr_mu_unlock(&te->mu);
+ return backlog;
+}
+
+size_t grpc_trickle_get_backlog(grpc_endpoint* ep) {
+ trickle_endpoint* te = reinterpret_cast<trickle_endpoint*>(ep);
+ gpr_mu_lock(&te->mu);
+ size_t backlog = te->write_buffer.length;
+ gpr_mu_unlock(&te->mu);
+ return backlog;
+}
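
The pacing above reduces to `bytes = bytes_per_second * elapsed`, measured from the last write: for example, with `bytes_per_second = 1e6` and 50 ms between `grpc_trickle_endpoint_trickle()` calls, roughly 50,000 bytes move from `write_buffer` into `writing_buffer` and are written to the wrapped endpoint; the pending write callback is released once the backlog drops to `WRITE_BUFFER_SIZE` or below, and the remaining backlog is returned to the caller.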
diff --git a/contrib/libs/grpc/test/core/util/trickle_endpoint.h b/contrib/libs/grpc/test/core/util/trickle_endpoint.h
index 4912747d2a..cd07de905a 100644
--- a/contrib/libs/grpc/test/core/util/trickle_endpoint.h
+++ b/contrib/libs/grpc/test/core/util/trickle_endpoint.h
@@ -1,32 +1,32 @@
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#ifndef TRICKLE_ENDPOINT_H
-#define TRICKLE_ENDPOINT_H
-
-#include "src/core/lib/iomgr/endpoint.h"
-
-grpc_endpoint* grpc_trickle_endpoint_create(grpc_endpoint* wrap,
- double bytes_per_second);
-
-/* Allow up to \a bytes through the endpoint. Returns the new backlog. */
-size_t grpc_trickle_endpoint_trickle(grpc_endpoint* endpoint);
-
-size_t grpc_trickle_get_backlog(grpc_endpoint* endpoint);
-
-#endif
+/*
+ *
+ * Copyright 2016 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#ifndef TRICKLE_ENDPOINT_H
+#define TRICKLE_ENDPOINT_H
+
+#include "src/core/lib/iomgr/endpoint.h"
+
+grpc_endpoint* grpc_trickle_endpoint_create(grpc_endpoint* wrap,
+ double bytes_per_second);
+
+/* Let the next rate-limited chunk of buffered data through the endpoint. Returns the new backlog. */
+size_t grpc_trickle_endpoint_trickle(grpc_endpoint* endpoint);
+
+size_t grpc_trickle_get_backlog(grpc_endpoint* endpoint);
+
+#endif
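
A minimal driver loop for this API, as a sketch only: `base` stands in for whatever endpoint the benchmark already owns, and a real test would run this under a `grpc_core::ExecCtx` and poll between iterations so the internal write-completion closure can fire.

```c++
#include "test/core/util/trickle_endpoint.h"

// Wrap an endpoint so writes drain at roughly 64 KiB/s, then pump it until
// the buffered backlog is gone.
void drain_through_trickle(grpc_endpoint* base) {
  grpc_endpoint* ep =
      grpc_trickle_endpoint_create(base, /*bytes_per_second=*/65536.0);
  // ... issue grpc_endpoint_write() calls against `ep` as usual ...
  while (grpc_trickle_get_backlog(ep) > 0) {
    grpc_trickle_endpoint_trickle(ep);  // releases the next time-sliced chunk
  }
  grpc_endpoint_destroy(ep);  // also destroys the wrapped endpoint
}
```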
diff --git a/contrib/libs/grpc/test/core/util/tsan_suppressions.txt b/contrib/libs/grpc/test/core/util/tsan_suppressions.txt
index 8db4f52d4a..e0c7907228 100644
--- a/contrib/libs/grpc/test/core/util/tsan_suppressions.txt
+++ b/contrib/libs/grpc/test/core/util/tsan_suppressions.txt
@@ -1,13 +1,13 @@
-# OPENSSL_cleanse does racy access to a global
-race:OPENSSL_cleanse
-race:cleanse_ctr
-# these are legitimate races in OpenSSL, and it appears those folks are looking at it
-# https://www.mail-archive.com/openssl-dev@openssl.org/msg09019.html
-race:ssleay_rand_add
-race:ssleay_rand_bytes
-race:__sleep_for
-# protobuf has an idempotent write race in ByteSize/GetCachedSize
-# https://github.com/google/protobuf/issues/2169
-race:ByteSize
-race:ByteSizeLong
-race:GetCachedSize
+# OPENSSL_cleanse does racy access to a global
+race:OPENSSL_cleanse
+race:cleanse_ctr
+# these are legitimate races in OpenSSL, and it appears those folks are looking at it
+# https://www.mail-archive.com/openssl-dev@openssl.org/msg09019.html
+race:ssleay_rand_add
+race:ssleay_rand_bytes
+race:__sleep_for
+# protobuf has an idempotent write race in ByteSize/GetCachedSize
+# https://github.com/google/protobuf/issues/2169
+race:ByteSize
+race:ByteSizeLong
+race:GetCachedSize
diff --git a/contrib/libs/grpc/test/core/util/ubsan_suppressions.txt b/contrib/libs/grpc/test/core/util/ubsan_suppressions.txt
index f4fdce3d5c..ca233b8a28 100644
--- a/contrib/libs/grpc/test/core/util/ubsan_suppressions.txt
+++ b/contrib/libs/grpc/test/core/util/ubsan_suppressions.txt
@@ -1,27 +1,27 @@
-# Protobuf stuff
-nonnull-attribute:google::protobuf::*
-alignment:google::protobuf::*
-nonnull-attribute:_tr_stored_block
-# The following 5 suppressors should be removed as part of C++ cleanup
-enum:client_fuzzer_one_entry
-enum:message_compress_test
-enum:transport_security_test
-enum:algorithm_test
-alignment:transport_security_test
-# TODO(jtattermusch): address issues and remove the suppressions
-nonnull-attribute:gsec_aes_gcm_aead_crypter_decrypt_iovec
-nonnull-attribute:gsec_test_random_encrypt_decrypt
-nonnull-attribute:gsec_test_multiple_random_encrypt_decrypt
-nonnull-attribute:gsec_test_copy
-nonnull-attribute:gsec_test_encrypt_decrypt_test_vector
+# Protobuf stuff
+nonnull-attribute:google::protobuf::*
+alignment:google::protobuf::*
+nonnull-attribute:_tr_stored_block
+# The following 5 suppressors should be removed as part of C++ cleanup
+enum:client_fuzzer_one_entry
+enum:message_compress_test
+enum:transport_security_test
+enum:algorithm_test
+alignment:transport_security_test
+# TODO(jtattermusch): address issues and remove the suppressions
+nonnull-attribute:gsec_aes_gcm_aead_crypter_decrypt_iovec
+nonnull-attribute:gsec_test_random_encrypt_decrypt
+nonnull-attribute:gsec_test_multiple_random_encrypt_decrypt
+nonnull-attribute:gsec_test_copy
+nonnull-attribute:gsec_test_encrypt_decrypt_test_vector
alignment:y_absl::little_endian::Store64
alignment:y_absl::little_endian::Load64
-float-divide-by-zero:grpc::testing::postprocess_scenario_result
-enum:grpc_op_string
-signed-integer-overflow:chrono
-enum:grpc_http2_error_to_grpc_status
-enum:grpc_chttp2_cancel_stream
-# TODO(juanlishen): Remove this suppression after
-# https://github.com/GoogleCloudPlatform/layer-definitions/issues/531 is
-# addressed.
-alignment:grpc_core::XdsPriorityListUpdate::*
+float-divide-by-zero:grpc::testing::postprocess_scenario_result
+enum:grpc_op_string
+signed-integer-overflow:chrono
+enum:grpc_http2_error_to_grpc_status
+enum:grpc_chttp2_cancel_stream
+# TODO(juanlishen): Remove this suppression after
+# https://github.com/GoogleCloudPlatform/layer-definitions/issues/531 is
+# addressed.
+alignment:grpc_core::XdsPriorityListUpdate::*
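
For context, suppression lists like the two above are consumed by the sanitizer runtimes through their standard mechanism, typically `TSAN_OPTIONS=suppressions=<path>` and `UBSAN_OPTIONS=suppressions=<path>` set in the test environment; the exact wiring is build-system specific and is not part of this diff.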
diff --git a/contrib/libs/grpc/test/core/util/ya.make b/contrib/libs/grpc/test/core/util/ya.make
index 51d10abb13..fbaad80cad 100644
--- a/contrib/libs/grpc/test/core/util/ya.make
+++ b/contrib/libs/grpc/test/core/util/ya.make
@@ -1,53 +1,53 @@
-LIBRARY()
-
+LIBRARY()
+
LICENSE(Apache-2.0)
LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
-
+
OWNER(dvshkurko)
-PEERDIR(
- contrib/libs/grpc
+PEERDIR(
+ contrib/libs/grpc
contrib/restricted/abseil-cpp-tstring/y_absl/debugging/failure_signal_handler
-)
-
+)
+
ADDINCL(
${ARCADIA_BUILD_ROOT}/contrib/libs/grpc
contrib/libs/grpc
)
-
-NO_COMPILER_WARNINGS()
-
-SRCS(
- # cmdline.cc
- # cmdline_test.cc
- # debugger_macros.cc
- # fuzzer_corpus_test.cc
- # fuzzer_one_entry_runner.sh*
- # fuzzer_util.cc
- # grpc_fuzzer.bzl
- # grpc_profiler.cc
- # histogram.cc
- # histogram_test.cc
- # lsan_suppressions.txt
- # memory_counters.cc
- # mock_endpoint.cc
- # one_corpus_entry_fuzzer.cc
- # parse_hexstring.cc
- # passthru_endpoint.cc
- port.cc
- # port_isolated_runtime_environment.cc
- port_server_client.cc
- # reconnect_server.cc
- # run_with_poller.sh*
- # slice_splitter.cc
- # subprocess_posix.cc
- # subprocess_windows.cc
- test_config.cc
- test_lb_policies.cc
- # test_tcp_server.cc
- # tracer_util.cc
- # trickle_endpoint.cc
-)
-
-END()
+
+NO_COMPILER_WARNINGS()
+
+SRCS(
+ # cmdline.cc
+ # cmdline_test.cc
+ # debugger_macros.cc
+ # fuzzer_corpus_test.cc
+ # fuzzer_one_entry_runner.sh*
+ # fuzzer_util.cc
+ # grpc_fuzzer.bzl
+ # grpc_profiler.cc
+ # histogram.cc
+ # histogram_test.cc
+ # lsan_suppressions.txt
+ # memory_counters.cc
+ # mock_endpoint.cc
+ # one_corpus_entry_fuzzer.cc
+ # parse_hexstring.cc
+ # passthru_endpoint.cc
+ port.cc
+ # port_isolated_runtime_environment.cc
+ port_server_client.cc
+ # reconnect_server.cc
+ # run_with_poller.sh*
+ # slice_splitter.cc
+ # subprocess_posix.cc
+ # subprocess_windows.cc
+ test_config.cc
+ test_lb_policies.cc
+ # test_tcp_server.cc
+ # tracer_util.cc
+ # trickle_endpoint.cc
+)
+
+END()
diff --git a/contrib/libs/grpc/test/cpp/README-iOS.md b/contrib/libs/grpc/test/cpp/README-iOS.md
index faef8aaf5f..898931085b 100644
--- a/contrib/libs/grpc/test/cpp/README-iOS.md
+++ b/contrib/libs/grpc/test/cpp/README-iOS.md
@@ -1,52 +1,52 @@
-## C++ tests on iOS
-
-[GTMGoogleTestRunner](https://github.com/google/google-toolbox-for-mac/blob/master/UnitTesting/GTMGoogleTestRunner.mm) is used to convert googletest cases to XCTest that can be run on iOS. GTMGoogleTestRunner doesn't execute the `main` function, so we can't have any test logic in `main`.
-However, it's ok to call `::testing::InitGoogleTest` in `main`, as `GTMGoogleTestRunner` [calls InitGoogleTest](https://github.com/google/google-toolbox-for-mac/blob/master/UnitTesting/GTMGoogleTestRunner.mm#L151).
-`grpc::testing::TestEnvironment` can also be called from `main`, as it does some test initialization (install crash handler, seed RNG) that's not strictly required to run testcases on iOS.
-
-
-## Porting existing C++ tests to run on iOS
-
-Please follow these guidelines when porting tests to run on iOS:
-
-- Tests need to use the googletest framework
-- Any setup/teardown code in `main` needs to be moved to `SetUpTestCase`/`TearDownTestCase`, and `TEST` needs to be changed to `TEST_F`.
-- [Death tests](https://github.com/google/googletest/blob/master/googletest/docs/advanced.md#death-tests) are not supported on iOS, so use the `*_IF_SUPPORTED()` macros to ensure that your code compiles on iOS.
-
-For example, the following test
-```c++
-TEST(MyTest, TestOne) {
- ASSERT_DEATH(ThisShouldDie(), "");
-}
-
-int main(int argc, char** argv) {
- grpc::testing::TestEnvironment env(argc, argv);
- ::testing::InitGoogleTest(&argc, argv);
- grpc_init();
- return RUN_ALL_TESTS();
- grpc_shutdown();
-}
-```
-
-should be changed to
-```c++
-class MyTest : public ::testing::Test {
- protected:
- static void SetUpTestCase() { grpc_init(); }
- static void TearDownTestCase() { grpc_shutdown(); }
-};
-
-TEST_F(MyTest, TestOne) {
- ASSERT_DEATH_IF_SUPPORTED(ThisShouldDie(), "");
-}
-
-int main(int argc, char** argv) {
- grpc::testing::TestEnvironment env(argc, argv);
- ::testing::InitGoogleTest(&argc, argv);
- return RUN_ALL_TESTS();
-}
-```
-
-## Limitations
-
-Due to a [limitation](https://github.com/google/google-toolbox-for-mac/blob/master/UnitTesting/GTMGoogleTestRunner.mm#L48-L56) in GTMGoogleTestRunner, `SetUpTestCase`/`TearDownTestCase` will be called before/after *every* individual test case, similar to `SetUp`/`TearDown`.
+## C++ tests on iOS
+
+[GTMGoogleTestRunner](https://github.com/google/google-toolbox-for-mac/blob/master/UnitTesting/GTMGoogleTestRunner.mm) is used to convert googletest cases to XCTest that can be run on iOS. GTMGoogleTestRunner doesn't execute the `main` function, so we can't have any test logic in `main`.
+However, it's ok to call `::testing::InitGoogleTest` in `main`, as `GTMGoogleTestRunner` [calls InitGoogleTest](https://github.com/google/google-toolbox-for-mac/blob/master/UnitTesting/GTMGoogleTestRunner.mm#L151).
+`grpc::testing::TestEnvironment` can also be called from `main`, as it does some test initialization (install crash handler, seed RNG) that's not strictly required to run testcases on iOS.
+
+
+## Porting existing C++ tests to run on iOS
+
+Please follow these guidelines when porting tests to run on iOS:
+
+- Tests need to use the googletest framework
+- Any setup/teardown code in `main` needs to be moved to `SetUpTestCase`/`TearDownTestCase`, and `TEST` needs to be changed to `TEST_F`.
+- [Death tests](https://github.com/google/googletest/blob/master/googletest/docs/advanced.md#death-tests) are not supported on iOS, so use the `*_IF_SUPPORTED()` macros to ensure that your code compiles on iOS.
+
+For example, the following test
+```c++
+TEST(MyTest, TestOne) {
+ ASSERT_DEATH(ThisShouldDie(), "");
+}
+
+int main(int argc, char** argv) {
+ grpc::testing::TestEnvironment env(argc, argv);
+ ::testing::InitGoogleTest(&argc, argv);
+ grpc_init();
+ return RUN_ALL_TESTS();
+ grpc_shutdown();
+}
+```
+
+should be changed to
+```c++
+class MyTest : public ::testing::Test {
+ protected:
+ static void SetUpTestCase() { grpc_init(); }
+ static void TearDownTestCase() { grpc_shutdown(); }
+};
+
+TEST_F(MyTest, TestOne) {
+ ASSERT_DEATH_IF_SUPPORTED(ThisShouldDie(), "");
+}
+
+int main(int argc, char** argv) {
+ grpc::testing::TestEnvironment env(argc, argv);
+ ::testing::InitGoogleTest(&argc, argv);
+ return RUN_ALL_TESTS();
+}
+```
+
+## Limitations
+
+Due to a [limitation](https://github.com/google/google-toolbox-for-mac/blob/master/UnitTesting/GTMGoogleTestRunner.mm#L48-L56) in GTMGoogleTestRunner, `SetUpTestCase`/`TearDownTestCase` will be called before/after *every* individual test case, similar to `SetUp`/`TearDown`.
diff --git a/contrib/libs/grpc/test/cpp/end2end/async_end2end_test.cc b/contrib/libs/grpc/test/cpp/end2end/async_end2end_test.cc
index 85d8db87dc..45df8718f9 100644
--- a/contrib/libs/grpc/test/cpp/end2end/async_end2end_test.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/async_end2end_test.cc
@@ -1,360 +1,360 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include <cinttypes>
-#include <memory>
-#include <thread>
-
-#include <grpc/grpc.h>
-#include <grpc/support/alloc.h>
-#include <grpc/support/log.h>
-#include <grpc/support/time.h>
-#include <grpcpp/channel.h>
-#include <grpcpp/client_context.h>
-#include <grpcpp/create_channel.h>
-#include <grpcpp/ext/health_check_service_server_builder_option.h>
-#include <grpcpp/server.h>
-#include <grpcpp/server_builder.h>
-#include <grpcpp/server_context.h>
-
-#include "src/core/ext/filters/client_channel/backup_poller.h"
-#include "src/core/lib/gpr/tls.h"
-#include "src/core/lib/iomgr/port.h"
-#include "src/proto/grpc/health/v1/health.grpc.pb.h"
-#include "src/proto/grpc/testing/duplicate/echo_duplicate.grpc.pb.h"
-#include "src/proto/grpc/testing/echo.grpc.pb.h"
-#include "test/core/util/port.h"
-#include "test/core/util/test_config.h"
-#include "test/cpp/util/string_ref_helper.h"
-#include "test/cpp/util/test_credentials_provider.h"
-
-#ifdef GRPC_POSIX_SOCKET_EV
-#include "src/core/lib/iomgr/ev_posix.h"
-#endif // GRPC_POSIX_SOCKET_EV
-
-#include <gtest/gtest.h>
-
-using grpc::testing::EchoRequest;
-using grpc::testing::EchoResponse;
-using grpc::testing::kTlsCredentialsType;
-using std::chrono::system_clock;
-
-namespace grpc {
-namespace testing {
-
-namespace {
-
-void* tag(int i) { return (void*)static_cast<intptr_t>(i); }
-int detag(void* p) { return static_cast<int>(reinterpret_cast<intptr_t>(p)); }
-
-class Verifier {
- public:
- Verifier() : lambda_run_(false) {}
- // Expect sets the expected ok value for a specific tag
- Verifier& Expect(int i, bool expect_ok) {
- return ExpectUnless(i, expect_ok, false);
- }
- // ExpectUnless sets the expected ok value for a specific tag
- // unless the tag was already marked seen (as a result of ExpectMaybe)
- Verifier& ExpectUnless(int i, bool expect_ok, bool seen) {
- if (!seen) {
- expectations_[tag(i)] = expect_ok;
- }
- return *this;
- }
- // ExpectMaybe sets the expected ok value for a specific tag, but does not
- // require it to appear
- // If it does, sets *seen to true
- Verifier& ExpectMaybe(int i, bool expect_ok, bool* seen) {
- if (!*seen) {
- maybe_expectations_[tag(i)] = MaybeExpect{expect_ok, seen};
- }
- return *this;
- }
-
- // Next waits for 1 async tag to complete, checks its
- // expectations, and returns the tag
- int Next(CompletionQueue* cq, bool ignore_ok) {
- bool ok;
- void* got_tag;
- EXPECT_TRUE(cq->Next(&got_tag, &ok));
- GotTag(got_tag, ok, ignore_ok);
- return detag(got_tag);
- }
-
- template <typename T>
- CompletionQueue::NextStatus DoOnceThenAsyncNext(
- CompletionQueue* cq, void** got_tag, bool* ok, T deadline,
- std::function<void(void)> lambda) {
- if (lambda_run_) {
- return cq->AsyncNext(got_tag, ok, deadline);
- } else {
- lambda_run_ = true;
- return cq->DoThenAsyncNext(lambda, got_tag, ok, deadline);
- }
- }
-
- // Verify keeps calling Next until all currently set
- // expected tags are complete
- void Verify(CompletionQueue* cq) { Verify(cq, false); }
-
- // This version of Verify allows optionally ignoring the
- // outcome of the expectation
- void Verify(CompletionQueue* cq, bool ignore_ok) {
- GPR_ASSERT(!expectations_.empty() || !maybe_expectations_.empty());
- while (!expectations_.empty()) {
- Next(cq, ignore_ok);
- }
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <cinttypes>
+#include <memory>
+#include <thread>
+
+#include <grpc/grpc.h>
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/time.h>
+#include <grpcpp/channel.h>
+#include <grpcpp/client_context.h>
+#include <grpcpp/create_channel.h>
+#include <grpcpp/ext/health_check_service_server_builder_option.h>
+#include <grpcpp/server.h>
+#include <grpcpp/server_builder.h>
+#include <grpcpp/server_context.h>
+
+#include "src/core/ext/filters/client_channel/backup_poller.h"
+#include "src/core/lib/gpr/tls.h"
+#include "src/core/lib/iomgr/port.h"
+#include "src/proto/grpc/health/v1/health.grpc.pb.h"
+#include "src/proto/grpc/testing/duplicate/echo_duplicate.grpc.pb.h"
+#include "src/proto/grpc/testing/echo.grpc.pb.h"
+#include "test/core/util/port.h"
+#include "test/core/util/test_config.h"
+#include "test/cpp/util/string_ref_helper.h"
+#include "test/cpp/util/test_credentials_provider.h"
+
+#ifdef GRPC_POSIX_SOCKET_EV
+#include "src/core/lib/iomgr/ev_posix.h"
+#endif // GRPC_POSIX_SOCKET_EV
+
+#include <gtest/gtest.h>
+
+using grpc::testing::EchoRequest;
+using grpc::testing::EchoResponse;
+using grpc::testing::kTlsCredentialsType;
+using std::chrono::system_clock;
+
+namespace grpc {
+namespace testing {
+
+namespace {
+
+void* tag(int i) { return (void*)static_cast<intptr_t>(i); }
+int detag(void* p) { return static_cast<int>(reinterpret_cast<intptr_t>(p)); }
+
+class Verifier {
+ public:
+ Verifier() : lambda_run_(false) {}
+ // Expect sets the expected ok value for a specific tag
+ Verifier& Expect(int i, bool expect_ok) {
+ return ExpectUnless(i, expect_ok, false);
+ }
+ // ExpectUnless sets the expected ok value for a specific tag
+ // unless the tag was already marked seen (as a result of ExpectMaybe)
+ Verifier& ExpectUnless(int i, bool expect_ok, bool seen) {
+ if (!seen) {
+ expectations_[tag(i)] = expect_ok;
+ }
+ return *this;
+ }
+ // ExpectMaybe sets the expected ok value for a specific tag, but does not
+ // require it to appear
+ // If it does, sets *seen to true
+ Verifier& ExpectMaybe(int i, bool expect_ok, bool* seen) {
+ if (!*seen) {
+ maybe_expectations_[tag(i)] = MaybeExpect{expect_ok, seen};
+ }
+ return *this;
+ }
+
+ // Next waits for 1 async tag to complete, checks its
+ // expectations, and returns the tag
+ int Next(CompletionQueue* cq, bool ignore_ok) {
+ bool ok;
+ void* got_tag;
+ EXPECT_TRUE(cq->Next(&got_tag, &ok));
+ GotTag(got_tag, ok, ignore_ok);
+ return detag(got_tag);
+ }
+
+ template <typename T>
+ CompletionQueue::NextStatus DoOnceThenAsyncNext(
+ CompletionQueue* cq, void** got_tag, bool* ok, T deadline,
+ std::function<void(void)> lambda) {
+ if (lambda_run_) {
+ return cq->AsyncNext(got_tag, ok, deadline);
+ } else {
+ lambda_run_ = true;
+ return cq->DoThenAsyncNext(lambda, got_tag, ok, deadline);
+ }
+ }
+
+ // Verify keeps calling Next until all currently set
+ // expected tags are complete
+ void Verify(CompletionQueue* cq) { Verify(cq, false); }
+
+ // This version of Verify allows optionally ignoring the
+ // outcome of the expectation
+ void Verify(CompletionQueue* cq, bool ignore_ok) {
+ GPR_ASSERT(!expectations_.empty() || !maybe_expectations_.empty());
+ while (!expectations_.empty()) {
+ Next(cq, ignore_ok);
+ }
maybe_expectations_.clear();
- }
-
- // This version of Verify stops after a certain deadline
- void Verify(CompletionQueue* cq,
- std::chrono::system_clock::time_point deadline) {
- if (expectations_.empty()) {
- bool ok;
- void* got_tag;
- EXPECT_EQ(cq->AsyncNext(&got_tag, &ok, deadline),
- CompletionQueue::TIMEOUT);
- } else {
- while (!expectations_.empty()) {
- bool ok;
- void* got_tag;
- EXPECT_EQ(cq->AsyncNext(&got_tag, &ok, deadline),
- CompletionQueue::GOT_EVENT);
- GotTag(got_tag, ok, false);
- }
- }
+ }
+
+ // This version of Verify stops after a certain deadline
+ void Verify(CompletionQueue* cq,
+ std::chrono::system_clock::time_point deadline) {
+ if (expectations_.empty()) {
+ bool ok;
+ void* got_tag;
+ EXPECT_EQ(cq->AsyncNext(&got_tag, &ok, deadline),
+ CompletionQueue::TIMEOUT);
+ } else {
+ while (!expectations_.empty()) {
+ bool ok;
+ void* got_tag;
+ EXPECT_EQ(cq->AsyncNext(&got_tag, &ok, deadline),
+ CompletionQueue::GOT_EVENT);
+ GotTag(got_tag, ok, false);
+ }
+ }
maybe_expectations_.clear();
- }
-
- // This version of Verify stops after a certain deadline, and uses the
- // DoThenAsyncNext API
- // to call the lambda
- void Verify(CompletionQueue* cq,
- std::chrono::system_clock::time_point deadline,
- const std::function<void(void)>& lambda) {
- if (expectations_.empty()) {
- bool ok;
- void* got_tag;
- EXPECT_EQ(DoOnceThenAsyncNext(cq, &got_tag, &ok, deadline, lambda),
- CompletionQueue::TIMEOUT);
- } else {
- while (!expectations_.empty()) {
- bool ok;
- void* got_tag;
- EXPECT_EQ(DoOnceThenAsyncNext(cq, &got_tag, &ok, deadline, lambda),
- CompletionQueue::GOT_EVENT);
- GotTag(got_tag, ok, false);
- }
- }
+ }
+
+ // This version of Verify stops after a certain deadline, and uses the
+ // DoThenAsyncNext API
+ // to call the lambda
+ void Verify(CompletionQueue* cq,
+ std::chrono::system_clock::time_point deadline,
+ const std::function<void(void)>& lambda) {
+ if (expectations_.empty()) {
+ bool ok;
+ void* got_tag;
+ EXPECT_EQ(DoOnceThenAsyncNext(cq, &got_tag, &ok, deadline, lambda),
+ CompletionQueue::TIMEOUT);
+ } else {
+ while (!expectations_.empty()) {
+ bool ok;
+ void* got_tag;
+ EXPECT_EQ(DoOnceThenAsyncNext(cq, &got_tag, &ok, deadline, lambda),
+ CompletionQueue::GOT_EVENT);
+ GotTag(got_tag, ok, false);
+ }
+ }
maybe_expectations_.clear();
- }
-
- private:
- void GotTag(void* got_tag, bool ok, bool ignore_ok) {
- auto it = expectations_.find(got_tag);
- if (it != expectations_.end()) {
- if (!ignore_ok) {
- EXPECT_EQ(it->second, ok);
- }
- expectations_.erase(it);
- } else {
- auto it2 = maybe_expectations_.find(got_tag);
- if (it2 != maybe_expectations_.end()) {
- if (it2->second.seen != nullptr) {
- EXPECT_FALSE(*it2->second.seen);
- *it2->second.seen = true;
- }
- if (!ignore_ok) {
- EXPECT_EQ(it2->second.ok, ok);
- }
+ }
+
+ private:
+ void GotTag(void* got_tag, bool ok, bool ignore_ok) {
+ auto it = expectations_.find(got_tag);
+ if (it != expectations_.end()) {
+ if (!ignore_ok) {
+ EXPECT_EQ(it->second, ok);
+ }
+ expectations_.erase(it);
+ } else {
+ auto it2 = maybe_expectations_.find(got_tag);
+ if (it2 != maybe_expectations_.end()) {
+ if (it2->second.seen != nullptr) {
+ EXPECT_FALSE(*it2->second.seen);
+ *it2->second.seen = true;
+ }
+ if (!ignore_ok) {
+ EXPECT_EQ(it2->second.ok, ok);
+ }
maybe_expectations_.erase(it2);
- } else {
- gpr_log(GPR_ERROR, "Unexpected tag: %p", got_tag);
- abort();
- }
- }
- }
-
- struct MaybeExpect {
- bool ok;
- bool* seen;
- };
-
- std::map<void*, bool> expectations_;
- std::map<void*, MaybeExpect> maybe_expectations_;
- bool lambda_run_;
-};
-
-bool plugin_has_sync_methods(std::unique_ptr<ServerBuilderPlugin>& plugin) {
- return plugin->has_sync_methods();
-}
-
-// This class disables the server builder plugins that may add sync services to
-// the server. If there are sync services, UnimplementedRpc test will trigger
-// the sync unknown rpc routine on the server side, rather than the async one
-// that needs to be tested here.
-class ServerBuilderSyncPluginDisabler : public ::grpc::ServerBuilderOption {
- public:
- void UpdateArguments(ChannelArguments* /*arg*/) override {}
-
- void UpdatePlugins(
- std::vector<std::unique_ptr<ServerBuilderPlugin>>* plugins) override {
- plugins->erase(std::remove_if(plugins->begin(), plugins->end(),
- plugin_has_sync_methods),
- plugins->end());
- }
-};
-
-class TestScenario {
- public:
+ } else {
+ gpr_log(GPR_ERROR, "Unexpected tag: %p", got_tag);
+ abort();
+ }
+ }
+ }
+
+ struct MaybeExpect {
+ bool ok;
+ bool* seen;
+ };
+
+ std::map<void*, bool> expectations_;
+ std::map<void*, MaybeExpect> maybe_expectations_;
+ bool lambda_run_;
+};
+
+bool plugin_has_sync_methods(std::unique_ptr<ServerBuilderPlugin>& plugin) {
+ return plugin->has_sync_methods();
+}
+
+// This class disables the server builder plugins that may add sync services to
+// the server. If there are sync services, UnimplementedRpc test will trigger
+// the sync unknown rpc routine on the server side, rather than the async one
+// that needs to be tested here.
+class ServerBuilderSyncPluginDisabler : public ::grpc::ServerBuilderOption {
+ public:
+ void UpdateArguments(ChannelArguments* /*arg*/) override {}
+
+ void UpdatePlugins(
+ std::vector<std::unique_ptr<ServerBuilderPlugin>>* plugins) override {
+ plugins->erase(std::remove_if(plugins->begin(), plugins->end(),
+ plugin_has_sync_methods),
+ plugins->end());
+ }
+};
+
+class TestScenario {
+ public:
TestScenario(bool inproc_stub, const TString& creds_type, bool hcs,
const TString& content)
- : inproc(inproc_stub),
- health_check_service(hcs),
- credentials_type(creds_type),
- message_content(content) {}
- void Log() const;
- bool inproc;
- bool health_check_service;
+ : inproc(inproc_stub),
+ health_check_service(hcs),
+ credentials_type(creds_type),
+ message_content(content) {}
+ void Log() const;
+ bool inproc;
+ bool health_check_service;
const TString credentials_type;
const TString message_content;
-};
-
-static std::ostream& operator<<(std::ostream& out,
- const TestScenario& scenario) {
- return out << "TestScenario{inproc=" << (scenario.inproc ? "true" : "false")
- << ", credentials='" << scenario.credentials_type
- << ", health_check_service="
- << (scenario.health_check_service ? "true" : "false")
- << "', message_size=" << scenario.message_content.size() << "}";
-}
-
-void TestScenario::Log() const {
- std::ostringstream out;
- out << *this;
- gpr_log(GPR_DEBUG, "%s", out.str().c_str());
-}
-
-class HealthCheck : public health::v1::Health::Service {};
-
-class AsyncEnd2endTest : public ::testing::TestWithParam<TestScenario> {
- protected:
- AsyncEnd2endTest() { GetParam().Log(); }
-
- void SetUp() override {
- port_ = grpc_pick_unused_port_or_die();
- server_address_ << "localhost:" << port_;
-
- // Setup server
- BuildAndStartServer();
- }
-
- void TearDown() override {
- server_->Shutdown();
- void* ignored_tag;
- bool ignored_ok;
- cq_->Shutdown();
- while (cq_->Next(&ignored_tag, &ignored_ok))
- ;
- stub_.reset();
- grpc_recycle_unused_port(port_);
- }
-
- void BuildAndStartServer() {
- ServerBuilder builder;
- auto server_creds = GetCredentialsProvider()->GetServerCredentials(
- GetParam().credentials_type);
- builder.AddListeningPort(server_address_.str(), server_creds);
- service_.reset(new grpc::testing::EchoTestService::AsyncService());
- builder.RegisterService(service_.get());
- if (GetParam().health_check_service) {
- builder.RegisterService(&health_check_);
- }
- cq_ = builder.AddCompletionQueue();
-
-    // TODO(zyc): make a test option to choose whether sync plugins should be
- // deleted
- std::unique_ptr<ServerBuilderOption> sync_plugin_disabler(
- new ServerBuilderSyncPluginDisabler());
- builder.SetOption(move(sync_plugin_disabler));
- server_ = builder.BuildAndStart();
- }
-
- void ResetStub() {
- ChannelArguments args;
- auto channel_creds = GetCredentialsProvider()->GetChannelCredentials(
- GetParam().credentials_type, &args);
- std::shared_ptr<Channel> channel =
- !(GetParam().inproc) ? ::grpc::CreateCustomChannel(
- server_address_.str(), channel_creds, args)
- : server_->InProcessChannel(args);
- stub_ = grpc::testing::EchoTestService::NewStub(channel);
- }
-
- void SendRpc(int num_rpcs) {
- for (int i = 0; i < num_rpcs; i++) {
- EchoRequest send_request;
- EchoRequest recv_request;
- EchoResponse send_response;
- EchoResponse recv_response;
- Status recv_status;
-
- ClientContext cli_ctx;
- ServerContext srv_ctx;
- grpc::ServerAsyncResponseWriter<EchoResponse> response_writer(&srv_ctx);
-
- send_request.set_message(GetParam().message_content);
- std::unique_ptr<ClientAsyncResponseReader<EchoResponse>> response_reader(
- stub_->AsyncEcho(&cli_ctx, send_request, cq_.get()));
-
- service_->RequestEcho(&srv_ctx, &recv_request, &response_writer,
- cq_.get(), cq_.get(), tag(2));
-
- response_reader->Finish(&recv_response, &recv_status, tag(4));
-
- Verifier().Expect(2, true).Verify(cq_.get());
- EXPECT_EQ(send_request.message(), recv_request.message());
-
- send_response.set_message(recv_request.message());
- response_writer.Finish(send_response, Status::OK, tag(3));
- Verifier().Expect(3, true).Expect(4, true).Verify(cq_.get());
-
- EXPECT_EQ(send_response.message(), recv_response.message());
- EXPECT_TRUE(recv_status.ok());
- }
- }
-
- std::unique_ptr<ServerCompletionQueue> cq_;
- std::unique_ptr<grpc::testing::EchoTestService::Stub> stub_;
- std::unique_ptr<Server> server_;
- std::unique_ptr<grpc::testing::EchoTestService::AsyncService> service_;
- HealthCheck health_check_;
- std::ostringstream server_address_;
- int port_;
-};
-
-TEST_P(AsyncEnd2endTest, SimpleRpc) {
- ResetStub();
- SendRpc(1);
-}
-
+};
+
+static std::ostream& operator<<(std::ostream& out,
+ const TestScenario& scenario) {
+ return out << "TestScenario{inproc=" << (scenario.inproc ? "true" : "false")
+ << ", credentials='" << scenario.credentials_type
+ << ", health_check_service="
+ << (scenario.health_check_service ? "true" : "false")
+ << "', message_size=" << scenario.message_content.size() << "}";
+}
+
+void TestScenario::Log() const {
+ std::ostringstream out;
+ out << *this;
+ gpr_log(GPR_DEBUG, "%s", out.str().c_str());
+}
+
+class HealthCheck : public health::v1::Health::Service {};
+
+class AsyncEnd2endTest : public ::testing::TestWithParam<TestScenario> {
+ protected:
+ AsyncEnd2endTest() { GetParam().Log(); }
+
+ void SetUp() override {
+ port_ = grpc_pick_unused_port_or_die();
+ server_address_ << "localhost:" << port_;
+
+ // Setup server
+ BuildAndStartServer();
+ }
+
+ void TearDown() override {
+ server_->Shutdown();
+ void* ignored_tag;
+ bool ignored_ok;
+ cq_->Shutdown();
+ while (cq_->Next(&ignored_tag, &ignored_ok))
+ ;
+ stub_.reset();
+ grpc_recycle_unused_port(port_);
+ }
+
+ void BuildAndStartServer() {
+ ServerBuilder builder;
+ auto server_creds = GetCredentialsProvider()->GetServerCredentials(
+ GetParam().credentials_type);
+ builder.AddListeningPort(server_address_.str(), server_creds);
+ service_.reset(new grpc::testing::EchoTestService::AsyncService());
+ builder.RegisterService(service_.get());
+ if (GetParam().health_check_service) {
+ builder.RegisterService(&health_check_);
+ }
+ cq_ = builder.AddCompletionQueue();
+
+    // TODO(zyc): make a test option to choose whether sync plugins should be
+ // deleted
+ std::unique_ptr<ServerBuilderOption> sync_plugin_disabler(
+ new ServerBuilderSyncPluginDisabler());
+ builder.SetOption(move(sync_plugin_disabler));
+ server_ = builder.BuildAndStart();
+ }
+
+ void ResetStub() {
+ ChannelArguments args;
+ auto channel_creds = GetCredentialsProvider()->GetChannelCredentials(
+ GetParam().credentials_type, &args);
+ std::shared_ptr<Channel> channel =
+ !(GetParam().inproc) ? ::grpc::CreateCustomChannel(
+ server_address_.str(), channel_creds, args)
+ : server_->InProcessChannel(args);
+ stub_ = grpc::testing::EchoTestService::NewStub(channel);
+ }
+
+ void SendRpc(int num_rpcs) {
+ for (int i = 0; i < num_rpcs; i++) {
+ EchoRequest send_request;
+ EchoRequest recv_request;
+ EchoResponse send_response;
+ EchoResponse recv_response;
+ Status recv_status;
+
+ ClientContext cli_ctx;
+ ServerContext srv_ctx;
+ grpc::ServerAsyncResponseWriter<EchoResponse> response_writer(&srv_ctx);
+
+ send_request.set_message(GetParam().message_content);
+ std::unique_ptr<ClientAsyncResponseReader<EchoResponse>> response_reader(
+ stub_->AsyncEcho(&cli_ctx, send_request, cq_.get()));
+
+ service_->RequestEcho(&srv_ctx, &recv_request, &response_writer,
+ cq_.get(), cq_.get(), tag(2));
+
+ response_reader->Finish(&recv_response, &recv_status, tag(4));
+
+ Verifier().Expect(2, true).Verify(cq_.get());
+ EXPECT_EQ(send_request.message(), recv_request.message());
+
+ send_response.set_message(recv_request.message());
+ response_writer.Finish(send_response, Status::OK, tag(3));
+ Verifier().Expect(3, true).Expect(4, true).Verify(cq_.get());
+
+ EXPECT_EQ(send_response.message(), recv_response.message());
+ EXPECT_TRUE(recv_status.ok());
+ }
+ }
+
+ std::unique_ptr<ServerCompletionQueue> cq_;
+ std::unique_ptr<grpc::testing::EchoTestService::Stub> stub_;
+ std::unique_ptr<Server> server_;
+ std::unique_ptr<grpc::testing::EchoTestService::AsyncService> service_;
+ HealthCheck health_check_;
+ std::ostringstream server_address_;
+ int port_;
+};
+
+TEST_P(AsyncEnd2endTest, SimpleRpc) {
+ ResetStub();
+ SendRpc(1);
+}
+
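A note on the pattern above: SendRpc drives the whole round trip through the file's Verifier utility. As a rough sketch of what Expect(n, ok).Verify(cq) reduces to (assuming the usual tag(i) == reinterpret_cast<void*>(i) convention this file uses; illustrative only, not part of the diff):

  #include <cstdint>
  #include <grpcpp/completion_queue.h>
  #include <gtest/gtest.h>

  // Pull exactly one event from the completion queue and check that it carries
  // the expected tag and success flag -- the core of Verifier::Verify.
  static void ExpectNextEvent(grpc::CompletionQueue* cq, intptr_t expected_tag,
                              bool expected_ok) {
    void* got_tag = nullptr;
    bool ok = false;
    ASSERT_TRUE(cq->Next(&got_tag, &ok));  // blocks until a pending op completes
    EXPECT_EQ(reinterpret_cast<void*>(expected_tag), got_tag);
    EXPECT_EQ(expected_ok, ok);
  }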
TEST_P(AsyncEnd2endTest, SimpleRpcWithExpectedError) {
ResetStub();
@@ -401,1509 +401,1509 @@ TEST_P(AsyncEnd2endTest, SimpleRpcWithExpectedError) {
EXPECT_FALSE(srv_ctx.IsCancelled());
}
-TEST_P(AsyncEnd2endTest, SequentialRpcs) {
- ResetStub();
- SendRpc(10);
-}
-
-TEST_P(AsyncEnd2endTest, ReconnectChannel) {
- // GRPC_CLIENT_CHANNEL_BACKUP_POLL_INTERVAL_MS is set to 100ms in main()
- if (GetParam().inproc) {
- return;
- }
- int poller_slowdown_factor = 1;
-#ifdef GRPC_POSIX_SOCKET_EV
- // It needs 2 pollset_works to reconnect the channel with polling engine
- // "poll"
- grpc_core::UniquePtr<char> poller = GPR_GLOBAL_CONFIG_GET(grpc_poll_strategy);
- if (0 == strcmp(poller.get(), "poll")) {
- poller_slowdown_factor = 2;
- }
-#endif // GRPC_POSIX_SOCKET_EV
- ResetStub();
- SendRpc(1);
- server_->Shutdown();
- void* ignored_tag;
- bool ignored_ok;
- cq_->Shutdown();
- while (cq_->Next(&ignored_tag, &ignored_ok))
- ;
- BuildAndStartServer();
- // It needs more than GRPC_CLIENT_CHANNEL_BACKUP_POLL_INTERVAL_MS time to
- // reconnect the channel.
- gpr_sleep_until(gpr_time_add(
- gpr_now(GPR_CLOCK_REALTIME),
- gpr_time_from_millis(
- 300 * poller_slowdown_factor * grpc_test_slowdown_factor(),
- GPR_TIMESPAN)));
- SendRpc(1);
-}
-
-// We do not need to protect notify because the use is synchronized.
-void ServerWait(Server* server, int* notify) {
- server->Wait();
- *notify = 1;
-}
-TEST_P(AsyncEnd2endTest, WaitAndShutdownTest) {
- int notify = 0;
- std::thread wait_thread(&ServerWait, server_.get(), &notify);
- ResetStub();
- SendRpc(1);
- EXPECT_EQ(0, notify);
- server_->Shutdown();
- wait_thread.join();
- EXPECT_EQ(1, notify);
-}
-
-TEST_P(AsyncEnd2endTest, ShutdownThenWait) {
- ResetStub();
- SendRpc(1);
- std::thread t([this]() { server_->Shutdown(); });
- server_->Wait();
- t.join();
-}
-
-// Test a simple RPC using the async version of Next
-TEST_P(AsyncEnd2endTest, AsyncNextRpc) {
- ResetStub();
-
- EchoRequest send_request;
- EchoRequest recv_request;
- EchoResponse send_response;
- EchoResponse recv_response;
- Status recv_status;
-
- ClientContext cli_ctx;
- ServerContext srv_ctx;
- grpc::ServerAsyncResponseWriter<EchoResponse> response_writer(&srv_ctx);
-
- send_request.set_message(GetParam().message_content);
- std::unique_ptr<ClientAsyncResponseReader<EchoResponse>> response_reader(
- stub_->AsyncEcho(&cli_ctx, send_request, cq_.get()));
-
- std::chrono::system_clock::time_point time_now(
- std::chrono::system_clock::now());
- std::chrono::system_clock::time_point time_limit(
- std::chrono::system_clock::now() + std::chrono::seconds(10));
- Verifier().Verify(cq_.get(), time_now);
- Verifier().Verify(cq_.get(), time_now);
-
- service_->RequestEcho(&srv_ctx, &recv_request, &response_writer, cq_.get(),
- cq_.get(), tag(2));
- response_reader->Finish(&recv_response, &recv_status, tag(4));
-
- Verifier().Expect(2, true).Verify(cq_.get(), time_limit);
- EXPECT_EQ(send_request.message(), recv_request.message());
-
- send_response.set_message(recv_request.message());
- response_writer.Finish(send_response, Status::OK, tag(3));
- Verifier().Expect(3, true).Expect(4, true).Verify(
- cq_.get(), std::chrono::system_clock::time_point::max());
-
- EXPECT_EQ(send_response.message(), recv_response.message());
- EXPECT_TRUE(recv_status.ok());
-}
-
-// Test a simple RPC using the async version of Next
-TEST_P(AsyncEnd2endTest, DoThenAsyncNextRpc) {
- ResetStub();
-
- EchoRequest send_request;
- EchoRequest recv_request;
- EchoResponse send_response;
- EchoResponse recv_response;
- Status recv_status;
-
- ClientContext cli_ctx;
- ServerContext srv_ctx;
- grpc::ServerAsyncResponseWriter<EchoResponse> response_writer(&srv_ctx);
-
- send_request.set_message(GetParam().message_content);
- std::unique_ptr<ClientAsyncResponseReader<EchoResponse>> response_reader(
- stub_->AsyncEcho(&cli_ctx, send_request, cq_.get()));
-
- std::chrono::system_clock::time_point time_now(
- std::chrono::system_clock::now());
- std::chrono::system_clock::time_point time_limit(
- std::chrono::system_clock::now() + std::chrono::seconds(10));
- Verifier().Verify(cq_.get(), time_now);
- Verifier().Verify(cq_.get(), time_now);
-
- auto resp_writer_ptr = &response_writer;
- auto lambda_2 = [&, this, resp_writer_ptr]() {
- service_->RequestEcho(&srv_ctx, &recv_request, resp_writer_ptr, cq_.get(),
- cq_.get(), tag(2));
- };
- response_reader->Finish(&recv_response, &recv_status, tag(4));
-
- Verifier().Expect(2, true).Verify(cq_.get(), time_limit, lambda_2);
- EXPECT_EQ(send_request.message(), recv_request.message());
-
- send_response.set_message(recv_request.message());
- auto lambda_3 = [resp_writer_ptr, send_response]() {
- resp_writer_ptr->Finish(send_response, Status::OK, tag(3));
- };
- Verifier().Expect(3, true).Expect(4, true).Verify(
- cq_.get(), std::chrono::system_clock::time_point::max(), lambda_3);
-
- EXPECT_EQ(send_response.message(), recv_response.message());
- EXPECT_TRUE(recv_status.ok());
-}
-
-// Two pings and a final pong.
-TEST_P(AsyncEnd2endTest, SimpleClientStreaming) {
- ResetStub();
-
- EchoRequest send_request;
- EchoRequest recv_request;
- EchoResponse send_response;
- EchoResponse recv_response;
- Status recv_status;
- ClientContext cli_ctx;
- ServerContext srv_ctx;
- ServerAsyncReader<EchoResponse, EchoRequest> srv_stream(&srv_ctx);
-
- send_request.set_message(GetParam().message_content);
- std::unique_ptr<ClientAsyncWriter<EchoRequest>> cli_stream(
- stub_->AsyncRequestStream(&cli_ctx, &recv_response, cq_.get(), tag(1)));
-
- service_->RequestRequestStream(&srv_ctx, &srv_stream, cq_.get(), cq_.get(),
- tag(2));
-
- Verifier().Expect(2, true).Expect(1, true).Verify(cq_.get());
-
- cli_stream->Write(send_request, tag(3));
- srv_stream.Read(&recv_request, tag(4));
- Verifier().Expect(3, true).Expect(4, true).Verify(cq_.get());
- EXPECT_EQ(send_request.message(), recv_request.message());
-
- cli_stream->Write(send_request, tag(5));
- srv_stream.Read(&recv_request, tag(6));
- Verifier().Expect(5, true).Expect(6, true).Verify(cq_.get());
-
- EXPECT_EQ(send_request.message(), recv_request.message());
- cli_stream->WritesDone(tag(7));
- srv_stream.Read(&recv_request, tag(8));
- Verifier().Expect(7, true).Expect(8, false).Verify(cq_.get());
-
- send_response.set_message(recv_request.message());
- srv_stream.Finish(send_response, Status::OK, tag(9));
- cli_stream->Finish(&recv_status, tag(10));
- Verifier().Expect(9, true).Expect(10, true).Verify(cq_.get());
-
- EXPECT_EQ(send_response.message(), recv_response.message());
- EXPECT_TRUE(recv_status.ok());
-}
-
-// Two pings and a final pong.
-TEST_P(AsyncEnd2endTest, SimpleClientStreamingWithCoalescingApi) {
- ResetStub();
-
- EchoRequest send_request;
- EchoRequest recv_request;
- EchoResponse send_response;
- EchoResponse recv_response;
- Status recv_status;
- ClientContext cli_ctx;
- ServerContext srv_ctx;
- ServerAsyncReader<EchoResponse, EchoRequest> srv_stream(&srv_ctx);
-
- send_request.set_message(GetParam().message_content);
- cli_ctx.set_initial_metadata_corked(true);
- // tag:1 never comes up since no op is performed
- std::unique_ptr<ClientAsyncWriter<EchoRequest>> cli_stream(
- stub_->AsyncRequestStream(&cli_ctx, &recv_response, cq_.get(), tag(1)));
-
- service_->RequestRequestStream(&srv_ctx, &srv_stream, cq_.get(), cq_.get(),
- tag(2));
-
- cli_stream->Write(send_request, tag(3));
-
- bool seen3 = false;
-
- Verifier().Expect(2, true).ExpectMaybe(3, true, &seen3).Verify(cq_.get());
-
- srv_stream.Read(&recv_request, tag(4));
-
- Verifier().ExpectUnless(3, true, seen3).Expect(4, true).Verify(cq_.get());
-
- EXPECT_EQ(send_request.message(), recv_request.message());
-
- cli_stream->WriteLast(send_request, WriteOptions(), tag(5));
- srv_stream.Read(&recv_request, tag(6));
- Verifier().Expect(5, true).Expect(6, true).Verify(cq_.get());
- EXPECT_EQ(send_request.message(), recv_request.message());
-
- srv_stream.Read(&recv_request, tag(7));
- Verifier().Expect(7, false).Verify(cq_.get());
-
- send_response.set_message(recv_request.message());
- srv_stream.Finish(send_response, Status::OK, tag(8));
- cli_stream->Finish(&recv_status, tag(9));
- Verifier().Expect(8, true).Expect(9, true).Verify(cq_.get());
-
- EXPECT_EQ(send_response.message(), recv_response.message());
- EXPECT_TRUE(recv_status.ok());
-}
-
-// One ping, two pongs.
-TEST_P(AsyncEnd2endTest, SimpleServerStreaming) {
- ResetStub();
-
- EchoRequest send_request;
- EchoRequest recv_request;
- EchoResponse send_response;
- EchoResponse recv_response;
- Status recv_status;
- ClientContext cli_ctx;
- ServerContext srv_ctx;
- ServerAsyncWriter<EchoResponse> srv_stream(&srv_ctx);
-
- send_request.set_message(GetParam().message_content);
- std::unique_ptr<ClientAsyncReader<EchoResponse>> cli_stream(
- stub_->AsyncResponseStream(&cli_ctx, send_request, cq_.get(), tag(1)));
-
- service_->RequestResponseStream(&srv_ctx, &recv_request, &srv_stream,
- cq_.get(), cq_.get(), tag(2));
-
- Verifier().Expect(1, true).Expect(2, true).Verify(cq_.get());
- EXPECT_EQ(send_request.message(), recv_request.message());
-
- send_response.set_message(recv_request.message());
- srv_stream.Write(send_response, tag(3));
- cli_stream->Read(&recv_response, tag(4));
- Verifier().Expect(3, true).Expect(4, true).Verify(cq_.get());
- EXPECT_EQ(send_response.message(), recv_response.message());
-
- srv_stream.Write(send_response, tag(5));
- cli_stream->Read(&recv_response, tag(6));
- Verifier().Expect(5, true).Expect(6, true).Verify(cq_.get());
- EXPECT_EQ(send_response.message(), recv_response.message());
-
- srv_stream.Finish(Status::OK, tag(7));
- cli_stream->Read(&recv_response, tag(8));
- Verifier().Expect(7, true).Expect(8, false).Verify(cq_.get());
-
- cli_stream->Finish(&recv_status, tag(9));
- Verifier().Expect(9, true).Verify(cq_.get());
-
- EXPECT_TRUE(recv_status.ok());
-}
-
-// One ping, two pongs. Using WriteAndFinish API
-TEST_P(AsyncEnd2endTest, SimpleServerStreamingWithCoalescingApiWAF) {
- ResetStub();
-
- EchoRequest send_request;
- EchoRequest recv_request;
- EchoResponse send_response;
- EchoResponse recv_response;
- Status recv_status;
- ClientContext cli_ctx;
- ServerContext srv_ctx;
- ServerAsyncWriter<EchoResponse> srv_stream(&srv_ctx);
-
- send_request.set_message(GetParam().message_content);
- std::unique_ptr<ClientAsyncReader<EchoResponse>> cli_stream(
- stub_->AsyncResponseStream(&cli_ctx, send_request, cq_.get(), tag(1)));
-
- service_->RequestResponseStream(&srv_ctx, &recv_request, &srv_stream,
- cq_.get(), cq_.get(), tag(2));
-
- Verifier().Expect(1, true).Expect(2, true).Verify(cq_.get());
- EXPECT_EQ(send_request.message(), recv_request.message());
-
- send_response.set_message(recv_request.message());
- srv_stream.Write(send_response, tag(3));
- cli_stream->Read(&recv_response, tag(4));
- Verifier().Expect(3, true).Expect(4, true).Verify(cq_.get());
- EXPECT_EQ(send_response.message(), recv_response.message());
-
- srv_stream.WriteAndFinish(send_response, WriteOptions(), Status::OK, tag(5));
- cli_stream->Read(&recv_response, tag(6));
- Verifier().Expect(5, true).Expect(6, true).Verify(cq_.get());
- EXPECT_EQ(send_response.message(), recv_response.message());
-
- cli_stream->Read(&recv_response, tag(7));
- Verifier().Expect(7, false).Verify(cq_.get());
-
- cli_stream->Finish(&recv_status, tag(8));
- Verifier().Expect(8, true).Verify(cq_.get());
-
- EXPECT_TRUE(recv_status.ok());
-}
-
-// One ping, two pongs. Using WriteLast API
-TEST_P(AsyncEnd2endTest, SimpleServerStreamingWithCoalescingApiWL) {
- ResetStub();
-
- EchoRequest send_request;
- EchoRequest recv_request;
- EchoResponse send_response;
- EchoResponse recv_response;
- Status recv_status;
- ClientContext cli_ctx;
- ServerContext srv_ctx;
- ServerAsyncWriter<EchoResponse> srv_stream(&srv_ctx);
-
- send_request.set_message(GetParam().message_content);
- std::unique_ptr<ClientAsyncReader<EchoResponse>> cli_stream(
- stub_->AsyncResponseStream(&cli_ctx, send_request, cq_.get(), tag(1)));
-
- service_->RequestResponseStream(&srv_ctx, &recv_request, &srv_stream,
- cq_.get(), cq_.get(), tag(2));
-
- Verifier().Expect(1, true).Expect(2, true).Verify(cq_.get());
- EXPECT_EQ(send_request.message(), recv_request.message());
-
- send_response.set_message(recv_request.message());
- srv_stream.Write(send_response, tag(3));
- cli_stream->Read(&recv_response, tag(4));
- Verifier().Expect(3, true).Expect(4, true).Verify(cq_.get());
- EXPECT_EQ(send_response.message(), recv_response.message());
-
- srv_stream.WriteLast(send_response, WriteOptions(), tag(5));
- cli_stream->Read(&recv_response, tag(6));
- srv_stream.Finish(Status::OK, tag(7));
- Verifier().Expect(5, true).Expect(6, true).Expect(7, true).Verify(cq_.get());
- EXPECT_EQ(send_response.message(), recv_response.message());
-
- cli_stream->Read(&recv_response, tag(8));
- Verifier().Expect(8, false).Verify(cq_.get());
-
- cli_stream->Finish(&recv_status, tag(9));
- Verifier().Expect(9, true).Verify(cq_.get());
-
- EXPECT_TRUE(recv_status.ok());
-}
-
-// One ping, one pong.
-TEST_P(AsyncEnd2endTest, SimpleBidiStreaming) {
- ResetStub();
-
- EchoRequest send_request;
- EchoRequest recv_request;
- EchoResponse send_response;
- EchoResponse recv_response;
- Status recv_status;
- ClientContext cli_ctx;
- ServerContext srv_ctx;
- ServerAsyncReaderWriter<EchoResponse, EchoRequest> srv_stream(&srv_ctx);
-
- send_request.set_message(GetParam().message_content);
- std::unique_ptr<ClientAsyncReaderWriter<EchoRequest, EchoResponse>>
- cli_stream(stub_->AsyncBidiStream(&cli_ctx, cq_.get(), tag(1)));
-
- service_->RequestBidiStream(&srv_ctx, &srv_stream, cq_.get(), cq_.get(),
- tag(2));
-
- Verifier().Expect(1, true).Expect(2, true).Verify(cq_.get());
-
- cli_stream->Write(send_request, tag(3));
- srv_stream.Read(&recv_request, tag(4));
- Verifier().Expect(3, true).Expect(4, true).Verify(cq_.get());
- EXPECT_EQ(send_request.message(), recv_request.message());
-
- send_response.set_message(recv_request.message());
- srv_stream.Write(send_response, tag(5));
- cli_stream->Read(&recv_response, tag(6));
- Verifier().Expect(5, true).Expect(6, true).Verify(cq_.get());
- EXPECT_EQ(send_response.message(), recv_response.message());
-
- cli_stream->WritesDone(tag(7));
- srv_stream.Read(&recv_request, tag(8));
- Verifier().Expect(7, true).Expect(8, false).Verify(cq_.get());
-
- srv_stream.Finish(Status::OK, tag(9));
- cli_stream->Finish(&recv_status, tag(10));
- Verifier().Expect(9, true).Expect(10, true).Verify(cq_.get());
-
- EXPECT_TRUE(recv_status.ok());
-}
-
-// One ping, one pong. Using server:WriteAndFinish api
-TEST_P(AsyncEnd2endTest, SimpleBidiStreamingWithCoalescingApiWAF) {
- ResetStub();
-
- EchoRequest send_request;
- EchoRequest recv_request;
- EchoResponse send_response;
- EchoResponse recv_response;
- Status recv_status;
- ClientContext cli_ctx;
- ServerContext srv_ctx;
- ServerAsyncReaderWriter<EchoResponse, EchoRequest> srv_stream(&srv_ctx);
-
- send_request.set_message(GetParam().message_content);
- cli_ctx.set_initial_metadata_corked(true);
- std::unique_ptr<ClientAsyncReaderWriter<EchoRequest, EchoResponse>>
- cli_stream(stub_->AsyncBidiStream(&cli_ctx, cq_.get(), tag(1)));
-
- service_->RequestBidiStream(&srv_ctx, &srv_stream, cq_.get(), cq_.get(),
- tag(2));
-
- cli_stream->WriteLast(send_request, WriteOptions(), tag(3));
-
- bool seen3 = false;
-
- Verifier().Expect(2, true).ExpectMaybe(3, true, &seen3).Verify(cq_.get());
-
- srv_stream.Read(&recv_request, tag(4));
-
- Verifier().ExpectUnless(3, true, seen3).Expect(4, true).Verify(cq_.get());
- EXPECT_EQ(send_request.message(), recv_request.message());
-
- srv_stream.Read(&recv_request, tag(5));
- Verifier().Expect(5, false).Verify(cq_.get());
-
- send_response.set_message(recv_request.message());
- srv_stream.WriteAndFinish(send_response, WriteOptions(), Status::OK, tag(6));
- cli_stream->Read(&recv_response, tag(7));
- Verifier().Expect(6, true).Expect(7, true).Verify(cq_.get());
- EXPECT_EQ(send_response.message(), recv_response.message());
-
- cli_stream->Finish(&recv_status, tag(8));
- Verifier().Expect(8, true).Verify(cq_.get());
-
- EXPECT_TRUE(recv_status.ok());
-}
-
-// One ping, one pong. Using server:WriteLast api
-TEST_P(AsyncEnd2endTest, SimpleBidiStreamingWithCoalescingApiWL) {
- ResetStub();
-
- EchoRequest send_request;
- EchoRequest recv_request;
- EchoResponse send_response;
- EchoResponse recv_response;
- Status recv_status;
- ClientContext cli_ctx;
- ServerContext srv_ctx;
- ServerAsyncReaderWriter<EchoResponse, EchoRequest> srv_stream(&srv_ctx);
-
- send_request.set_message(GetParam().message_content);
- cli_ctx.set_initial_metadata_corked(true);
- std::unique_ptr<ClientAsyncReaderWriter<EchoRequest, EchoResponse>>
- cli_stream(stub_->AsyncBidiStream(&cli_ctx, cq_.get(), tag(1)));
-
- service_->RequestBidiStream(&srv_ctx, &srv_stream, cq_.get(), cq_.get(),
- tag(2));
-
- cli_stream->WriteLast(send_request, WriteOptions(), tag(3));
-
- bool seen3 = false;
-
- Verifier().Expect(2, true).ExpectMaybe(3, true, &seen3).Verify(cq_.get());
-
- srv_stream.Read(&recv_request, tag(4));
-
- Verifier().ExpectUnless(3, true, seen3).Expect(4, true).Verify(cq_.get());
- EXPECT_EQ(send_request.message(), recv_request.message());
-
- srv_stream.Read(&recv_request, tag(5));
- Verifier().Expect(5, false).Verify(cq_.get());
-
- send_response.set_message(recv_request.message());
- srv_stream.WriteLast(send_response, WriteOptions(), tag(6));
- srv_stream.Finish(Status::OK, tag(7));
- cli_stream->Read(&recv_response, tag(8));
- Verifier().Expect(6, true).Expect(7, true).Expect(8, true).Verify(cq_.get());
- EXPECT_EQ(send_response.message(), recv_response.message());
-
- cli_stream->Finish(&recv_status, tag(9));
- Verifier().Expect(9, true).Verify(cq_.get());
-
- EXPECT_TRUE(recv_status.ok());
-}
-
-// Metadata tests
-TEST_P(AsyncEnd2endTest, ClientInitialMetadataRpc) {
- ResetStub();
-
- EchoRequest send_request;
- EchoRequest recv_request;
- EchoResponse send_response;
- EchoResponse recv_response;
- Status recv_status;
-
- ClientContext cli_ctx;
- ServerContext srv_ctx;
- grpc::ServerAsyncResponseWriter<EchoResponse> response_writer(&srv_ctx);
-
- send_request.set_message(GetParam().message_content);
+TEST_P(AsyncEnd2endTest, SequentialRpcs) {
+ ResetStub();
+ SendRpc(10);
+}
+
+TEST_P(AsyncEnd2endTest, ReconnectChannel) {
+ // GRPC_CLIENT_CHANNEL_BACKUP_POLL_INTERVAL_MS is set to 100ms in main()
+ if (GetParam().inproc) {
+ return;
+ }
+ int poller_slowdown_factor = 1;
+#ifdef GRPC_POSIX_SOCKET_EV
+  // The channel needs two pollset_work iterations to reconnect when the
+  // polling engine is "poll".
+ grpc_core::UniquePtr<char> poller = GPR_GLOBAL_CONFIG_GET(grpc_poll_strategy);
+ if (0 == strcmp(poller.get(), "poll")) {
+ poller_slowdown_factor = 2;
+ }
+#endif // GRPC_POSIX_SOCKET_EV
+ ResetStub();
+ SendRpc(1);
+ server_->Shutdown();
+ void* ignored_tag;
+ bool ignored_ok;
+ cq_->Shutdown();
+ while (cq_->Next(&ignored_tag, &ignored_ok))
+ ;
+ BuildAndStartServer();
+ // It needs more than GRPC_CLIENT_CHANNEL_BACKUP_POLL_INTERVAL_MS time to
+ // reconnect the channel.
+ gpr_sleep_until(gpr_time_add(
+ gpr_now(GPR_CLOCK_REALTIME),
+ gpr_time_from_millis(
+ 300 * poller_slowdown_factor * grpc_test_slowdown_factor(),
+ GPR_TIMESPAN)));
+ SendRpc(1);
+}
+
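To make the wait above concrete, the sleep is sized as a multiple of the backup-poll interval; a worked reading of the expression (assuming the 100 ms interval stated in the comment and no sanitizer slowdown):

  // base wait                   = 300 ms  (3x the 100 ms backup poll interval)
  // poller_slowdown_factor      = 2 when grpc_poll_strategy == "poll", else 1
  // grpc_test_slowdown_factor() = 1 assumed (no msan/tsan/valgrind multiplier)
  const int wait_ms = 300 * poller_slowdown_factor * grpc_test_slowdown_factor();
  // => 600 ms with the "poll" engine, 300 ms otherwise.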
+// We do not need to protect notify because the use is synchronized.
+void ServerWait(Server* server, int* notify) {
+ server->Wait();
+ *notify = 1;
+}
+TEST_P(AsyncEnd2endTest, WaitAndShutdownTest) {
+ int notify = 0;
+ std::thread wait_thread(&ServerWait, server_.get(), &notify);
+ ResetStub();
+ SendRpc(1);
+ EXPECT_EQ(0, notify);
+ server_->Shutdown();
+ wait_thread.join();
+ EXPECT_EQ(1, notify);
+}
+
+TEST_P(AsyncEnd2endTest, ShutdownThenWait) {
+ ResetStub();
+ SendRpc(1);
+ std::thread t([this]() { server_->Shutdown(); });
+ server_->Wait();
+ t.join();
+}
+
+// Test a simple RPC using the async version of Next
+TEST_P(AsyncEnd2endTest, AsyncNextRpc) {
+ ResetStub();
+
+ EchoRequest send_request;
+ EchoRequest recv_request;
+ EchoResponse send_response;
+ EchoResponse recv_response;
+ Status recv_status;
+
+ ClientContext cli_ctx;
+ ServerContext srv_ctx;
+ grpc::ServerAsyncResponseWriter<EchoResponse> response_writer(&srv_ctx);
+
+ send_request.set_message(GetParam().message_content);
+ std::unique_ptr<ClientAsyncResponseReader<EchoResponse>> response_reader(
+ stub_->AsyncEcho(&cli_ctx, send_request, cq_.get()));
+
+ std::chrono::system_clock::time_point time_now(
+ std::chrono::system_clock::now());
+ std::chrono::system_clock::time_point time_limit(
+ std::chrono::system_clock::now() + std::chrono::seconds(10));
+ Verifier().Verify(cq_.get(), time_now);
+ Verifier().Verify(cq_.get(), time_now);
+
+ service_->RequestEcho(&srv_ctx, &recv_request, &response_writer, cq_.get(),
+ cq_.get(), tag(2));
+ response_reader->Finish(&recv_response, &recv_status, tag(4));
+
+ Verifier().Expect(2, true).Verify(cq_.get(), time_limit);
+ EXPECT_EQ(send_request.message(), recv_request.message());
+
+ send_response.set_message(recv_request.message());
+ response_writer.Finish(send_response, Status::OK, tag(3));
+ Verifier().Expect(3, true).Expect(4, true).Verify(
+ cq_.get(), std::chrono::system_clock::time_point::max());
+
+ EXPECT_EQ(send_response.message(), recv_response.message());
+ EXPECT_TRUE(recv_status.ok());
+}
+
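AsyncNextRpc exercises the deadline-bounded flavor of Verify: with an already-expired deadline the queue is polled without blocking, which is why the two Verify(cq_.get(), time_now) calls above are expected to deliver nothing. A minimal sketch of the underlying call (illustrative only):

  #include <chrono>
  #include <grpcpp/completion_queue.h>

  // Returns GOT_EVENT, TIMEOUT, or SHUTDOWN. An expired deadline turns this
  // into a non-blocking poll; a far-future deadline makes it behave like Next().
  grpc::CompletionQueue::NextStatus PollOnce(
      grpc::CompletionQueue* cq,
      std::chrono::system_clock::time_point deadline, void** tag, bool* ok) {
    return cq->AsyncNext(tag, ok, deadline);
  }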
+// Test a simple RPC using the async version of Next, with the server-side ops
+// deferred into lambdas passed to Verify
+TEST_P(AsyncEnd2endTest, DoThenAsyncNextRpc) {
+ ResetStub();
+
+ EchoRequest send_request;
+ EchoRequest recv_request;
+ EchoResponse send_response;
+ EchoResponse recv_response;
+ Status recv_status;
+
+ ClientContext cli_ctx;
+ ServerContext srv_ctx;
+ grpc::ServerAsyncResponseWriter<EchoResponse> response_writer(&srv_ctx);
+
+ send_request.set_message(GetParam().message_content);
+ std::unique_ptr<ClientAsyncResponseReader<EchoResponse>> response_reader(
+ stub_->AsyncEcho(&cli_ctx, send_request, cq_.get()));
+
+ std::chrono::system_clock::time_point time_now(
+ std::chrono::system_clock::now());
+ std::chrono::system_clock::time_point time_limit(
+ std::chrono::system_clock::now() + std::chrono::seconds(10));
+ Verifier().Verify(cq_.get(), time_now);
+ Verifier().Verify(cq_.get(), time_now);
+
+ auto resp_writer_ptr = &response_writer;
+ auto lambda_2 = [&, this, resp_writer_ptr]() {
+ service_->RequestEcho(&srv_ctx, &recv_request, resp_writer_ptr, cq_.get(),
+ cq_.get(), tag(2));
+ };
+ response_reader->Finish(&recv_response, &recv_status, tag(4));
+
+ Verifier().Expect(2, true).Verify(cq_.get(), time_limit, lambda_2);
+ EXPECT_EQ(send_request.message(), recv_request.message());
+
+ send_response.set_message(recv_request.message());
+ auto lambda_3 = [resp_writer_ptr, send_response]() {
+ resp_writer_ptr->Finish(send_response, Status::OK, tag(3));
+ };
+ Verifier().Expect(3, true).Expect(4, true).Verify(
+ cq_.get(), std::chrono::system_clock::time_point::max(), lambda_3);
+
+ EXPECT_EQ(send_response.message(), recv_response.message());
+ EXPECT_TRUE(recv_status.ok());
+}
+
+// Two pings and a final pong.
+TEST_P(AsyncEnd2endTest, SimpleClientStreaming) {
+ ResetStub();
+
+ EchoRequest send_request;
+ EchoRequest recv_request;
+ EchoResponse send_response;
+ EchoResponse recv_response;
+ Status recv_status;
+ ClientContext cli_ctx;
+ ServerContext srv_ctx;
+ ServerAsyncReader<EchoResponse, EchoRequest> srv_stream(&srv_ctx);
+
+ send_request.set_message(GetParam().message_content);
+ std::unique_ptr<ClientAsyncWriter<EchoRequest>> cli_stream(
+ stub_->AsyncRequestStream(&cli_ctx, &recv_response, cq_.get(), tag(1)));
+
+ service_->RequestRequestStream(&srv_ctx, &srv_stream, cq_.get(), cq_.get(),
+ tag(2));
+
+ Verifier().Expect(2, true).Expect(1, true).Verify(cq_.get());
+
+ cli_stream->Write(send_request, tag(3));
+ srv_stream.Read(&recv_request, tag(4));
+ Verifier().Expect(3, true).Expect(4, true).Verify(cq_.get());
+ EXPECT_EQ(send_request.message(), recv_request.message());
+
+ cli_stream->Write(send_request, tag(5));
+ srv_stream.Read(&recv_request, tag(6));
+ Verifier().Expect(5, true).Expect(6, true).Verify(cq_.get());
+
+ EXPECT_EQ(send_request.message(), recv_request.message());
+ cli_stream->WritesDone(tag(7));
+ srv_stream.Read(&recv_request, tag(8));
+ Verifier().Expect(7, true).Expect(8, false).Verify(cq_.get());
+
+ send_response.set_message(recv_request.message());
+ srv_stream.Finish(send_response, Status::OK, tag(9));
+ cli_stream->Finish(&recv_status, tag(10));
+ Verifier().Expect(9, true).Expect(10, true).Verify(cq_.get());
+
+ EXPECT_EQ(send_response.message(), recv_response.message());
+ EXPECT_TRUE(recv_status.ok());
+}
+
+// Two pings and a final pong.
+TEST_P(AsyncEnd2endTest, SimpleClientStreamingWithCoalescingApi) {
+ ResetStub();
+
+ EchoRequest send_request;
+ EchoRequest recv_request;
+ EchoResponse send_response;
+ EchoResponse recv_response;
+ Status recv_status;
+ ClientContext cli_ctx;
+ ServerContext srv_ctx;
+ ServerAsyncReader<EchoResponse, EchoRequest> srv_stream(&srv_ctx);
+
+ send_request.set_message(GetParam().message_content);
+ cli_ctx.set_initial_metadata_corked(true);
+ // tag:1 never comes up since no op is performed
+ std::unique_ptr<ClientAsyncWriter<EchoRequest>> cli_stream(
+ stub_->AsyncRequestStream(&cli_ctx, &recv_response, cq_.get(), tag(1)));
+
+ service_->RequestRequestStream(&srv_ctx, &srv_stream, cq_.get(), cq_.get(),
+ tag(2));
+
+ cli_stream->Write(send_request, tag(3));
+
+ bool seen3 = false;
+
+ Verifier().Expect(2, true).ExpectMaybe(3, true, &seen3).Verify(cq_.get());
+
+ srv_stream.Read(&recv_request, tag(4));
+
+ Verifier().ExpectUnless(3, true, seen3).Expect(4, true).Verify(cq_.get());
+
+ EXPECT_EQ(send_request.message(), recv_request.message());
+
+ cli_stream->WriteLast(send_request, WriteOptions(), tag(5));
+ srv_stream.Read(&recv_request, tag(6));
+ Verifier().Expect(5, true).Expect(6, true).Verify(cq_.get());
+ EXPECT_EQ(send_request.message(), recv_request.message());
+
+ srv_stream.Read(&recv_request, tag(7));
+ Verifier().Expect(7, false).Verify(cq_.get());
+
+ send_response.set_message(recv_request.message());
+ srv_stream.Finish(send_response, Status::OK, tag(8));
+ cli_stream->Finish(&recv_status, tag(9));
+ Verifier().Expect(8, true).Expect(9, true).Verify(cq_.get());
+
+ EXPECT_EQ(send_response.message(), recv_response.message());
+ EXPECT_TRUE(recv_status.ok());
+}
+
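The coalescing test above hinges on corked client metadata: with set_initial_metadata_corked(true) the stream constructor starts no operation (so its tag never surfaces on the queue) and the initial metadata is sent together with the first write. A condensed sketch of that pattern, reusing the fixture members (stub_, cq_) and the tag() helper defined elsewhere in this file:

  ClientContext ctx;
  EchoRequest req;
  EchoResponse resp;
  ctx.set_initial_metadata_corked(true);
  // No op is issued here, so tag(1) is never delivered to the completion queue.
  auto cli_stream = stub_->AsyncRequestStream(&ctx, &resp, cq_.get(), tag(1));
  // Initial metadata, the message, and the half-close go out as a single batch.
  cli_stream->WriteLast(req, WriteOptions(), tag(2));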
+// One ping, two pongs.
+TEST_P(AsyncEnd2endTest, SimpleServerStreaming) {
+ ResetStub();
+
+ EchoRequest send_request;
+ EchoRequest recv_request;
+ EchoResponse send_response;
+ EchoResponse recv_response;
+ Status recv_status;
+ ClientContext cli_ctx;
+ ServerContext srv_ctx;
+ ServerAsyncWriter<EchoResponse> srv_stream(&srv_ctx);
+
+ send_request.set_message(GetParam().message_content);
+ std::unique_ptr<ClientAsyncReader<EchoResponse>> cli_stream(
+ stub_->AsyncResponseStream(&cli_ctx, send_request, cq_.get(), tag(1)));
+
+ service_->RequestResponseStream(&srv_ctx, &recv_request, &srv_stream,
+ cq_.get(), cq_.get(), tag(2));
+
+ Verifier().Expect(1, true).Expect(2, true).Verify(cq_.get());
+ EXPECT_EQ(send_request.message(), recv_request.message());
+
+ send_response.set_message(recv_request.message());
+ srv_stream.Write(send_response, tag(3));
+ cli_stream->Read(&recv_response, tag(4));
+ Verifier().Expect(3, true).Expect(4, true).Verify(cq_.get());
+ EXPECT_EQ(send_response.message(), recv_response.message());
+
+ srv_stream.Write(send_response, tag(5));
+ cli_stream->Read(&recv_response, tag(6));
+ Verifier().Expect(5, true).Expect(6, true).Verify(cq_.get());
+ EXPECT_EQ(send_response.message(), recv_response.message());
+
+ srv_stream.Finish(Status::OK, tag(7));
+ cli_stream->Read(&recv_response, tag(8));
+ Verifier().Expect(7, true).Expect(8, false).Verify(cq_.get());
+
+ cli_stream->Finish(&recv_status, tag(9));
+ Verifier().Expect(9, true).Verify(cq_.get());
+
+ EXPECT_TRUE(recv_status.ok());
+}
+
+// One ping, two pongs. Using WriteAndFinish API
+TEST_P(AsyncEnd2endTest, SimpleServerStreamingWithCoalescingApiWAF) {
+ ResetStub();
+
+ EchoRequest send_request;
+ EchoRequest recv_request;
+ EchoResponse send_response;
+ EchoResponse recv_response;
+ Status recv_status;
+ ClientContext cli_ctx;
+ ServerContext srv_ctx;
+ ServerAsyncWriter<EchoResponse> srv_stream(&srv_ctx);
+
+ send_request.set_message(GetParam().message_content);
+ std::unique_ptr<ClientAsyncReader<EchoResponse>> cli_stream(
+ stub_->AsyncResponseStream(&cli_ctx, send_request, cq_.get(), tag(1)));
+
+ service_->RequestResponseStream(&srv_ctx, &recv_request, &srv_stream,
+ cq_.get(), cq_.get(), tag(2));
+
+ Verifier().Expect(1, true).Expect(2, true).Verify(cq_.get());
+ EXPECT_EQ(send_request.message(), recv_request.message());
+
+ send_response.set_message(recv_request.message());
+ srv_stream.Write(send_response, tag(3));
+ cli_stream->Read(&recv_response, tag(4));
+ Verifier().Expect(3, true).Expect(4, true).Verify(cq_.get());
+ EXPECT_EQ(send_response.message(), recv_response.message());
+
+ srv_stream.WriteAndFinish(send_response, WriteOptions(), Status::OK, tag(5));
+ cli_stream->Read(&recv_response, tag(6));
+ Verifier().Expect(5, true).Expect(6, true).Verify(cq_.get());
+ EXPECT_EQ(send_response.message(), recv_response.message());
+
+ cli_stream->Read(&recv_response, tag(7));
+ Verifier().Expect(7, false).Verify(cq_.get());
+
+ cli_stream->Finish(&recv_status, tag(8));
+ Verifier().Expect(8, true).Verify(cq_.get());
+
+ EXPECT_TRUE(recv_status.ok());
+}
+
+// One ping, two pongs. Using WriteLast API
+TEST_P(AsyncEnd2endTest, SimpleServerStreamingWithCoalescingApiWL) {
+ ResetStub();
+
+ EchoRequest send_request;
+ EchoRequest recv_request;
+ EchoResponse send_response;
+ EchoResponse recv_response;
+ Status recv_status;
+ ClientContext cli_ctx;
+ ServerContext srv_ctx;
+ ServerAsyncWriter<EchoResponse> srv_stream(&srv_ctx);
+
+ send_request.set_message(GetParam().message_content);
+ std::unique_ptr<ClientAsyncReader<EchoResponse>> cli_stream(
+ stub_->AsyncResponseStream(&cli_ctx, send_request, cq_.get(), tag(1)));
+
+ service_->RequestResponseStream(&srv_ctx, &recv_request, &srv_stream,
+ cq_.get(), cq_.get(), tag(2));
+
+ Verifier().Expect(1, true).Expect(2, true).Verify(cq_.get());
+ EXPECT_EQ(send_request.message(), recv_request.message());
+
+ send_response.set_message(recv_request.message());
+ srv_stream.Write(send_response, tag(3));
+ cli_stream->Read(&recv_response, tag(4));
+ Verifier().Expect(3, true).Expect(4, true).Verify(cq_.get());
+ EXPECT_EQ(send_response.message(), recv_response.message());
+
+ srv_stream.WriteLast(send_response, WriteOptions(), tag(5));
+ cli_stream->Read(&recv_response, tag(6));
+ srv_stream.Finish(Status::OK, tag(7));
+ Verifier().Expect(5, true).Expect(6, true).Expect(7, true).Verify(cq_.get());
+ EXPECT_EQ(send_response.message(), recv_response.message());
+
+ cli_stream->Read(&recv_response, tag(8));
+ Verifier().Expect(8, false).Verify(cq_.get());
+
+ cli_stream->Finish(&recv_status, tag(9));
+ Verifier().Expect(9, true).Verify(cq_.get());
+
+ EXPECT_TRUE(recv_status.ok());
+}
+
+// One ping, one pong.
+TEST_P(AsyncEnd2endTest, SimpleBidiStreaming) {
+ ResetStub();
+
+ EchoRequest send_request;
+ EchoRequest recv_request;
+ EchoResponse send_response;
+ EchoResponse recv_response;
+ Status recv_status;
+ ClientContext cli_ctx;
+ ServerContext srv_ctx;
+ ServerAsyncReaderWriter<EchoResponse, EchoRequest> srv_stream(&srv_ctx);
+
+ send_request.set_message(GetParam().message_content);
+ std::unique_ptr<ClientAsyncReaderWriter<EchoRequest, EchoResponse>>
+ cli_stream(stub_->AsyncBidiStream(&cli_ctx, cq_.get(), tag(1)));
+
+ service_->RequestBidiStream(&srv_ctx, &srv_stream, cq_.get(), cq_.get(),
+ tag(2));
+
+ Verifier().Expect(1, true).Expect(2, true).Verify(cq_.get());
+
+ cli_stream->Write(send_request, tag(3));
+ srv_stream.Read(&recv_request, tag(4));
+ Verifier().Expect(3, true).Expect(4, true).Verify(cq_.get());
+ EXPECT_EQ(send_request.message(), recv_request.message());
+
+ send_response.set_message(recv_request.message());
+ srv_stream.Write(send_response, tag(5));
+ cli_stream->Read(&recv_response, tag(6));
+ Verifier().Expect(5, true).Expect(6, true).Verify(cq_.get());
+ EXPECT_EQ(send_response.message(), recv_response.message());
+
+ cli_stream->WritesDone(tag(7));
+ srv_stream.Read(&recv_request, tag(8));
+ Verifier().Expect(7, true).Expect(8, false).Verify(cq_.get());
+
+ srv_stream.Finish(Status::OK, tag(9));
+ cli_stream->Finish(&recv_status, tag(10));
+ Verifier().Expect(9, true).Expect(10, true).Verify(cq_.get());
+
+ EXPECT_TRUE(recv_status.ok());
+}
+
+// One ping, one pong. Using the server-side WriteAndFinish API
+TEST_P(AsyncEnd2endTest, SimpleBidiStreamingWithCoalescingApiWAF) {
+ ResetStub();
+
+ EchoRequest send_request;
+ EchoRequest recv_request;
+ EchoResponse send_response;
+ EchoResponse recv_response;
+ Status recv_status;
+ ClientContext cli_ctx;
+ ServerContext srv_ctx;
+ ServerAsyncReaderWriter<EchoResponse, EchoRequest> srv_stream(&srv_ctx);
+
+ send_request.set_message(GetParam().message_content);
+ cli_ctx.set_initial_metadata_corked(true);
+ std::unique_ptr<ClientAsyncReaderWriter<EchoRequest, EchoResponse>>
+ cli_stream(stub_->AsyncBidiStream(&cli_ctx, cq_.get(), tag(1)));
+
+ service_->RequestBidiStream(&srv_ctx, &srv_stream, cq_.get(), cq_.get(),
+ tag(2));
+
+ cli_stream->WriteLast(send_request, WriteOptions(), tag(3));
+
+ bool seen3 = false;
+
+ Verifier().Expect(2, true).ExpectMaybe(3, true, &seen3).Verify(cq_.get());
+
+ srv_stream.Read(&recv_request, tag(4));
+
+ Verifier().ExpectUnless(3, true, seen3).Expect(4, true).Verify(cq_.get());
+ EXPECT_EQ(send_request.message(), recv_request.message());
+
+ srv_stream.Read(&recv_request, tag(5));
+ Verifier().Expect(5, false).Verify(cq_.get());
+
+ send_response.set_message(recv_request.message());
+ srv_stream.WriteAndFinish(send_response, WriteOptions(), Status::OK, tag(6));
+ cli_stream->Read(&recv_response, tag(7));
+ Verifier().Expect(6, true).Expect(7, true).Verify(cq_.get());
+ EXPECT_EQ(send_response.message(), recv_response.message());
+
+ cli_stream->Finish(&recv_status, tag(8));
+ Verifier().Expect(8, true).Verify(cq_.get());
+
+ EXPECT_TRUE(recv_status.ok());
+}
+
+// One ping, one pong. Using the server-side WriteLast API
+TEST_P(AsyncEnd2endTest, SimpleBidiStreamingWithCoalescingApiWL) {
+ ResetStub();
+
+ EchoRequest send_request;
+ EchoRequest recv_request;
+ EchoResponse send_response;
+ EchoResponse recv_response;
+ Status recv_status;
+ ClientContext cli_ctx;
+ ServerContext srv_ctx;
+ ServerAsyncReaderWriter<EchoResponse, EchoRequest> srv_stream(&srv_ctx);
+
+ send_request.set_message(GetParam().message_content);
+ cli_ctx.set_initial_metadata_corked(true);
+ std::unique_ptr<ClientAsyncReaderWriter<EchoRequest, EchoResponse>>
+ cli_stream(stub_->AsyncBidiStream(&cli_ctx, cq_.get(), tag(1)));
+
+ service_->RequestBidiStream(&srv_ctx, &srv_stream, cq_.get(), cq_.get(),
+ tag(2));
+
+ cli_stream->WriteLast(send_request, WriteOptions(), tag(3));
+
+ bool seen3 = false;
+
+ Verifier().Expect(2, true).ExpectMaybe(3, true, &seen3).Verify(cq_.get());
+
+ srv_stream.Read(&recv_request, tag(4));
+
+ Verifier().ExpectUnless(3, true, seen3).Expect(4, true).Verify(cq_.get());
+ EXPECT_EQ(send_request.message(), recv_request.message());
+
+ srv_stream.Read(&recv_request, tag(5));
+ Verifier().Expect(5, false).Verify(cq_.get());
+
+ send_response.set_message(recv_request.message());
+ srv_stream.WriteLast(send_response, WriteOptions(), tag(6));
+ srv_stream.Finish(Status::OK, tag(7));
+ cli_stream->Read(&recv_response, tag(8));
+ Verifier().Expect(6, true).Expect(7, true).Expect(8, true).Verify(cq_.get());
+ EXPECT_EQ(send_response.message(), recv_response.message());
+
+ cli_stream->Finish(&recv_status, tag(9));
+ Verifier().Expect(9, true).Verify(cq_.get());
+
+ EXPECT_TRUE(recv_status.ok());
+}
+
+// Metadata tests
+TEST_P(AsyncEnd2endTest, ClientInitialMetadataRpc) {
+ ResetStub();
+
+ EchoRequest send_request;
+ EchoRequest recv_request;
+ EchoResponse send_response;
+ EchoResponse recv_response;
+ Status recv_status;
+
+ ClientContext cli_ctx;
+ ServerContext srv_ctx;
+ grpc::ServerAsyncResponseWriter<EchoResponse> response_writer(&srv_ctx);
+
+ send_request.set_message(GetParam().message_content);
std::pair<TString, TString> meta1("key1", "val1");
std::pair<TString, TString> meta2("key2", "val2");
std::pair<TString, TString> meta3("g.r.d-bin", "xyz");
- cli_ctx.AddMetadata(meta1.first, meta1.second);
- cli_ctx.AddMetadata(meta2.first, meta2.second);
- cli_ctx.AddMetadata(meta3.first, meta3.second);
-
- std::unique_ptr<ClientAsyncResponseReader<EchoResponse>> response_reader(
- stub_->AsyncEcho(&cli_ctx, send_request, cq_.get()));
- response_reader->Finish(&recv_response, &recv_status, tag(4));
-
- service_->RequestEcho(&srv_ctx, &recv_request, &response_writer, cq_.get(),
- cq_.get(), tag(2));
- Verifier().Expect(2, true).Verify(cq_.get());
- EXPECT_EQ(send_request.message(), recv_request.message());
- const auto& client_initial_metadata = srv_ctx.client_metadata();
- EXPECT_EQ(meta1.second,
- ToString(client_initial_metadata.find(meta1.first)->second));
- EXPECT_EQ(meta2.second,
- ToString(client_initial_metadata.find(meta2.first)->second));
- EXPECT_EQ(meta3.second,
- ToString(client_initial_metadata.find(meta3.first)->second));
- EXPECT_GE(client_initial_metadata.size(), static_cast<size_t>(2));
-
- send_response.set_message(recv_request.message());
- response_writer.Finish(send_response, Status::OK, tag(3));
- Verifier().Expect(3, true).Expect(4, true).Verify(cq_.get());
-
- EXPECT_EQ(send_response.message(), recv_response.message());
- EXPECT_TRUE(recv_status.ok());
-}
-
-TEST_P(AsyncEnd2endTest, ServerInitialMetadataRpc) {
- ResetStub();
-
- EchoRequest send_request;
- EchoRequest recv_request;
- EchoResponse send_response;
- EchoResponse recv_response;
- Status recv_status;
-
- ClientContext cli_ctx;
- ServerContext srv_ctx;
- grpc::ServerAsyncResponseWriter<EchoResponse> response_writer(&srv_ctx);
-
- send_request.set_message(GetParam().message_content);
+ cli_ctx.AddMetadata(meta1.first, meta1.second);
+ cli_ctx.AddMetadata(meta2.first, meta2.second);
+ cli_ctx.AddMetadata(meta3.first, meta3.second);
+
+ std::unique_ptr<ClientAsyncResponseReader<EchoResponse>> response_reader(
+ stub_->AsyncEcho(&cli_ctx, send_request, cq_.get()));
+ response_reader->Finish(&recv_response, &recv_status, tag(4));
+
+ service_->RequestEcho(&srv_ctx, &recv_request, &response_writer, cq_.get(),
+ cq_.get(), tag(2));
+ Verifier().Expect(2, true).Verify(cq_.get());
+ EXPECT_EQ(send_request.message(), recv_request.message());
+ const auto& client_initial_metadata = srv_ctx.client_metadata();
+ EXPECT_EQ(meta1.second,
+ ToString(client_initial_metadata.find(meta1.first)->second));
+ EXPECT_EQ(meta2.second,
+ ToString(client_initial_metadata.find(meta2.first)->second));
+ EXPECT_EQ(meta3.second,
+ ToString(client_initial_metadata.find(meta3.first)->second));
+ EXPECT_GE(client_initial_metadata.size(), static_cast<size_t>(2));
+
+ send_response.set_message(recv_request.message());
+ response_writer.Finish(send_response, Status::OK, tag(3));
+ Verifier().Expect(3, true).Expect(4, true).Verify(cq_.get());
+
+ EXPECT_EQ(send_response.message(), recv_response.message());
+ EXPECT_TRUE(recv_status.ok());
+}
+
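The test above reads srv_ctx.client_metadata(), a multimap keyed and valued by grpc::string_ref. A slightly more defensive lookup, shown here as a sketch that reuses the ToString helper this file already defines, checks for presence before dereferencing:

  const auto& md = srv_ctx.client_metadata();
  auto it = md.find("key1");
  ASSERT_NE(md.end(), it);                  // fail fast if the key is missing
  EXPECT_EQ("val1", ToString(it->second));  // string_ref -> TString for comparison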
+TEST_P(AsyncEnd2endTest, ServerInitialMetadataRpc) {
+ ResetStub();
+
+ EchoRequest send_request;
+ EchoRequest recv_request;
+ EchoResponse send_response;
+ EchoResponse recv_response;
+ Status recv_status;
+
+ ClientContext cli_ctx;
+ ServerContext srv_ctx;
+ grpc::ServerAsyncResponseWriter<EchoResponse> response_writer(&srv_ctx);
+
+ send_request.set_message(GetParam().message_content);
std::pair<TString, TString> meta1("key1", "val1");
std::pair<TString, TString> meta2("key2", "val2");
-
- std::unique_ptr<ClientAsyncResponseReader<EchoResponse>> response_reader(
- stub_->AsyncEcho(&cli_ctx, send_request, cq_.get()));
- response_reader->ReadInitialMetadata(tag(4));
-
- service_->RequestEcho(&srv_ctx, &recv_request, &response_writer, cq_.get(),
- cq_.get(), tag(2));
- Verifier().Expect(2, true).Verify(cq_.get());
- EXPECT_EQ(send_request.message(), recv_request.message());
- srv_ctx.AddInitialMetadata(meta1.first, meta1.second);
- srv_ctx.AddInitialMetadata(meta2.first, meta2.second);
- response_writer.SendInitialMetadata(tag(3));
- Verifier().Expect(3, true).Expect(4, true).Verify(cq_.get());
- const auto& server_initial_metadata = cli_ctx.GetServerInitialMetadata();
- EXPECT_EQ(meta1.second,
- ToString(server_initial_metadata.find(meta1.first)->second));
- EXPECT_EQ(meta2.second,
- ToString(server_initial_metadata.find(meta2.first)->second));
- EXPECT_EQ(static_cast<size_t>(2), server_initial_metadata.size());
-
- send_response.set_message(recv_request.message());
- response_writer.Finish(send_response, Status::OK, tag(5));
- response_reader->Finish(&recv_response, &recv_status, tag(6));
- Verifier().Expect(5, true).Expect(6, true).Verify(cq_.get());
-
- EXPECT_EQ(send_response.message(), recv_response.message());
- EXPECT_TRUE(recv_status.ok());
-}
-
-// 1 ping, 2 pongs.
-TEST_P(AsyncEnd2endTest, ServerInitialMetadataServerStreaming) {
- ResetStub();
- EchoRequest send_request;
- EchoRequest recv_request;
- EchoResponse send_response;
- EchoResponse recv_response;
- Status recv_status;
- ClientContext cli_ctx;
- ServerContext srv_ctx;
- ServerAsyncWriter<EchoResponse> srv_stream(&srv_ctx);
-
+
+ std::unique_ptr<ClientAsyncResponseReader<EchoResponse>> response_reader(
+ stub_->AsyncEcho(&cli_ctx, send_request, cq_.get()));
+ response_reader->ReadInitialMetadata(tag(4));
+
+ service_->RequestEcho(&srv_ctx, &recv_request, &response_writer, cq_.get(),
+ cq_.get(), tag(2));
+ Verifier().Expect(2, true).Verify(cq_.get());
+ EXPECT_EQ(send_request.message(), recv_request.message());
+ srv_ctx.AddInitialMetadata(meta1.first, meta1.second);
+ srv_ctx.AddInitialMetadata(meta2.first, meta2.second);
+ response_writer.SendInitialMetadata(tag(3));
+ Verifier().Expect(3, true).Expect(4, true).Verify(cq_.get());
+ const auto& server_initial_metadata = cli_ctx.GetServerInitialMetadata();
+ EXPECT_EQ(meta1.second,
+ ToString(server_initial_metadata.find(meta1.first)->second));
+ EXPECT_EQ(meta2.second,
+ ToString(server_initial_metadata.find(meta2.first)->second));
+ EXPECT_EQ(static_cast<size_t>(2), server_initial_metadata.size());
+
+ send_response.set_message(recv_request.message());
+ response_writer.Finish(send_response, Status::OK, tag(5));
+ response_reader->Finish(&recv_response, &recv_status, tag(6));
+ Verifier().Expect(5, true).Expect(6, true).Verify(cq_.get());
+
+ EXPECT_EQ(send_response.message(), recv_response.message());
+ EXPECT_TRUE(recv_status.ok());
+}
+
+// 1 ping, 2 pongs.
+TEST_P(AsyncEnd2endTest, ServerInitialMetadataServerStreaming) {
+ ResetStub();
+ EchoRequest send_request;
+ EchoRequest recv_request;
+ EchoResponse send_response;
+ EchoResponse recv_response;
+ Status recv_status;
+ ClientContext cli_ctx;
+ ServerContext srv_ctx;
+ ServerAsyncWriter<EchoResponse> srv_stream(&srv_ctx);
+
std::pair<::TString, ::TString> meta1("key1", "val1");
std::pair<::TString, ::TString> meta2("key2", "val2");
-
- std::unique_ptr<ClientAsyncReader<EchoResponse>> cli_stream(
- stub_->AsyncResponseStream(&cli_ctx, send_request, cq_.get(), tag(1)));
- cli_stream->ReadInitialMetadata(tag(11));
- service_->RequestResponseStream(&srv_ctx, &recv_request, &srv_stream,
- cq_.get(), cq_.get(), tag(2));
-
- Verifier().Expect(1, true).Expect(2, true).Verify(cq_.get());
-
- srv_ctx.AddInitialMetadata(meta1.first, meta1.second);
- srv_ctx.AddInitialMetadata(meta2.first, meta2.second);
- srv_stream.SendInitialMetadata(tag(10));
- Verifier().Expect(10, true).Expect(11, true).Verify(cq_.get());
- auto server_initial_metadata = cli_ctx.GetServerInitialMetadata();
- EXPECT_EQ(meta1.second,
- ToString(server_initial_metadata.find(meta1.first)->second));
- EXPECT_EQ(meta2.second,
- ToString(server_initial_metadata.find(meta2.first)->second));
- EXPECT_EQ(static_cast<size_t>(2), server_initial_metadata.size());
-
- srv_stream.Write(send_response, tag(3));
-
- cli_stream->Read(&recv_response, tag(4));
- Verifier().Expect(3, true).Expect(4, true).Verify(cq_.get());
-
- srv_stream.Write(send_response, tag(5));
- cli_stream->Read(&recv_response, tag(6));
- Verifier().Expect(5, true).Expect(6, true).Verify(cq_.get());
-
- srv_stream.Finish(Status::OK, tag(7));
- cli_stream->Read(&recv_response, tag(8));
- Verifier().Expect(7, true).Expect(8, false).Verify(cq_.get());
-
- cli_stream->Finish(&recv_status, tag(9));
- Verifier().Expect(9, true).Verify(cq_.get());
-
- EXPECT_TRUE(recv_status.ok());
-}
-
-// 1 ping, 2 pongs.
-// Test for server initial metadata being sent implicitly
-TEST_P(AsyncEnd2endTest, ServerInitialMetadataServerStreamingImplicit) {
- ResetStub();
- EchoRequest send_request;
- EchoRequest recv_request;
- EchoResponse send_response;
- EchoResponse recv_response;
- Status recv_status;
- ClientContext cli_ctx;
- ServerContext srv_ctx;
- ServerAsyncWriter<EchoResponse> srv_stream(&srv_ctx);
-
- send_request.set_message(GetParam().message_content);
+
+ std::unique_ptr<ClientAsyncReader<EchoResponse>> cli_stream(
+ stub_->AsyncResponseStream(&cli_ctx, send_request, cq_.get(), tag(1)));
+ cli_stream->ReadInitialMetadata(tag(11));
+ service_->RequestResponseStream(&srv_ctx, &recv_request, &srv_stream,
+ cq_.get(), cq_.get(), tag(2));
+
+ Verifier().Expect(1, true).Expect(2, true).Verify(cq_.get());
+
+ srv_ctx.AddInitialMetadata(meta1.first, meta1.second);
+ srv_ctx.AddInitialMetadata(meta2.first, meta2.second);
+ srv_stream.SendInitialMetadata(tag(10));
+ Verifier().Expect(10, true).Expect(11, true).Verify(cq_.get());
+ auto server_initial_metadata = cli_ctx.GetServerInitialMetadata();
+ EXPECT_EQ(meta1.second,
+ ToString(server_initial_metadata.find(meta1.first)->second));
+ EXPECT_EQ(meta2.second,
+ ToString(server_initial_metadata.find(meta2.first)->second));
+ EXPECT_EQ(static_cast<size_t>(2), server_initial_metadata.size());
+
+ srv_stream.Write(send_response, tag(3));
+
+ cli_stream->Read(&recv_response, tag(4));
+ Verifier().Expect(3, true).Expect(4, true).Verify(cq_.get());
+
+ srv_stream.Write(send_response, tag(5));
+ cli_stream->Read(&recv_response, tag(6));
+ Verifier().Expect(5, true).Expect(6, true).Verify(cq_.get());
+
+ srv_stream.Finish(Status::OK, tag(7));
+ cli_stream->Read(&recv_response, tag(8));
+ Verifier().Expect(7, true).Expect(8, false).Verify(cq_.get());
+
+ cli_stream->Finish(&recv_status, tag(9));
+ Verifier().Expect(9, true).Verify(cq_.get());
+
+ EXPECT_TRUE(recv_status.ok());
+}
+
+// 1 ping, 2 pongs.
+// Test for server initial metadata being sent implicitly
+TEST_P(AsyncEnd2endTest, ServerInitialMetadataServerStreamingImplicit) {
+ ResetStub();
+ EchoRequest send_request;
+ EchoRequest recv_request;
+ EchoResponse send_response;
+ EchoResponse recv_response;
+ Status recv_status;
+ ClientContext cli_ctx;
+ ServerContext srv_ctx;
+ ServerAsyncWriter<EchoResponse> srv_stream(&srv_ctx);
+
+ send_request.set_message(GetParam().message_content);
std::pair<::TString, ::TString> meta1("key1", "val1");
std::pair<::TString, ::TString> meta2("key2", "val2");
-
- std::unique_ptr<ClientAsyncReader<EchoResponse>> cli_stream(
- stub_->AsyncResponseStream(&cli_ctx, send_request, cq_.get(), tag(1)));
- service_->RequestResponseStream(&srv_ctx, &recv_request, &srv_stream,
- cq_.get(), cq_.get(), tag(2));
-
- Verifier().Expect(1, true).Expect(2, true).Verify(cq_.get());
- EXPECT_EQ(send_request.message(), recv_request.message());
-
- srv_ctx.AddInitialMetadata(meta1.first, meta1.second);
- srv_ctx.AddInitialMetadata(meta2.first, meta2.second);
- send_response.set_message(recv_request.message());
- srv_stream.Write(send_response, tag(3));
-
- cli_stream->Read(&recv_response, tag(4));
- Verifier().Expect(3, true).Expect(4, true).Verify(cq_.get());
- EXPECT_EQ(send_response.message(), recv_response.message());
-
- auto server_initial_metadata = cli_ctx.GetServerInitialMetadata();
- EXPECT_EQ(meta1.second,
- ToString(server_initial_metadata.find(meta1.first)->second));
- EXPECT_EQ(meta2.second,
- ToString(server_initial_metadata.find(meta2.first)->second));
- EXPECT_EQ(static_cast<size_t>(2), server_initial_metadata.size());
-
- srv_stream.Write(send_response, tag(5));
- cli_stream->Read(&recv_response, tag(6));
- Verifier().Expect(5, true).Expect(6, true).Verify(cq_.get());
-
- srv_stream.Finish(Status::OK, tag(7));
- cli_stream->Read(&recv_response, tag(8));
- Verifier().Expect(7, true).Expect(8, false).Verify(cq_.get());
-
- cli_stream->Finish(&recv_status, tag(9));
- Verifier().Expect(9, true).Verify(cq_.get());
-
- EXPECT_TRUE(recv_status.ok());
-}
-
-TEST_P(AsyncEnd2endTest, ServerTrailingMetadataRpc) {
- ResetStub();
-
- EchoRequest send_request;
- EchoRequest recv_request;
- EchoResponse send_response;
- EchoResponse recv_response;
- Status recv_status;
-
- ClientContext cli_ctx;
- ServerContext srv_ctx;
- grpc::ServerAsyncResponseWriter<EchoResponse> response_writer(&srv_ctx);
-
- send_request.set_message(GetParam().message_content);
+
+ std::unique_ptr<ClientAsyncReader<EchoResponse>> cli_stream(
+ stub_->AsyncResponseStream(&cli_ctx, send_request, cq_.get(), tag(1)));
+ service_->RequestResponseStream(&srv_ctx, &recv_request, &srv_stream,
+ cq_.get(), cq_.get(), tag(2));
+
+ Verifier().Expect(1, true).Expect(2, true).Verify(cq_.get());
+ EXPECT_EQ(send_request.message(), recv_request.message());
+
+ srv_ctx.AddInitialMetadata(meta1.first, meta1.second);
+ srv_ctx.AddInitialMetadata(meta2.first, meta2.second);
+ send_response.set_message(recv_request.message());
+ srv_stream.Write(send_response, tag(3));
+
+ cli_stream->Read(&recv_response, tag(4));
+ Verifier().Expect(3, true).Expect(4, true).Verify(cq_.get());
+ EXPECT_EQ(send_response.message(), recv_response.message());
+
+ auto server_initial_metadata = cli_ctx.GetServerInitialMetadata();
+ EXPECT_EQ(meta1.second,
+ ToString(server_initial_metadata.find(meta1.first)->second));
+ EXPECT_EQ(meta2.second,
+ ToString(server_initial_metadata.find(meta2.first)->second));
+ EXPECT_EQ(static_cast<size_t>(2), server_initial_metadata.size());
+
+ srv_stream.Write(send_response, tag(5));
+ cli_stream->Read(&recv_response, tag(6));
+ Verifier().Expect(5, true).Expect(6, true).Verify(cq_.get());
+
+ srv_stream.Finish(Status::OK, tag(7));
+ cli_stream->Read(&recv_response, tag(8));
+ Verifier().Expect(7, true).Expect(8, false).Verify(cq_.get());
+
+ cli_stream->Finish(&recv_status, tag(9));
+ Verifier().Expect(9, true).Verify(cq_.get());
+
+ EXPECT_TRUE(recv_status.ok());
+}
+
+TEST_P(AsyncEnd2endTest, ServerTrailingMetadataRpc) {
+ ResetStub();
+
+ EchoRequest send_request;
+ EchoRequest recv_request;
+ EchoResponse send_response;
+ EchoResponse recv_response;
+ Status recv_status;
+
+ ClientContext cli_ctx;
+ ServerContext srv_ctx;
+ grpc::ServerAsyncResponseWriter<EchoResponse> response_writer(&srv_ctx);
+
+ send_request.set_message(GetParam().message_content);
std::pair<TString, TString> meta1("key1", "val1");
std::pair<TString, TString> meta2("key2", "val2");
-
- std::unique_ptr<ClientAsyncResponseReader<EchoResponse>> response_reader(
- stub_->AsyncEcho(&cli_ctx, send_request, cq_.get()));
- response_reader->Finish(&recv_response, &recv_status, tag(5));
-
- service_->RequestEcho(&srv_ctx, &recv_request, &response_writer, cq_.get(),
- cq_.get(), tag(2));
- Verifier().Expect(2, true).Verify(cq_.get());
- EXPECT_EQ(send_request.message(), recv_request.message());
- response_writer.SendInitialMetadata(tag(3));
- Verifier().Expect(3, true).Verify(cq_.get());
-
- send_response.set_message(recv_request.message());
- srv_ctx.AddTrailingMetadata(meta1.first, meta1.second);
- srv_ctx.AddTrailingMetadata(meta2.first, meta2.second);
- response_writer.Finish(send_response, Status::OK, tag(4));
-
- Verifier().Expect(4, true).Expect(5, true).Verify(cq_.get());
-
- EXPECT_EQ(send_response.message(), recv_response.message());
- EXPECT_TRUE(recv_status.ok());
- const auto& server_trailing_metadata = cli_ctx.GetServerTrailingMetadata();
- EXPECT_EQ(meta1.second,
- ToString(server_trailing_metadata.find(meta1.first)->second));
- EXPECT_EQ(meta2.second,
- ToString(server_trailing_metadata.find(meta2.first)->second));
- EXPECT_EQ(static_cast<size_t>(2), server_trailing_metadata.size());
-}
-
-TEST_P(AsyncEnd2endTest, MetadataRpc) {
- ResetStub();
-
- EchoRequest send_request;
- EchoRequest recv_request;
- EchoResponse send_response;
- EchoResponse recv_response;
- Status recv_status;
-
- ClientContext cli_ctx;
- ServerContext srv_ctx;
- grpc::ServerAsyncResponseWriter<EchoResponse> response_writer(&srv_ctx);
-
- send_request.set_message(GetParam().message_content);
+
+ std::unique_ptr<ClientAsyncResponseReader<EchoResponse>> response_reader(
+ stub_->AsyncEcho(&cli_ctx, send_request, cq_.get()));
+ response_reader->Finish(&recv_response, &recv_status, tag(5));
+
+ service_->RequestEcho(&srv_ctx, &recv_request, &response_writer, cq_.get(),
+ cq_.get(), tag(2));
+ Verifier().Expect(2, true).Verify(cq_.get());
+ EXPECT_EQ(send_request.message(), recv_request.message());
+ response_writer.SendInitialMetadata(tag(3));
+ Verifier().Expect(3, true).Verify(cq_.get());
+
+ send_response.set_message(recv_request.message());
+ srv_ctx.AddTrailingMetadata(meta1.first, meta1.second);
+ srv_ctx.AddTrailingMetadata(meta2.first, meta2.second);
+ response_writer.Finish(send_response, Status::OK, tag(4));
+
+ Verifier().Expect(4, true).Expect(5, true).Verify(cq_.get());
+
+ EXPECT_EQ(send_response.message(), recv_response.message());
+ EXPECT_TRUE(recv_status.ok());
+ const auto& server_trailing_metadata = cli_ctx.GetServerTrailingMetadata();
+ EXPECT_EQ(meta1.second,
+ ToString(server_trailing_metadata.find(meta1.first)->second));
+ EXPECT_EQ(meta2.second,
+ ToString(server_trailing_metadata.find(meta2.first)->second));
+ EXPECT_EQ(static_cast<size_t>(2), server_trailing_metadata.size());
+}
+
+TEST_P(AsyncEnd2endTest, MetadataRpc) {
+ ResetStub();
+
+ EchoRequest send_request;
+ EchoRequest recv_request;
+ EchoResponse send_response;
+ EchoResponse recv_response;
+ Status recv_status;
+
+ ClientContext cli_ctx;
+ ServerContext srv_ctx;
+ grpc::ServerAsyncResponseWriter<EchoResponse> response_writer(&srv_ctx);
+
+ send_request.set_message(GetParam().message_content);
std::pair<TString, TString> meta1("key1", "val1");
std::pair<TString, TString> meta2(
- "key2-bin",
+ "key2-bin",
TString("\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xcb\xcc", 13));
std::pair<TString, TString> meta3("key3", "val3");
std::pair<TString, TString> meta6(
- "key4-bin",
+ "key4-bin",
TString("\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d",
14));
std::pair<TString, TString> meta5("key5", "val5");
std::pair<TString, TString> meta4(
- "key6-bin",
+ "key6-bin",
TString(
- "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee", 15));
-
- cli_ctx.AddMetadata(meta1.first, meta1.second);
- cli_ctx.AddMetadata(meta2.first, meta2.second);
-
- std::unique_ptr<ClientAsyncResponseReader<EchoResponse>> response_reader(
- stub_->AsyncEcho(&cli_ctx, send_request, cq_.get()));
- response_reader->ReadInitialMetadata(tag(4));
-
- service_->RequestEcho(&srv_ctx, &recv_request, &response_writer, cq_.get(),
- cq_.get(), tag(2));
- Verifier().Expect(2, true).Verify(cq_.get());
- EXPECT_EQ(send_request.message(), recv_request.message());
- const auto& client_initial_metadata = srv_ctx.client_metadata();
- EXPECT_EQ(meta1.second,
- ToString(client_initial_metadata.find(meta1.first)->second));
- EXPECT_EQ(meta2.second,
- ToString(client_initial_metadata.find(meta2.first)->second));
- EXPECT_GE(client_initial_metadata.size(), static_cast<size_t>(2));
-
- srv_ctx.AddInitialMetadata(meta3.first, meta3.second);
- srv_ctx.AddInitialMetadata(meta4.first, meta4.second);
- response_writer.SendInitialMetadata(tag(3));
- Verifier().Expect(3, true).Expect(4, true).Verify(cq_.get());
- const auto& server_initial_metadata = cli_ctx.GetServerInitialMetadata();
- EXPECT_EQ(meta3.second,
- ToString(server_initial_metadata.find(meta3.first)->second));
- EXPECT_EQ(meta4.second,
- ToString(server_initial_metadata.find(meta4.first)->second));
- EXPECT_GE(server_initial_metadata.size(), static_cast<size_t>(2));
-
- send_response.set_message(recv_request.message());
- srv_ctx.AddTrailingMetadata(meta5.first, meta5.second);
- srv_ctx.AddTrailingMetadata(meta6.first, meta6.second);
- response_writer.Finish(send_response, Status::OK, tag(5));
- response_reader->Finish(&recv_response, &recv_status, tag(6));
-
- Verifier().Expect(5, true).Expect(6, true).Verify(cq_.get());
-
- EXPECT_EQ(send_response.message(), recv_response.message());
- EXPECT_TRUE(recv_status.ok());
- const auto& server_trailing_metadata = cli_ctx.GetServerTrailingMetadata();
- EXPECT_EQ(meta5.second,
- ToString(server_trailing_metadata.find(meta5.first)->second));
- EXPECT_EQ(meta6.second,
- ToString(server_trailing_metadata.find(meta6.first)->second));
- EXPECT_GE(server_trailing_metadata.size(), static_cast<size_t>(2));
-}
-
-// Server uses AsyncNotifyWhenDone API to check for cancellation
-TEST_P(AsyncEnd2endTest, ServerCheckCancellation) {
- ResetStub();
-
- EchoRequest send_request;
- EchoRequest recv_request;
- EchoResponse send_response;
- EchoResponse recv_response;
- Status recv_status;
-
- ClientContext cli_ctx;
- ServerContext srv_ctx;
- grpc::ServerAsyncResponseWriter<EchoResponse> response_writer(&srv_ctx);
-
- send_request.set_message(GetParam().message_content);
- std::unique_ptr<ClientAsyncResponseReader<EchoResponse>> response_reader(
- stub_->AsyncEcho(&cli_ctx, send_request, cq_.get()));
- response_reader->Finish(&recv_response, &recv_status, tag(4));
-
- srv_ctx.AsyncNotifyWhenDone(tag(5));
- service_->RequestEcho(&srv_ctx, &recv_request, &response_writer, cq_.get(),
- cq_.get(), tag(2));
-
- Verifier().Expect(2, true).Verify(cq_.get());
- EXPECT_EQ(send_request.message(), recv_request.message());
-
- cli_ctx.TryCancel();
- Verifier().Expect(5, true).Expect(4, true).Verify(cq_.get());
- EXPECT_TRUE(srv_ctx.IsCancelled());
-
- EXPECT_EQ(StatusCode::CANCELLED, recv_status.error_code());
-}
-
-// Server uses AsyncNotifyWhenDone API to check for normal finish
-TEST_P(AsyncEnd2endTest, ServerCheckDone) {
- ResetStub();
-
- EchoRequest send_request;
- EchoRequest recv_request;
- EchoResponse send_response;
- EchoResponse recv_response;
- Status recv_status;
-
- ClientContext cli_ctx;
- ServerContext srv_ctx;
- grpc::ServerAsyncResponseWriter<EchoResponse> response_writer(&srv_ctx);
-
- send_request.set_message(GetParam().message_content);
- std::unique_ptr<ClientAsyncResponseReader<EchoResponse>> response_reader(
- stub_->AsyncEcho(&cli_ctx, send_request, cq_.get()));
- response_reader->Finish(&recv_response, &recv_status, tag(4));
-
- srv_ctx.AsyncNotifyWhenDone(tag(5));
- service_->RequestEcho(&srv_ctx, &recv_request, &response_writer, cq_.get(),
- cq_.get(), tag(2));
-
- Verifier().Expect(2, true).Verify(cq_.get());
- EXPECT_EQ(send_request.message(), recv_request.message());
-
- send_response.set_message(recv_request.message());
- response_writer.Finish(send_response, Status::OK, tag(3));
- Verifier().Expect(3, true).Expect(4, true).Expect(5, true).Verify(cq_.get());
- EXPECT_FALSE(srv_ctx.IsCancelled());
-
- EXPECT_EQ(send_response.message(), recv_response.message());
- EXPECT_TRUE(recv_status.ok());
-}
-
-TEST_P(AsyncEnd2endTest, UnimplementedRpc) {
- ChannelArguments args;
- const auto& channel_creds = GetCredentialsProvider()->GetChannelCredentials(
- GetParam().credentials_type, &args);
- std::shared_ptr<Channel> channel =
- !(GetParam().inproc) ? ::grpc::CreateCustomChannel(server_address_.str(),
- channel_creds, args)
- : server_->InProcessChannel(args);
- std::unique_ptr<grpc::testing::UnimplementedEchoService::Stub> stub;
- stub = grpc::testing::UnimplementedEchoService::NewStub(channel);
- EchoRequest send_request;
- EchoResponse recv_response;
- Status recv_status;
-
- ClientContext cli_ctx;
- send_request.set_message(GetParam().message_content);
- std::unique_ptr<ClientAsyncResponseReader<EchoResponse>> response_reader(
- stub->AsyncUnimplemented(&cli_ctx, send_request, cq_.get()));
-
- response_reader->Finish(&recv_response, &recv_status, tag(4));
- Verifier().Expect(4, true).Verify(cq_.get());
-
- EXPECT_EQ(StatusCode::UNIMPLEMENTED, recv_status.error_code());
- EXPECT_EQ("", recv_status.error_message());
-}
-
-// This class is for testing scenarios where RPCs are cancelled on the server
-// by calling ServerContext::TryCancel(). Server uses AsyncNotifyWhenDone
-// API to check for cancellation
-class AsyncEnd2endServerTryCancelTest : public AsyncEnd2endTest {
- protected:
- typedef enum {
- DO_NOT_CANCEL = 0,
- CANCEL_BEFORE_PROCESSING,
- CANCEL_DURING_PROCESSING,
- CANCEL_AFTER_PROCESSING
- } ServerTryCancelRequestPhase;
-
- // Helper for testing client-streaming RPCs which are cancelled on the server.
- // Depending on the value of server_try_cancel parameter, this will test one
- // of the following three scenarios:
- // CANCEL_BEFORE_PROCESSING: Rpc is cancelled by the server before reading
- // any messages from the client
- //
- // CANCEL_DURING_PROCESSING: Rpc is cancelled by the server while reading
- // messages from the client
- //
- // CANCEL_AFTER PROCESSING: Rpc is cancelled by server after reading all
- // messages from the client (but before sending any status back to the
- // client)
- void TestClientStreamingServerCancel(
- ServerTryCancelRequestPhase server_try_cancel) {
- ResetStub();
-
- EchoRequest recv_request;
- EchoResponse send_response;
- EchoResponse recv_response;
- Status recv_status;
-
- ClientContext cli_ctx;
- ServerContext srv_ctx;
- ServerAsyncReader<EchoResponse, EchoRequest> srv_stream(&srv_ctx);
-
- // Initiate the 'RequestStream' call on client
- CompletionQueue cli_cq;
-
- std::unique_ptr<ClientAsyncWriter<EchoRequest>> cli_stream(
- stub_->AsyncRequestStream(&cli_ctx, &recv_response, &cli_cq, tag(1)));
-
- // On the server, request to be notified of 'RequestStream' calls
- // and receive the 'RequestStream' call just made by the client
- srv_ctx.AsyncNotifyWhenDone(tag(11));
- service_->RequestRequestStream(&srv_ctx, &srv_stream, cq_.get(), cq_.get(),
- tag(2));
- std::thread t1([&cli_cq] { Verifier().Expect(1, true).Verify(&cli_cq); });
- Verifier().Expect(2, true).Verify(cq_.get());
- t1.join();
-
- bool expected_server_cq_result = true;
- bool expected_client_cq_result = true;
-
- if (server_try_cancel == CANCEL_BEFORE_PROCESSING) {
- srv_ctx.TryCancel();
- Verifier().Expect(11, true).Verify(cq_.get());
- EXPECT_TRUE(srv_ctx.IsCancelled());
-
- // Since cancellation is done before server reads any results, we know
- // for sure that all server cq results will return false from this
- // point forward
- expected_server_cq_result = false;
- expected_client_cq_result = false;
- }
-
- bool ignore_client_cq_result =
- (server_try_cancel == CANCEL_DURING_PROCESSING) ||
- (server_try_cancel == CANCEL_BEFORE_PROCESSING);
-
- std::thread cli_thread([&cli_cq, &cli_stream, &expected_client_cq_result,
- &ignore_client_cq_result] {
- EchoRequest send_request;
- // Client sends 3 messages (tags 3, 4 and 5)
- for (int tag_idx = 3; tag_idx <= 5; tag_idx++) {
+ "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee", 15));
+
+ cli_ctx.AddMetadata(meta1.first, meta1.second);
+ cli_ctx.AddMetadata(meta2.first, meta2.second);
+
+ std::unique_ptr<ClientAsyncResponseReader<EchoResponse>> response_reader(
+ stub_->AsyncEcho(&cli_ctx, send_request, cq_.get()));
+ response_reader->ReadInitialMetadata(tag(4));
+
+ service_->RequestEcho(&srv_ctx, &recv_request, &response_writer, cq_.get(),
+ cq_.get(), tag(2));
+ Verifier().Expect(2, true).Verify(cq_.get());
+ EXPECT_EQ(send_request.message(), recv_request.message());
+ const auto& client_initial_metadata = srv_ctx.client_metadata();
+ EXPECT_EQ(meta1.second,
+ ToString(client_initial_metadata.find(meta1.first)->second));
+ EXPECT_EQ(meta2.second,
+ ToString(client_initial_metadata.find(meta2.first)->second));
+ EXPECT_GE(client_initial_metadata.size(), static_cast<size_t>(2));
+
+ srv_ctx.AddInitialMetadata(meta3.first, meta3.second);
+ srv_ctx.AddInitialMetadata(meta4.first, meta4.second);
+ response_writer.SendInitialMetadata(tag(3));
+ Verifier().Expect(3, true).Expect(4, true).Verify(cq_.get());
+ const auto& server_initial_metadata = cli_ctx.GetServerInitialMetadata();
+ EXPECT_EQ(meta3.second,
+ ToString(server_initial_metadata.find(meta3.first)->second));
+ EXPECT_EQ(meta4.second,
+ ToString(server_initial_metadata.find(meta4.first)->second));
+ EXPECT_GE(server_initial_metadata.size(), static_cast<size_t>(2));
+
+ send_response.set_message(recv_request.message());
+ srv_ctx.AddTrailingMetadata(meta5.first, meta5.second);
+ srv_ctx.AddTrailingMetadata(meta6.first, meta6.second);
+ response_writer.Finish(send_response, Status::OK, tag(5));
+ response_reader->Finish(&recv_response, &recv_status, tag(6));
+
+ Verifier().Expect(5, true).Expect(6, true).Verify(cq_.get());
+
+ EXPECT_EQ(send_response.message(), recv_response.message());
+ EXPECT_TRUE(recv_status.ok());
+ const auto& server_trailing_metadata = cli_ctx.GetServerTrailingMetadata();
+ EXPECT_EQ(meta5.second,
+ ToString(server_trailing_metadata.find(meta5.first)->second));
+ EXPECT_EQ(meta6.second,
+ ToString(server_trailing_metadata.find(meta6.first)->second));
+ EXPECT_GE(server_trailing_metadata.size(), static_cast<size_t>(2));
+}
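+
+// Illustrative sketch (not part of the original tests): the same metadata
+// exchange written against a blocking stub. The synchronous Echo() call and
+// the EchoTestService::Stub parameter below follow the standard generated-code
+// conventions and are assumptions here, not taken from this test.
+static void SyncMetadataEchoSketch(grpc::testing::EchoTestService::Stub* stub) {
+  ClientContext ctx;
+  ctx.AddMetadata("key1", "val1");  // client -> server initial metadata
+  EchoRequest request;
+  request.set_message("hello");
+  EchoResponse response;
+  Status status = stub->Echo(&ctx, request, &response);  // blocking call
+  if (status.ok()) {
+    // Server metadata becomes available on the ClientContext after the call.
+    const auto& trailing = ctx.GetServerTrailingMetadata();
+    (void)trailing;
+  }
+}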
+
+// Server uses AsyncNotifyWhenDone API to check for cancellation
+TEST_P(AsyncEnd2endTest, ServerCheckCancellation) {
+ ResetStub();
+
+ EchoRequest send_request;
+ EchoRequest recv_request;
+ EchoResponse send_response;
+ EchoResponse recv_response;
+ Status recv_status;
+
+ ClientContext cli_ctx;
+ ServerContext srv_ctx;
+ grpc::ServerAsyncResponseWriter<EchoResponse> response_writer(&srv_ctx);
+
+ send_request.set_message(GetParam().message_content);
+ std::unique_ptr<ClientAsyncResponseReader<EchoResponse>> response_reader(
+ stub_->AsyncEcho(&cli_ctx, send_request, cq_.get()));
+ response_reader->Finish(&recv_response, &recv_status, tag(4));
+
+ srv_ctx.AsyncNotifyWhenDone(tag(5));
+ service_->RequestEcho(&srv_ctx, &recv_request, &response_writer, cq_.get(),
+ cq_.get(), tag(2));
+
+ Verifier().Expect(2, true).Verify(cq_.get());
+ EXPECT_EQ(send_request.message(), recv_request.message());
+
+ cli_ctx.TryCancel();
+ Verifier().Expect(5, true).Expect(4, true).Verify(cq_.get());
+ EXPECT_TRUE(srv_ctx.IsCancelled());
+
+ EXPECT_EQ(StatusCode::CANCELLED, recv_status.error_code());
+}
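+
+// Illustrative sketch (not part of the original tests): the core
+// AsyncNotifyWhenDone pattern exercised above, reduced to a helper. It assumes
+// `done_tag` was registered via srv_ctx->AsyncNotifyWhenDone(done_tag) before
+// the call was requested, as in the tests in this file.
+static bool WaitForRpcDoneAndCheckCancelled(ServerContext* srv_ctx,
+                                            CompletionQueue* cq,
+                                            void* done_tag) {
+  void* got_tag = nullptr;
+  bool ok = false;
+  // The done tag fires once the RPC finishes for any reason (normal completion
+  // or cancellation).
+  GPR_ASSERT(cq->Next(&got_tag, &ok) && got_tag == done_tag);
+  // IsCancelled() is only meaningful after the done tag has been received.
+  return srv_ctx->IsCancelled();
+}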
+
+// Server uses AsyncNotifyWhenDone API to check for normal finish
+TEST_P(AsyncEnd2endTest, ServerCheckDone) {
+ ResetStub();
+
+ EchoRequest send_request;
+ EchoRequest recv_request;
+ EchoResponse send_response;
+ EchoResponse recv_response;
+ Status recv_status;
+
+ ClientContext cli_ctx;
+ ServerContext srv_ctx;
+ grpc::ServerAsyncResponseWriter<EchoResponse> response_writer(&srv_ctx);
+
+ send_request.set_message(GetParam().message_content);
+ std::unique_ptr<ClientAsyncResponseReader<EchoResponse>> response_reader(
+ stub_->AsyncEcho(&cli_ctx, send_request, cq_.get()));
+ response_reader->Finish(&recv_response, &recv_status, tag(4));
+
+ srv_ctx.AsyncNotifyWhenDone(tag(5));
+ service_->RequestEcho(&srv_ctx, &recv_request, &response_writer, cq_.get(),
+ cq_.get(), tag(2));
+
+ Verifier().Expect(2, true).Verify(cq_.get());
+ EXPECT_EQ(send_request.message(), recv_request.message());
+
+ send_response.set_message(recv_request.message());
+ response_writer.Finish(send_response, Status::OK, tag(3));
+ Verifier().Expect(3, true).Expect(4, true).Expect(5, true).Verify(cq_.get());
+ EXPECT_FALSE(srv_ctx.IsCancelled());
+
+ EXPECT_EQ(send_response.message(), recv_response.message());
+ EXPECT_TRUE(recv_status.ok());
+}
+
+TEST_P(AsyncEnd2endTest, UnimplementedRpc) {
+ ChannelArguments args;
+ const auto& channel_creds = GetCredentialsProvider()->GetChannelCredentials(
+ GetParam().credentials_type, &args);
+ std::shared_ptr<Channel> channel =
+ !(GetParam().inproc) ? ::grpc::CreateCustomChannel(server_address_.str(),
+ channel_creds, args)
+ : server_->InProcessChannel(args);
+ std::unique_ptr<grpc::testing::UnimplementedEchoService::Stub> stub;
+ stub = grpc::testing::UnimplementedEchoService::NewStub(channel);
+ EchoRequest send_request;
+ EchoResponse recv_response;
+ Status recv_status;
+
+ ClientContext cli_ctx;
+ send_request.set_message(GetParam().message_content);
+ std::unique_ptr<ClientAsyncResponseReader<EchoResponse>> response_reader(
+ stub->AsyncUnimplemented(&cli_ctx, send_request, cq_.get()));
+
+ response_reader->Finish(&recv_response, &recv_status, tag(4));
+ Verifier().Expect(4, true).Verify(cq_.get());
+
+ EXPECT_EQ(StatusCode::UNIMPLEMENTED, recv_status.error_code());
+ EXPECT_EQ("", recv_status.error_message());
+}
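+
+// Illustrative sketch (not part of the original tests): the same expectation
+// with a blocking stub. The synchronous Unimplemented() method name follows
+// the usual codegen convention (the Async prefix dropped) and is assumed here.
+static void SyncUnimplementedSketch(
+    grpc::testing::UnimplementedEchoService::Stub* stub) {
+  ClientContext ctx;
+  EchoRequest request;
+  EchoResponse response;
+  Status status = stub->Unimplemented(&ctx, request, &response);
+  // The server never registered this service, so the call must fail with
+  // UNIMPLEMENTED.
+  GPR_ASSERT(status.error_code() == StatusCode::UNIMPLEMENTED);
+}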
+
+// This class is for testing scenarios where RPCs are cancelled on the server
+// by calling ServerContext::TryCancel(). Server uses AsyncNotifyWhenDone
+// API to check for cancellation
+class AsyncEnd2endServerTryCancelTest : public AsyncEnd2endTest {
+ protected:
+ typedef enum {
+ DO_NOT_CANCEL = 0,
+ CANCEL_BEFORE_PROCESSING,
+ CANCEL_DURING_PROCESSING,
+ CANCEL_AFTER_PROCESSING
+ } ServerTryCancelRequestPhase;
+
+ // Helper for testing client-streaming RPCs which are cancelled on the server.
+ // Depending on the value of server_try_cancel parameter, this will test one
+ // of the following three scenarios:
+ // CANCEL_BEFORE_PROCESSING: Rpc is cancelled by the server before reading
+ // any messages from the client
+ //
+ // CANCEL_DURING_PROCESSING: Rpc is cancelled by the server while reading
+ // messages from the client
+ //
+  //   CANCEL_AFTER_PROCESSING: Rpc is cancelled by the server after reading
+  //   all messages from the client (but before sending any status back to
+  //   the client)
+ void TestClientStreamingServerCancel(
+ ServerTryCancelRequestPhase server_try_cancel) {
+ ResetStub();
+
+ EchoRequest recv_request;
+ EchoResponse send_response;
+ EchoResponse recv_response;
+ Status recv_status;
+
+ ClientContext cli_ctx;
+ ServerContext srv_ctx;
+ ServerAsyncReader<EchoResponse, EchoRequest> srv_stream(&srv_ctx);
+
+ // Initiate the 'RequestStream' call on client
+ CompletionQueue cli_cq;
+
+ std::unique_ptr<ClientAsyncWriter<EchoRequest>> cli_stream(
+ stub_->AsyncRequestStream(&cli_ctx, &recv_response, &cli_cq, tag(1)));
+
+ // On the server, request to be notified of 'RequestStream' calls
+ // and receive the 'RequestStream' call just made by the client
+ srv_ctx.AsyncNotifyWhenDone(tag(11));
+ service_->RequestRequestStream(&srv_ctx, &srv_stream, cq_.get(), cq_.get(),
+ tag(2));
+ std::thread t1([&cli_cq] { Verifier().Expect(1, true).Verify(&cli_cq); });
+ Verifier().Expect(2, true).Verify(cq_.get());
+ t1.join();
+
+ bool expected_server_cq_result = true;
+ bool expected_client_cq_result = true;
+
+ if (server_try_cancel == CANCEL_BEFORE_PROCESSING) {
+ srv_ctx.TryCancel();
+ Verifier().Expect(11, true).Verify(cq_.get());
+ EXPECT_TRUE(srv_ctx.IsCancelled());
+
+      // Since cancellation is done before the server reads any requests, we
+      // know for sure that all server cq results will return false from this
+      // point forward.
+ expected_server_cq_result = false;
+ expected_client_cq_result = false;
+ }
+
+ bool ignore_client_cq_result =
+ (server_try_cancel == CANCEL_DURING_PROCESSING) ||
+ (server_try_cancel == CANCEL_BEFORE_PROCESSING);
+
+ std::thread cli_thread([&cli_cq, &cli_stream, &expected_client_cq_result,
+ &ignore_client_cq_result] {
+ EchoRequest send_request;
+ // Client sends 3 messages (tags 3, 4 and 5)
+ for (int tag_idx = 3; tag_idx <= 5; tag_idx++) {
send_request.set_message("Ping " + ToString(tag_idx));
- cli_stream->Write(send_request, tag(tag_idx));
- Verifier()
- .Expect(tag_idx, expected_client_cq_result)
- .Verify(&cli_cq, ignore_client_cq_result);
- }
- cli_stream->WritesDone(tag(6));
- // Ignore ok on WritesDone since cancel can affect it
- Verifier()
- .Expect(6, expected_client_cq_result)
- .Verify(&cli_cq, ignore_client_cq_result);
- });
-
- bool ignore_cq_result = false;
- bool want_done_tag = false;
- std::thread* server_try_cancel_thd = nullptr;
-
- auto verif = Verifier();
-
- if (server_try_cancel == CANCEL_DURING_PROCESSING) {
- server_try_cancel_thd =
- new std::thread([&srv_ctx] { srv_ctx.TryCancel(); });
- // Server will cancel the RPC in a parallel thread while reading the
- // requests from the client. Since the cancellation can happen at anytime,
- // some of the cq results (i.e those until cancellation) might be true but
- // its non deterministic. So better to ignore the cq results
- ignore_cq_result = true;
- // Expect that we might possibly see the done tag that
- // indicates cancellation completion in this case
- want_done_tag = true;
- verif.Expect(11, true);
- }
-
- // Server reads 3 messages (tags 6, 7 and 8)
- // But if want_done_tag is true, we might also see tag 11
- for (int tag_idx = 6; tag_idx <= 8; tag_idx++) {
- srv_stream.Read(&recv_request, tag(tag_idx));
- // Note that we'll add something to the verifier and verify that
- // something was seen, but it might be tag 11 and not what we
- // just added
- int got_tag = verif.Expect(tag_idx, expected_server_cq_result)
- .Next(cq_.get(), ignore_cq_result);
- GPR_ASSERT((got_tag == tag_idx) || (got_tag == 11 && want_done_tag));
- if (got_tag == 11) {
- EXPECT_TRUE(srv_ctx.IsCancelled());
- want_done_tag = false;
- // Now get the other entry that we were waiting on
- EXPECT_EQ(verif.Next(cq_.get(), ignore_cq_result), tag_idx);
- }
- }
-
- cli_thread.join();
-
- if (server_try_cancel_thd != nullptr) {
- server_try_cancel_thd->join();
- delete server_try_cancel_thd;
- }
-
- if (server_try_cancel == CANCEL_AFTER_PROCESSING) {
- srv_ctx.TryCancel();
- want_done_tag = true;
- verif.Expect(11, true);
- }
-
- if (want_done_tag) {
- verif.Verify(cq_.get());
- EXPECT_TRUE(srv_ctx.IsCancelled());
- want_done_tag = false;
- }
-
- // The RPC has been cancelled at this point for sure (i.e irrespective of
- // the value of `server_try_cancel` is). So, from this point forward, we
- // know that cq results are supposed to return false on server.
-
- // Server sends the final message and cancelled status (but the RPC is
- // already cancelled at this point. So we expect the operation to fail)
- srv_stream.Finish(send_response, Status::CANCELLED, tag(9));
- Verifier().Expect(9, false).Verify(cq_.get());
-
- // Client will see the cancellation
- cli_stream->Finish(&recv_status, tag(10));
- Verifier().Expect(10, true).Verify(&cli_cq);
- EXPECT_FALSE(recv_status.ok());
- EXPECT_EQ(::grpc::StatusCode::CANCELLED, recv_status.error_code());
-
- cli_cq.Shutdown();
- void* dummy_tag;
- bool dummy_ok;
- while (cli_cq.Next(&dummy_tag, &dummy_ok)) {
- }
- }
-
- // Helper for testing server-streaming RPCs which are cancelled on the server.
- // Depending on the value of server_try_cancel parameter, this will test one
- // of the following three scenarios:
- // CANCEL_BEFORE_PROCESSING: Rpc is cancelled by the server before sending
- // any messages to the client
- //
- // CANCEL_DURING_PROCESSING: Rpc is cancelled by the server while sending
- // messages to the client
- //
- // CANCEL_AFTER PROCESSING: Rpc is cancelled by server after sending all
- // messages to the client (but before sending any status back to the
- // client)
- void TestServerStreamingServerCancel(
- ServerTryCancelRequestPhase server_try_cancel) {
- ResetStub();
-
- EchoRequest send_request;
- EchoRequest recv_request;
- EchoResponse send_response;
- Status recv_status;
- ClientContext cli_ctx;
- ServerContext srv_ctx;
- ServerAsyncWriter<EchoResponse> srv_stream(&srv_ctx);
-
- send_request.set_message("Ping");
- // Initiate the 'ResponseStream' call on the client
- CompletionQueue cli_cq;
- std::unique_ptr<ClientAsyncReader<EchoResponse>> cli_stream(
- stub_->AsyncResponseStream(&cli_ctx, send_request, &cli_cq, tag(1)));
- // On the server, request to be notified of 'ResponseStream' calls and
- // receive the call just made by the client
- srv_ctx.AsyncNotifyWhenDone(tag(11));
- service_->RequestResponseStream(&srv_ctx, &recv_request, &srv_stream,
- cq_.get(), cq_.get(), tag(2));
-
- std::thread t1([&cli_cq] { Verifier().Expect(1, true).Verify(&cli_cq); });
- Verifier().Expect(2, true).Verify(cq_.get());
- t1.join();
-
- EXPECT_EQ(send_request.message(), recv_request.message());
-
- bool expected_cq_result = true;
- bool ignore_cq_result = false;
- bool want_done_tag = false;
- bool expected_client_cq_result = true;
- bool ignore_client_cq_result =
- (server_try_cancel != CANCEL_BEFORE_PROCESSING);
-
- if (server_try_cancel == CANCEL_BEFORE_PROCESSING) {
- srv_ctx.TryCancel();
- Verifier().Expect(11, true).Verify(cq_.get());
- EXPECT_TRUE(srv_ctx.IsCancelled());
-
- // We know for sure that all cq results will be false from this point
- // since the server cancelled the RPC
- expected_cq_result = false;
- expected_client_cq_result = false;
- }
-
- std::thread cli_thread([&cli_cq, &cli_stream, &expected_client_cq_result,
- &ignore_client_cq_result] {
- // Client attempts to read the three messages from the server
- for (int tag_idx = 6; tag_idx <= 8; tag_idx++) {
- EchoResponse recv_response;
- cli_stream->Read(&recv_response, tag(tag_idx));
- Verifier()
- .Expect(tag_idx, expected_client_cq_result)
- .Verify(&cli_cq, ignore_client_cq_result);
- }
- });
-
- std::thread* server_try_cancel_thd = nullptr;
-
- auto verif = Verifier();
-
- if (server_try_cancel == CANCEL_DURING_PROCESSING) {
- server_try_cancel_thd =
- new std::thread([&srv_ctx] { srv_ctx.TryCancel(); });
-
- // Server will cancel the RPC in a parallel thread while writing responses
- // to the client. Since the cancellation can happen at anytime, some of
- // the cq results (i.e those until cancellation) might be true but it is
- // non deterministic. So better to ignore the cq results
- ignore_cq_result = true;
- // Expect that we might possibly see the done tag that
- // indicates cancellation completion in this case
- want_done_tag = true;
- verif.Expect(11, true);
- }
-
- // Server sends three messages (tags 3, 4 and 5)
- // But if want_done tag is true, we might also see tag 11
- for (int tag_idx = 3; tag_idx <= 5; tag_idx++) {
+ cli_stream->Write(send_request, tag(tag_idx));
+ Verifier()
+ .Expect(tag_idx, expected_client_cq_result)
+ .Verify(&cli_cq, ignore_client_cq_result);
+ }
+ cli_stream->WritesDone(tag(6));
+ // Ignore ok on WritesDone since cancel can affect it
+ Verifier()
+ .Expect(6, expected_client_cq_result)
+ .Verify(&cli_cq, ignore_client_cq_result);
+ });
+
+ bool ignore_cq_result = false;
+ bool want_done_tag = false;
+ std::thread* server_try_cancel_thd = nullptr;
+
+ auto verif = Verifier();
+
+ if (server_try_cancel == CANCEL_DURING_PROCESSING) {
+ server_try_cancel_thd =
+ new std::thread([&srv_ctx] { srv_ctx.TryCancel(); });
+ // Server will cancel the RPC in a parallel thread while reading the
+      // requests from the client. Since the cancellation can happen at any
+      // time, some of the cq results (i.e. those until cancellation) might be
+      // true, but that is non-deterministic. So it is better to ignore the cq
+      // results.
+ ignore_cq_result = true;
+ // Expect that we might possibly see the done tag that
+ // indicates cancellation completion in this case
+ want_done_tag = true;
+ verif.Expect(11, true);
+ }
+
+ // Server reads 3 messages (tags 6, 7 and 8)
+ // But if want_done_tag is true, we might also see tag 11
+ for (int tag_idx = 6; tag_idx <= 8; tag_idx++) {
+ srv_stream.Read(&recv_request, tag(tag_idx));
+ // Note that we'll add something to the verifier and verify that
+ // something was seen, but it might be tag 11 and not what we
+ // just added
+ int got_tag = verif.Expect(tag_idx, expected_server_cq_result)
+ .Next(cq_.get(), ignore_cq_result);
+ GPR_ASSERT((got_tag == tag_idx) || (got_tag == 11 && want_done_tag));
+ if (got_tag == 11) {
+ EXPECT_TRUE(srv_ctx.IsCancelled());
+ want_done_tag = false;
+ // Now get the other entry that we were waiting on
+ EXPECT_EQ(verif.Next(cq_.get(), ignore_cq_result), tag_idx);
+ }
+ }
+
+ cli_thread.join();
+
+ if (server_try_cancel_thd != nullptr) {
+ server_try_cancel_thd->join();
+ delete server_try_cancel_thd;
+ }
+
+ if (server_try_cancel == CANCEL_AFTER_PROCESSING) {
+ srv_ctx.TryCancel();
+ want_done_tag = true;
+ verif.Expect(11, true);
+ }
+
+ if (want_done_tag) {
+ verif.Verify(cq_.get());
+ EXPECT_TRUE(srv_ctx.IsCancelled());
+ want_done_tag = false;
+ }
+
+    // The RPC has been cancelled at this point for sure (i.e. irrespective of
+    // the value of `server_try_cancel`). So, from this point forward, we know
+    // that cq results are supposed to return false on the server.
+
+ // Server sends the final message and cancelled status (but the RPC is
+ // already cancelled at this point. So we expect the operation to fail)
+ srv_stream.Finish(send_response, Status::CANCELLED, tag(9));
+ Verifier().Expect(9, false).Verify(cq_.get());
+
+ // Client will see the cancellation
+ cli_stream->Finish(&recv_status, tag(10));
+ Verifier().Expect(10, true).Verify(&cli_cq);
+ EXPECT_FALSE(recv_status.ok());
+ EXPECT_EQ(::grpc::StatusCode::CANCELLED, recv_status.error_code());
+
+ cli_cq.Shutdown();
+ void* dummy_tag;
+ bool dummy_ok;
+ while (cli_cq.Next(&dummy_tag, &dummy_ok)) {
+ }
+ }
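+
+  // Illustrative sketch (not part of the original tests): the shutdown-and-
+  // drain idiom used at the end of the helper above, factored into a helper.
+  // Any client-owned CompletionQueue can be retired this way once no further
+  // tags are expected from it.
+  static void DrainCompletionQueue(CompletionQueue* cq) {
+    cq->Shutdown();
+    void* tag;
+    bool ok;
+    while (cq->Next(&tag, &ok)) {
+    }
+  }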
+
+ // Helper for testing server-streaming RPCs which are cancelled on the server.
+ // Depending on the value of server_try_cancel parameter, this will test one
+ // of the following three scenarios:
+ // CANCEL_BEFORE_PROCESSING: Rpc is cancelled by the server before sending
+ // any messages to the client
+ //
+ // CANCEL_DURING_PROCESSING: Rpc is cancelled by the server while sending
+ // messages to the client
+ //
+  //   CANCEL_AFTER_PROCESSING: Rpc is cancelled by the server after sending
+  //   all messages to the client (but before sending any status back to the
+  //   client)
+ void TestServerStreamingServerCancel(
+ ServerTryCancelRequestPhase server_try_cancel) {
+ ResetStub();
+
+ EchoRequest send_request;
+ EchoRequest recv_request;
+ EchoResponse send_response;
+ Status recv_status;
+ ClientContext cli_ctx;
+ ServerContext srv_ctx;
+ ServerAsyncWriter<EchoResponse> srv_stream(&srv_ctx);
+
+ send_request.set_message("Ping");
+ // Initiate the 'ResponseStream' call on the client
+ CompletionQueue cli_cq;
+ std::unique_ptr<ClientAsyncReader<EchoResponse>> cli_stream(
+ stub_->AsyncResponseStream(&cli_ctx, send_request, &cli_cq, tag(1)));
+ // On the server, request to be notified of 'ResponseStream' calls and
+ // receive the call just made by the client
+ srv_ctx.AsyncNotifyWhenDone(tag(11));
+ service_->RequestResponseStream(&srv_ctx, &recv_request, &srv_stream,
+ cq_.get(), cq_.get(), tag(2));
+
+ std::thread t1([&cli_cq] { Verifier().Expect(1, true).Verify(&cli_cq); });
+ Verifier().Expect(2, true).Verify(cq_.get());
+ t1.join();
+
+ EXPECT_EQ(send_request.message(), recv_request.message());
+
+ bool expected_cq_result = true;
+ bool ignore_cq_result = false;
+ bool want_done_tag = false;
+ bool expected_client_cq_result = true;
+ bool ignore_client_cq_result =
+ (server_try_cancel != CANCEL_BEFORE_PROCESSING);
+
+ if (server_try_cancel == CANCEL_BEFORE_PROCESSING) {
+ srv_ctx.TryCancel();
+ Verifier().Expect(11, true).Verify(cq_.get());
+ EXPECT_TRUE(srv_ctx.IsCancelled());
+
+ // We know for sure that all cq results will be false from this point
+ // since the server cancelled the RPC
+ expected_cq_result = false;
+ expected_client_cq_result = false;
+ }
+
+ std::thread cli_thread([&cli_cq, &cli_stream, &expected_client_cq_result,
+ &ignore_client_cq_result] {
+ // Client attempts to read the three messages from the server
+ for (int tag_idx = 6; tag_idx <= 8; tag_idx++) {
+ EchoResponse recv_response;
+ cli_stream->Read(&recv_response, tag(tag_idx));
+ Verifier()
+ .Expect(tag_idx, expected_client_cq_result)
+ .Verify(&cli_cq, ignore_client_cq_result);
+ }
+ });
+
+ std::thread* server_try_cancel_thd = nullptr;
+
+ auto verif = Verifier();
+
+ if (server_try_cancel == CANCEL_DURING_PROCESSING) {
+ server_try_cancel_thd =
+ new std::thread([&srv_ctx] { srv_ctx.TryCancel(); });
+
+ // Server will cancel the RPC in a parallel thread while writing responses
+      // to the client. Since the cancellation can happen at any time, some of
+      // the cq results (i.e. those until cancellation) might be true, but it
+      // is non-deterministic. So it is better to ignore the cq results.
+ ignore_cq_result = true;
+ // Expect that we might possibly see the done tag that
+ // indicates cancellation completion in this case
+ want_done_tag = true;
+ verif.Expect(11, true);
+ }
+
+ // Server sends three messages (tags 3, 4 and 5)
+    // But if want_done_tag is true, we might also see tag 11
+ for (int tag_idx = 3; tag_idx <= 5; tag_idx++) {
send_response.set_message("Pong " + ToString(tag_idx));
- srv_stream.Write(send_response, tag(tag_idx));
- // Note that we'll add something to the verifier and verify that
- // something was seen, but it might be tag 11 and not what we
- // just added
- int got_tag = verif.Expect(tag_idx, expected_cq_result)
- .Next(cq_.get(), ignore_cq_result);
- GPR_ASSERT((got_tag == tag_idx) || (got_tag == 11 && want_done_tag));
- if (got_tag == 11) {
- EXPECT_TRUE(srv_ctx.IsCancelled());
- want_done_tag = false;
- // Now get the other entry that we were waiting on
- EXPECT_EQ(verif.Next(cq_.get(), ignore_cq_result), tag_idx);
- }
- }
-
- if (server_try_cancel_thd != nullptr) {
- server_try_cancel_thd->join();
- delete server_try_cancel_thd;
- }
-
- if (server_try_cancel == CANCEL_AFTER_PROCESSING) {
- srv_ctx.TryCancel();
- want_done_tag = true;
- verif.Expect(11, true);
- }
-
- if (want_done_tag) {
- verif.Verify(cq_.get());
- EXPECT_TRUE(srv_ctx.IsCancelled());
- want_done_tag = false;
- }
-
- cli_thread.join();
-
- // The RPC has been cancelled at this point for sure (i.e irrespective of
- // the value of `server_try_cancel` is). So, from this point forward, we
- // know that cq results are supposed to return false on server.
-
- // Server finishes the stream (but the RPC is already cancelled)
- srv_stream.Finish(Status::CANCELLED, tag(9));
- Verifier().Expect(9, false).Verify(cq_.get());
-
- // Client will see the cancellation
- cli_stream->Finish(&recv_status, tag(10));
- Verifier().Expect(10, true).Verify(&cli_cq);
- EXPECT_FALSE(recv_status.ok());
- EXPECT_EQ(::grpc::StatusCode::CANCELLED, recv_status.error_code());
-
- cli_cq.Shutdown();
- void* dummy_tag;
- bool dummy_ok;
- while (cli_cq.Next(&dummy_tag, &dummy_ok)) {
- }
- }
-
- // Helper for testing bidirectinal-streaming RPCs which are cancelled on the
- // server.
- //
- // Depending on the value of server_try_cancel parameter, this will
- // test one of the following three scenarios:
- // CANCEL_BEFORE_PROCESSING: Rpc is cancelled by the server before reading/
- // writing any messages from/to the client
- //
- // CANCEL_DURING_PROCESSING: Rpc is cancelled by the server while reading
- // messages from the client
- //
- // CANCEL_AFTER PROCESSING: Rpc is cancelled by server after reading all
- // messages from the client (but before sending any status back to the
- // client)
- void TestBidiStreamingServerCancel(
- ServerTryCancelRequestPhase server_try_cancel) {
- ResetStub();
-
- EchoRequest send_request;
- EchoRequest recv_request;
- EchoResponse send_response;
- EchoResponse recv_response;
- Status recv_status;
- ClientContext cli_ctx;
- ServerContext srv_ctx;
- ServerAsyncReaderWriter<EchoResponse, EchoRequest> srv_stream(&srv_ctx);
-
- // Initiate the call from the client side
- std::unique_ptr<ClientAsyncReaderWriter<EchoRequest, EchoResponse>>
- cli_stream(stub_->AsyncBidiStream(&cli_ctx, cq_.get(), tag(1)));
-
- // On the server, request to be notified of the 'BidiStream' call and
- // receive the call just made by the client
- srv_ctx.AsyncNotifyWhenDone(tag(11));
- service_->RequestBidiStream(&srv_ctx, &srv_stream, cq_.get(), cq_.get(),
- tag(2));
- Verifier().Expect(1, true).Expect(2, true).Verify(cq_.get());
-
- auto verif = Verifier();
-
- // Client sends the first and the only message
- send_request.set_message("Ping");
- cli_stream->Write(send_request, tag(3));
- verif.Expect(3, true);
-
- bool expected_cq_result = true;
- bool ignore_cq_result = false;
- bool want_done_tag = false;
-
- int got_tag, got_tag2;
- bool tag_3_done = false;
-
- if (server_try_cancel == CANCEL_BEFORE_PROCESSING) {
- srv_ctx.TryCancel();
- verif.Expect(11, true);
- // We know for sure that all server cq results will be false from
- // this point since the server cancelled the RPC. However, we can't
- // say for sure about the client
- expected_cq_result = false;
- ignore_cq_result = true;
-
- do {
- got_tag = verif.Next(cq_.get(), ignore_cq_result);
- GPR_ASSERT(((got_tag == 3) && !tag_3_done) || (got_tag == 11));
- if (got_tag == 3) {
- tag_3_done = true;
- }
- } while (got_tag != 11);
- EXPECT_TRUE(srv_ctx.IsCancelled());
- }
-
- std::thread* server_try_cancel_thd = nullptr;
-
- if (server_try_cancel == CANCEL_DURING_PROCESSING) {
- server_try_cancel_thd =
- new std::thread([&srv_ctx] { srv_ctx.TryCancel(); });
-
- // Since server is going to cancel the RPC in a parallel thread, some of
- // the cq results (i.e those until the cancellation) might be true. Since
- // that number is non-deterministic, it is better to ignore the cq results
- ignore_cq_result = true;
- // Expect that we might possibly see the done tag that
- // indicates cancellation completion in this case
- want_done_tag = true;
- verif.Expect(11, true);
- }
-
- srv_stream.Read(&recv_request, tag(4));
- verif.Expect(4, expected_cq_result);
- got_tag = tag_3_done ? 3 : verif.Next(cq_.get(), ignore_cq_result);
- got_tag2 = verif.Next(cq_.get(), ignore_cq_result);
- GPR_ASSERT((got_tag == 3) || (got_tag == 4) ||
- (got_tag == 11 && want_done_tag));
- GPR_ASSERT((got_tag2 == 3) || (got_tag2 == 4) ||
- (got_tag2 == 11 && want_done_tag));
- // If we get 3 and 4, we don't need to wait for 11, but if
- // we get 11, we should also clear 3 and 4
- if (got_tag + got_tag2 != 7) {
- EXPECT_TRUE(srv_ctx.IsCancelled());
- want_done_tag = false;
- got_tag = verif.Next(cq_.get(), ignore_cq_result);
- GPR_ASSERT((got_tag == 3) || (got_tag == 4));
- }
-
- send_response.set_message("Pong");
- srv_stream.Write(send_response, tag(5));
- verif.Expect(5, expected_cq_result);
-
- cli_stream->Read(&recv_response, tag(6));
- verif.Expect(6, expected_cq_result);
- got_tag = verif.Next(cq_.get(), ignore_cq_result);
- got_tag2 = verif.Next(cq_.get(), ignore_cq_result);
- GPR_ASSERT((got_tag == 5) || (got_tag == 6) ||
- (got_tag == 11 && want_done_tag));
- GPR_ASSERT((got_tag2 == 5) || (got_tag2 == 6) ||
- (got_tag2 == 11 && want_done_tag));
- // If we get 5 and 6, we don't need to wait for 11, but if
- // we get 11, we should also clear 5 and 6
- if (got_tag + got_tag2 != 11) {
- EXPECT_TRUE(srv_ctx.IsCancelled());
- want_done_tag = false;
- got_tag = verif.Next(cq_.get(), ignore_cq_result);
- GPR_ASSERT((got_tag == 5) || (got_tag == 6));
- }
-
- // This is expected to succeed in all cases
- cli_stream->WritesDone(tag(7));
- verif.Expect(7, true);
- // TODO(vjpai): Consider whether the following is too flexible
- // or whether it should just be reset to ignore_cq_result
- bool ignore_cq_wd_result =
- ignore_cq_result || (server_try_cancel == CANCEL_BEFORE_PROCESSING);
- got_tag = verif.Next(cq_.get(), ignore_cq_wd_result);
- GPR_ASSERT((got_tag == 7) || (got_tag == 11 && want_done_tag));
- if (got_tag == 11) {
- EXPECT_TRUE(srv_ctx.IsCancelled());
- want_done_tag = false;
- // Now get the other entry that we were waiting on
- EXPECT_EQ(verif.Next(cq_.get(), ignore_cq_wd_result), 7);
- }
-
- // This is expected to fail in all cases i.e for all values of
- // server_try_cancel. This is because at this point, either there are no
- // more msgs from the client (because client called WritesDone) or the RPC
- // is cancelled on the server
- srv_stream.Read(&recv_request, tag(8));
- verif.Expect(8, false);
- got_tag = verif.Next(cq_.get(), ignore_cq_result);
- GPR_ASSERT((got_tag == 8) || (got_tag == 11 && want_done_tag));
- if (got_tag == 11) {
- EXPECT_TRUE(srv_ctx.IsCancelled());
- want_done_tag = false;
- // Now get the other entry that we were waiting on
- EXPECT_EQ(verif.Next(cq_.get(), ignore_cq_result), 8);
- }
-
- if (server_try_cancel_thd != nullptr) {
- server_try_cancel_thd->join();
- delete server_try_cancel_thd;
- }
-
- if (server_try_cancel == CANCEL_AFTER_PROCESSING) {
- srv_ctx.TryCancel();
- want_done_tag = true;
- verif.Expect(11, true);
- }
-
- if (want_done_tag) {
- verif.Verify(cq_.get());
- EXPECT_TRUE(srv_ctx.IsCancelled());
- want_done_tag = false;
- }
-
- // The RPC has been cancelled at this point for sure (i.e irrespective of
- // the value of `server_try_cancel` is). So, from this point forward, we
- // know that cq results are supposed to return false on server.
-
- srv_stream.Finish(Status::CANCELLED, tag(9));
- Verifier().Expect(9, false).Verify(cq_.get());
-
- cli_stream->Finish(&recv_status, tag(10));
- Verifier().Expect(10, true).Verify(cq_.get());
- EXPECT_FALSE(recv_status.ok());
- EXPECT_EQ(grpc::StatusCode::CANCELLED, recv_status.error_code());
- }
-};
-
-TEST_P(AsyncEnd2endServerTryCancelTest, ClientStreamingServerTryCancelBefore) {
- TestClientStreamingServerCancel(CANCEL_BEFORE_PROCESSING);
-}
-
-TEST_P(AsyncEnd2endServerTryCancelTest, ClientStreamingServerTryCancelDuring) {
- TestClientStreamingServerCancel(CANCEL_DURING_PROCESSING);
-}
-
-TEST_P(AsyncEnd2endServerTryCancelTest, ClientStreamingServerTryCancelAfter) {
- TestClientStreamingServerCancel(CANCEL_AFTER_PROCESSING);
-}
-
-TEST_P(AsyncEnd2endServerTryCancelTest, ServerStreamingServerTryCancelBefore) {
- TestServerStreamingServerCancel(CANCEL_BEFORE_PROCESSING);
-}
-
-TEST_P(AsyncEnd2endServerTryCancelTest, ServerStreamingServerTryCancelDuring) {
- TestServerStreamingServerCancel(CANCEL_DURING_PROCESSING);
-}
-
-TEST_P(AsyncEnd2endServerTryCancelTest, ServerStreamingServerTryCancelAfter) {
- TestServerStreamingServerCancel(CANCEL_AFTER_PROCESSING);
-}
-
-TEST_P(AsyncEnd2endServerTryCancelTest, ServerBidiStreamingTryCancelBefore) {
- TestBidiStreamingServerCancel(CANCEL_BEFORE_PROCESSING);
-}
-
-TEST_P(AsyncEnd2endServerTryCancelTest, ServerBidiStreamingTryCancelDuring) {
- TestBidiStreamingServerCancel(CANCEL_DURING_PROCESSING);
-}
-
-TEST_P(AsyncEnd2endServerTryCancelTest, ServerBidiStreamingTryCancelAfter) {
- TestBidiStreamingServerCancel(CANCEL_AFTER_PROCESSING);
-}
-
-std::vector<TestScenario> CreateTestScenarios(bool /*test_secure*/,
- bool test_message_size_limit) {
- std::vector<TestScenario> scenarios;
+ srv_stream.Write(send_response, tag(tag_idx));
+ // Note that we'll add something to the verifier and verify that
+ // something was seen, but it might be tag 11 and not what we
+ // just added
+ int got_tag = verif.Expect(tag_idx, expected_cq_result)
+ .Next(cq_.get(), ignore_cq_result);
+ GPR_ASSERT((got_tag == tag_idx) || (got_tag == 11 && want_done_tag));
+ if (got_tag == 11) {
+ EXPECT_TRUE(srv_ctx.IsCancelled());
+ want_done_tag = false;
+ // Now get the other entry that we were waiting on
+ EXPECT_EQ(verif.Next(cq_.get(), ignore_cq_result), tag_idx);
+ }
+ }
+
+ if (server_try_cancel_thd != nullptr) {
+ server_try_cancel_thd->join();
+ delete server_try_cancel_thd;
+ }
+
+ if (server_try_cancel == CANCEL_AFTER_PROCESSING) {
+ srv_ctx.TryCancel();
+ want_done_tag = true;
+ verif.Expect(11, true);
+ }
+
+ if (want_done_tag) {
+ verif.Verify(cq_.get());
+ EXPECT_TRUE(srv_ctx.IsCancelled());
+ want_done_tag = false;
+ }
+
+ cli_thread.join();
+
+    // The RPC has been cancelled at this point for sure (i.e. irrespective of
+    // the value of `server_try_cancel`). So, from this point forward, we know
+    // that cq results are supposed to return false on the server.
+
+ // Server finishes the stream (but the RPC is already cancelled)
+ srv_stream.Finish(Status::CANCELLED, tag(9));
+ Verifier().Expect(9, false).Verify(cq_.get());
+
+ // Client will see the cancellation
+ cli_stream->Finish(&recv_status, tag(10));
+ Verifier().Expect(10, true).Verify(&cli_cq);
+ EXPECT_FALSE(recv_status.ok());
+ EXPECT_EQ(::grpc::StatusCode::CANCELLED, recv_status.error_code());
+
+ cli_cq.Shutdown();
+ void* dummy_tag;
+ bool dummy_ok;
+ while (cli_cq.Next(&dummy_tag, &dummy_ok)) {
+ }
+ }
+
+  // Helper for testing bidirectional-streaming RPCs which are cancelled on the
+ // server.
+ //
+ // Depending on the value of server_try_cancel parameter, this will
+ // test one of the following three scenarios:
+ // CANCEL_BEFORE_PROCESSING: Rpc is cancelled by the server before reading/
+ // writing any messages from/to the client
+ //
+ // CANCEL_DURING_PROCESSING: Rpc is cancelled by the server while reading
+ // messages from the client
+ //
+  //   CANCEL_AFTER_PROCESSING: Rpc is cancelled by the server after reading
+  //   all messages from the client (but before sending any status back to
+  //   the client)
+ void TestBidiStreamingServerCancel(
+ ServerTryCancelRequestPhase server_try_cancel) {
+ ResetStub();
+
+ EchoRequest send_request;
+ EchoRequest recv_request;
+ EchoResponse send_response;
+ EchoResponse recv_response;
+ Status recv_status;
+ ClientContext cli_ctx;
+ ServerContext srv_ctx;
+ ServerAsyncReaderWriter<EchoResponse, EchoRequest> srv_stream(&srv_ctx);
+
+ // Initiate the call from the client side
+ std::unique_ptr<ClientAsyncReaderWriter<EchoRequest, EchoResponse>>
+ cli_stream(stub_->AsyncBidiStream(&cli_ctx, cq_.get(), tag(1)));
+
+ // On the server, request to be notified of the 'BidiStream' call and
+ // receive the call just made by the client
+ srv_ctx.AsyncNotifyWhenDone(tag(11));
+ service_->RequestBidiStream(&srv_ctx, &srv_stream, cq_.get(), cq_.get(),
+ tag(2));
+ Verifier().Expect(1, true).Expect(2, true).Verify(cq_.get());
+
+ auto verif = Verifier();
+
+ // Client sends the first and the only message
+ send_request.set_message("Ping");
+ cli_stream->Write(send_request, tag(3));
+ verif.Expect(3, true);
+
+ bool expected_cq_result = true;
+ bool ignore_cq_result = false;
+ bool want_done_tag = false;
+
+ int got_tag, got_tag2;
+ bool tag_3_done = false;
+
+ if (server_try_cancel == CANCEL_BEFORE_PROCESSING) {
+ srv_ctx.TryCancel();
+ verif.Expect(11, true);
+ // We know for sure that all server cq results will be false from
+ // this point since the server cancelled the RPC. However, we can't
+ // say for sure about the client
+ expected_cq_result = false;
+ ignore_cq_result = true;
+
+ do {
+ got_tag = verif.Next(cq_.get(), ignore_cq_result);
+ GPR_ASSERT(((got_tag == 3) && !tag_3_done) || (got_tag == 11));
+ if (got_tag == 3) {
+ tag_3_done = true;
+ }
+ } while (got_tag != 11);
+ EXPECT_TRUE(srv_ctx.IsCancelled());
+ }
+
+ std::thread* server_try_cancel_thd = nullptr;
+
+ if (server_try_cancel == CANCEL_DURING_PROCESSING) {
+ server_try_cancel_thd =
+ new std::thread([&srv_ctx] { srv_ctx.TryCancel(); });
+
+      // Since the server is going to cancel the RPC in a parallel thread, some
+      // of the cq results (i.e. those until the cancellation) might be true.
+      // Since that number is non-deterministic, it is better to ignore the cq
+      // results.
+ ignore_cq_result = true;
+ // Expect that we might possibly see the done tag that
+ // indicates cancellation completion in this case
+ want_done_tag = true;
+ verif.Expect(11, true);
+ }
+
+ srv_stream.Read(&recv_request, tag(4));
+ verif.Expect(4, expected_cq_result);
+ got_tag = tag_3_done ? 3 : verif.Next(cq_.get(), ignore_cq_result);
+ got_tag2 = verif.Next(cq_.get(), ignore_cq_result);
+ GPR_ASSERT((got_tag == 3) || (got_tag == 4) ||
+ (got_tag == 11 && want_done_tag));
+ GPR_ASSERT((got_tag2 == 3) || (got_tag2 == 4) ||
+ (got_tag2 == 11 && want_done_tag));
+ // If we get 3 and 4, we don't need to wait for 11, but if
+ // we get 11, we should also clear 3 and 4
+ if (got_tag + got_tag2 != 7) {
+ EXPECT_TRUE(srv_ctx.IsCancelled());
+ want_done_tag = false;
+ got_tag = verif.Next(cq_.get(), ignore_cq_result);
+ GPR_ASSERT((got_tag == 3) || (got_tag == 4));
+ }
+
+ send_response.set_message("Pong");
+ srv_stream.Write(send_response, tag(5));
+ verif.Expect(5, expected_cq_result);
+
+ cli_stream->Read(&recv_response, tag(6));
+ verif.Expect(6, expected_cq_result);
+ got_tag = verif.Next(cq_.get(), ignore_cq_result);
+ got_tag2 = verif.Next(cq_.get(), ignore_cq_result);
+ GPR_ASSERT((got_tag == 5) || (got_tag == 6) ||
+ (got_tag == 11 && want_done_tag));
+ GPR_ASSERT((got_tag2 == 5) || (got_tag2 == 6) ||
+ (got_tag2 == 11 && want_done_tag));
+ // If we get 5 and 6, we don't need to wait for 11, but if
+ // we get 11, we should also clear 5 and 6
+ if (got_tag + got_tag2 != 11) {
+ EXPECT_TRUE(srv_ctx.IsCancelled());
+ want_done_tag = false;
+ got_tag = verif.Next(cq_.get(), ignore_cq_result);
+ GPR_ASSERT((got_tag == 5) || (got_tag == 6));
+ }
+
+ // This is expected to succeed in all cases
+ cli_stream->WritesDone(tag(7));
+ verif.Expect(7, true);
+ // TODO(vjpai): Consider whether the following is too flexible
+ // or whether it should just be reset to ignore_cq_result
+ bool ignore_cq_wd_result =
+ ignore_cq_result || (server_try_cancel == CANCEL_BEFORE_PROCESSING);
+ got_tag = verif.Next(cq_.get(), ignore_cq_wd_result);
+ GPR_ASSERT((got_tag == 7) || (got_tag == 11 && want_done_tag));
+ if (got_tag == 11) {
+ EXPECT_TRUE(srv_ctx.IsCancelled());
+ want_done_tag = false;
+ // Now get the other entry that we were waiting on
+ EXPECT_EQ(verif.Next(cq_.get(), ignore_cq_wd_result), 7);
+ }
+
+    // This is expected to fail in all cases, i.e. for all values of
+    // server_try_cancel. This is because at this point either there are no
+    // more messages from the client (because the client called WritesDone) or
+    // the RPC has been cancelled on the server.
+ srv_stream.Read(&recv_request, tag(8));
+ verif.Expect(8, false);
+ got_tag = verif.Next(cq_.get(), ignore_cq_result);
+ GPR_ASSERT((got_tag == 8) || (got_tag == 11 && want_done_tag));
+ if (got_tag == 11) {
+ EXPECT_TRUE(srv_ctx.IsCancelled());
+ want_done_tag = false;
+ // Now get the other entry that we were waiting on
+ EXPECT_EQ(verif.Next(cq_.get(), ignore_cq_result), 8);
+ }
+
+ if (server_try_cancel_thd != nullptr) {
+ server_try_cancel_thd->join();
+ delete server_try_cancel_thd;
+ }
+
+ if (server_try_cancel == CANCEL_AFTER_PROCESSING) {
+ srv_ctx.TryCancel();
+ want_done_tag = true;
+ verif.Expect(11, true);
+ }
+
+ if (want_done_tag) {
+ verif.Verify(cq_.get());
+ EXPECT_TRUE(srv_ctx.IsCancelled());
+ want_done_tag = false;
+ }
+
+    // The RPC has been cancelled at this point for sure (i.e. irrespective of
+    // the value of `server_try_cancel`). So, from this point forward, we know
+    // that cq results are supposed to return false on the server.
+
+ srv_stream.Finish(Status::CANCELLED, tag(9));
+ Verifier().Expect(9, false).Verify(cq_.get());
+
+ cli_stream->Finish(&recv_status, tag(10));
+ Verifier().Expect(10, true).Verify(cq_.get());
+ EXPECT_FALSE(recv_status.ok());
+ EXPECT_EQ(grpc::StatusCode::CANCELLED, recv_status.error_code());
+ }
+};
+
+TEST_P(AsyncEnd2endServerTryCancelTest, ClientStreamingServerTryCancelBefore) {
+ TestClientStreamingServerCancel(CANCEL_BEFORE_PROCESSING);
+}
+
+TEST_P(AsyncEnd2endServerTryCancelTest, ClientStreamingServerTryCancelDuring) {
+ TestClientStreamingServerCancel(CANCEL_DURING_PROCESSING);
+}
+
+TEST_P(AsyncEnd2endServerTryCancelTest, ClientStreamingServerTryCancelAfter) {
+ TestClientStreamingServerCancel(CANCEL_AFTER_PROCESSING);
+}
+
+TEST_P(AsyncEnd2endServerTryCancelTest, ServerStreamingServerTryCancelBefore) {
+ TestServerStreamingServerCancel(CANCEL_BEFORE_PROCESSING);
+}
+
+TEST_P(AsyncEnd2endServerTryCancelTest, ServerStreamingServerTryCancelDuring) {
+ TestServerStreamingServerCancel(CANCEL_DURING_PROCESSING);
+}
+
+TEST_P(AsyncEnd2endServerTryCancelTest, ServerStreamingServerTryCancelAfter) {
+ TestServerStreamingServerCancel(CANCEL_AFTER_PROCESSING);
+}
+
+TEST_P(AsyncEnd2endServerTryCancelTest, ServerBidiStreamingTryCancelBefore) {
+ TestBidiStreamingServerCancel(CANCEL_BEFORE_PROCESSING);
+}
+
+TEST_P(AsyncEnd2endServerTryCancelTest, ServerBidiStreamingTryCancelDuring) {
+ TestBidiStreamingServerCancel(CANCEL_DURING_PROCESSING);
+}
+
+TEST_P(AsyncEnd2endServerTryCancelTest, ServerBidiStreamingTryCancelAfter) {
+ TestBidiStreamingServerCancel(CANCEL_AFTER_PROCESSING);
+}
+
+std::vector<TestScenario> CreateTestScenarios(bool /*test_secure*/,
+ bool test_message_size_limit) {
+ std::vector<TestScenario> scenarios;
std::vector<TString> credentials_types;
std::vector<TString> messages;
-
- auto insec_ok = [] {
- // Only allow insecure credentials type when it is registered with the
- // provider. User may create providers that do not have insecure.
- return GetCredentialsProvider()->GetChannelCredentials(
- kInsecureCredentialsType, nullptr) != nullptr;
- };
-
- if (insec_ok()) {
- credentials_types.push_back(kInsecureCredentialsType);
- }
- auto sec_list = GetCredentialsProvider()->GetSecureCredentialsTypeList();
- for (auto sec = sec_list.begin(); sec != sec_list.end(); sec++) {
- credentials_types.push_back(*sec);
- }
- GPR_ASSERT(!credentials_types.empty());
-
- messages.push_back("Hello");
- if (test_message_size_limit) {
- for (size_t k = 1; k < GRPC_DEFAULT_MAX_RECV_MESSAGE_LENGTH / 1024;
- k *= 32) {
+
+ auto insec_ok = [] {
+    // Only allow the insecure credentials type when it is registered with the
+    // provider. Users may create providers that do not support insecure
+    // credentials.
+ return GetCredentialsProvider()->GetChannelCredentials(
+ kInsecureCredentialsType, nullptr) != nullptr;
+ };
+
+ if (insec_ok()) {
+ credentials_types.push_back(kInsecureCredentialsType);
+ }
+ auto sec_list = GetCredentialsProvider()->GetSecureCredentialsTypeList();
+ for (auto sec = sec_list.begin(); sec != sec_list.end(); sec++) {
+ credentials_types.push_back(*sec);
+ }
+ GPR_ASSERT(!credentials_types.empty());
+
+ messages.push_back("Hello");
+ if (test_message_size_limit) {
+ for (size_t k = 1; k < GRPC_DEFAULT_MAX_RECV_MESSAGE_LENGTH / 1024;
+ k *= 32) {
TString big_msg;
- for (size_t i = 0; i < k * 1024; ++i) {
- char c = 'a' + (i % 26);
- big_msg += c;
- }
- messages.push_back(big_msg);
- }
+ for (size_t i = 0; i < k * 1024; ++i) {
+ char c = 'a' + (i % 26);
+ big_msg += c;
+ }
+ messages.push_back(big_msg);
+ }
if (!BuiltUnderMsan()) {
// 4MB message processing with SSL is very slow under msan
// (causes timeouts) and doesn't really increase the signal from tests.
@@ -1911,42 +1911,42 @@ std::vector<TestScenario> CreateTestScenarios(bool /*test_secure*/,
messages.push_back(
TString(GRPC_DEFAULT_MAX_RECV_MESSAGE_LENGTH - 100, 'a'));
}
- }
-
- // TODO (sreek) Renable tests with health check service after the issue
- // https://github.com/grpc/grpc/issues/11223 is resolved
- for (auto health_check_service : {false}) {
- for (auto msg = messages.begin(); msg != messages.end(); msg++) {
- for (auto cred = credentials_types.begin();
- cred != credentials_types.end(); ++cred) {
- scenarios.emplace_back(false, *cred, health_check_service, *msg);
- }
- if (insec_ok()) {
- scenarios.emplace_back(true, kInsecureCredentialsType,
- health_check_service, *msg);
- }
- }
- }
- return scenarios;
-}
-
-INSTANTIATE_TEST_SUITE_P(AsyncEnd2end, AsyncEnd2endTest,
- ::testing::ValuesIn(CreateTestScenarios(true, true)));
-INSTANTIATE_TEST_SUITE_P(AsyncEnd2endServerTryCancel,
- AsyncEnd2endServerTryCancelTest,
- ::testing::ValuesIn(CreateTestScenarios(false,
- false)));
-
-} // namespace
-} // namespace testing
-} // namespace grpc
-
-int main(int argc, char** argv) {
- // Change the backup poll interval from 5s to 100ms to speed up the
- // ReconnectChannel test
- GPR_GLOBAL_CONFIG_SET(grpc_client_channel_backup_poll_interval_ms, 100);
- grpc::testing::TestEnvironment env(argc, argv);
- ::testing::InitGoogleTest(&argc, argv);
- int ret = RUN_ALL_TESTS();
- return ret;
-}
+ }
+
+  // TODO (sreek) Re-enable tests with the health check service after the issue
+  // https://github.com/grpc/grpc/issues/11223 is resolved
+ for (auto health_check_service : {false}) {
+ for (auto msg = messages.begin(); msg != messages.end(); msg++) {
+ for (auto cred = credentials_types.begin();
+ cred != credentials_types.end(); ++cred) {
+ scenarios.emplace_back(false, *cred, health_check_service, *msg);
+ }
+ if (insec_ok()) {
+ scenarios.emplace_back(true, kInsecureCredentialsType,
+ health_check_service, *msg);
+ }
+ }
+ }
+ return scenarios;
+}
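+
+// Illustrative sketch (not part of the original code): with the default
+// GRPC_DEFAULT_MAX_RECV_MESSAGE_LENGTH of 4 MiB, the size loop above visits
+// k = 1, 32 and 1024, i.e. payloads of roughly 1 KiB, 32 KiB and 1 MiB, plus
+// the near-limit message added when not building under MSAN.
+static std::vector<size_t> MessageSizesSketch() {
+  std::vector<size_t> sizes;
+  for (size_t k = 1; k < GRPC_DEFAULT_MAX_RECV_MESSAGE_LENGTH / 1024; k *= 32) {
+    sizes.push_back(k * 1024);  // 1024, 32768, 1048576 with the 4 MiB default
+  }
+  return sizes;
+}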
+
+INSTANTIATE_TEST_SUITE_P(AsyncEnd2end, AsyncEnd2endTest,
+ ::testing::ValuesIn(CreateTestScenarios(true, true)));
+INSTANTIATE_TEST_SUITE_P(AsyncEnd2endServerTryCancel,
+ AsyncEnd2endServerTryCancelTest,
+ ::testing::ValuesIn(CreateTestScenarios(false,
+ false)));
+
+} // namespace
+} // namespace testing
+} // namespace grpc
+
+int main(int argc, char** argv) {
+ // Change the backup poll interval from 5s to 100ms to speed up the
+ // ReconnectChannel test
+ GPR_GLOBAL_CONFIG_SET(grpc_client_channel_backup_poll_interval_ms, 100);
+ grpc::testing::TestEnvironment env(argc, argv);
+ ::testing::InitGoogleTest(&argc, argv);
+ int ret = RUN_ALL_TESTS();
+ return ret;
+}
diff --git a/contrib/libs/grpc/test/cpp/end2end/channelz_service_test.cc b/contrib/libs/grpc/test/cpp/end2end/channelz_service_test.cc
index 2f9c840a0f..9c723bebb6 100644
--- a/contrib/libs/grpc/test/cpp/end2end/channelz_service_test.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/channelz_service_test.cc
@@ -1,767 +1,767 @@
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include <grpc/support/port_platform.h>
-
-#include <grpc/grpc.h>
-#include <grpcpp/channel.h>
-#include <grpcpp/client_context.h>
-#include <grpcpp/create_channel.h>
-#include <grpcpp/security/credentials.h>
-#include <grpcpp/security/server_credentials.h>
-#include <grpcpp/server.h>
-#include <grpcpp/server_builder.h>
-#include <grpcpp/server_context.h>
-
-#include <grpcpp/ext/channelz_service_plugin.h>
-#include "src/core/lib/gpr/env.h"
-#include "src/proto/grpc/channelz/channelz.grpc.pb.h"
-#include "src/proto/grpc/testing/echo.grpc.pb.h"
-#include "test/core/util/port.h"
-#include "test/core/util/test_config.h"
-#include "test/cpp/end2end/test_service_impl.h"
-
-#include <gtest/gtest.h>
-
-using grpc::channelz::v1::GetChannelRequest;
-using grpc::channelz::v1::GetChannelResponse;
-using grpc::channelz::v1::GetServerRequest;
-using grpc::channelz::v1::GetServerResponse;
-using grpc::channelz::v1::GetServerSocketsRequest;
-using grpc::channelz::v1::GetServerSocketsResponse;
-using grpc::channelz::v1::GetServersRequest;
-using grpc::channelz::v1::GetServersResponse;
-using grpc::channelz::v1::GetSocketRequest;
-using grpc::channelz::v1::GetSocketResponse;
-using grpc::channelz::v1::GetSubchannelRequest;
-using grpc::channelz::v1::GetSubchannelResponse;
-using grpc::channelz::v1::GetTopChannelsRequest;
-using grpc::channelz::v1::GetTopChannelsResponse;
-
-namespace grpc {
-namespace testing {
-namespace {
-
-// Proxy service supports N backends. Sends RPC to backend dictated by
-// request->backend_channel_idx().
-class Proxy : public ::grpc::testing::EchoTestService::Service {
- public:
- Proxy() {}
-
- void AddChannelToBackend(const std::shared_ptr<Channel>& channel) {
- stubs_.push_back(grpc::testing::EchoTestService::NewStub(channel));
- }
-
- Status Echo(ServerContext* server_context, const EchoRequest* request,
- EchoResponse* response) override {
- std::unique_ptr<ClientContext> client_context =
- ClientContext::FromServerContext(*server_context);
- size_t idx = request->param().backend_channel_idx();
- GPR_ASSERT(idx < stubs_.size());
- return stubs_[idx]->Echo(client_context.get(), *request, response);
- }
-
- Status BidiStream(ServerContext* server_context,
- ServerReaderWriter<EchoResponse, EchoRequest>*
- stream_from_client) override {
- EchoRequest request;
- EchoResponse response;
- std::unique_ptr<ClientContext> client_context =
- ClientContext::FromServerContext(*server_context);
-
- // always use the first proxy for streaming
- auto stream_to_backend = stubs_[0]->BidiStream(client_context.get());
- while (stream_from_client->Read(&request)) {
- stream_to_backend->Write(request);
- stream_to_backend->Read(&response);
- stream_from_client->Write(response);
- }
-
- stream_to_backend->WritesDone();
- return stream_to_backend->Finish();
- }
-
- private:
- std::vector<std::unique_ptr<::grpc::testing::EchoTestService::Stub>> stubs_;
-};
-
-} // namespace
-
-class ChannelzServerTest : public ::testing::Test {
- public:
- ChannelzServerTest() {}
- static void SetUpTestCase() {
-#if TARGET_OS_IPHONE
- // Workaround Apple CFStream bug
- gpr_setenv("grpc_cfstream", "0");
-#endif
- }
- void SetUp() override {
- // ensure channel server is brought up on all severs we build.
- ::grpc::channelz::experimental::InitChannelzService();
-
- // We set up a proxy server with channelz enabled.
- proxy_port_ = grpc_pick_unused_port_or_die();
- ServerBuilder proxy_builder;
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpc/support/port_platform.h>
+
+#include <grpc/grpc.h>
+#include <grpcpp/channel.h>
+#include <grpcpp/client_context.h>
+#include <grpcpp/create_channel.h>
+#include <grpcpp/security/credentials.h>
+#include <grpcpp/security/server_credentials.h>
+#include <grpcpp/server.h>
+#include <grpcpp/server_builder.h>
+#include <grpcpp/server_context.h>
+
+#include <grpcpp/ext/channelz_service_plugin.h>
+#include "src/core/lib/gpr/env.h"
+#include "src/proto/grpc/channelz/channelz.grpc.pb.h"
+#include "src/proto/grpc/testing/echo.grpc.pb.h"
+#include "test/core/util/port.h"
+#include "test/core/util/test_config.h"
+#include "test/cpp/end2end/test_service_impl.h"
+
+#include <gtest/gtest.h>
+
+using grpc::channelz::v1::GetChannelRequest;
+using grpc::channelz::v1::GetChannelResponse;
+using grpc::channelz::v1::GetServerRequest;
+using grpc::channelz::v1::GetServerResponse;
+using grpc::channelz::v1::GetServerSocketsRequest;
+using grpc::channelz::v1::GetServerSocketsResponse;
+using grpc::channelz::v1::GetServersRequest;
+using grpc::channelz::v1::GetServersResponse;
+using grpc::channelz::v1::GetSocketRequest;
+using grpc::channelz::v1::GetSocketResponse;
+using grpc::channelz::v1::GetSubchannelRequest;
+using grpc::channelz::v1::GetSubchannelResponse;
+using grpc::channelz::v1::GetTopChannelsRequest;
+using grpc::channelz::v1::GetTopChannelsResponse;
+
+namespace grpc {
+namespace testing {
+namespace {
+
+// Proxy service supports N backends. Sends RPC to backend dictated by
+// request->backend_channel_idx().
+class Proxy : public ::grpc::testing::EchoTestService::Service {
+ public:
+ Proxy() {}
+
+ void AddChannelToBackend(const std::shared_ptr<Channel>& channel) {
+ stubs_.push_back(grpc::testing::EchoTestService::NewStub(channel));
+ }
+
+ Status Echo(ServerContext* server_context, const EchoRequest* request,
+ EchoResponse* response) override {
+ std::unique_ptr<ClientContext> client_context =
+ ClientContext::FromServerContext(*server_context);
+ size_t idx = request->param().backend_channel_idx();
+ GPR_ASSERT(idx < stubs_.size());
+ return stubs_[idx]->Echo(client_context.get(), *request, response);
+ }
+
+ Status BidiStream(ServerContext* server_context,
+ ServerReaderWriter<EchoResponse, EchoRequest>*
+ stream_from_client) override {
+ EchoRequest request;
+ EchoResponse response;
+ std::unique_ptr<ClientContext> client_context =
+ ClientContext::FromServerContext(*server_context);
+
+ // always use the first backend for streaming
+ auto stream_to_backend = stubs_[0]->BidiStream(client_context.get());
+ while (stream_from_client->Read(&request)) {
+ stream_to_backend->Write(request);
+ stream_to_backend->Read(&response);
+ stream_from_client->Write(response);
+ }
+
+ stream_to_backend->WritesDone();
+ return stream_to_backend->Finish();
+ }
+
+ private:
+ std::vector<std::unique_ptr<::grpc::testing::EchoTestService::Stub>> stubs_;
+};
+
+} // namespace
+
+class ChannelzServerTest : public ::testing::Test {
+ public:
+ ChannelzServerTest() {}
+ static void SetUpTestCase() {
+#if TARGET_OS_IPHONE
+ // Workaround Apple CFStream bug
+ gpr_setenv("grpc_cfstream", "0");
+#endif
+ }
+ void SetUp() override {
+ // ensure the channelz service is brought up on all servers we build.
+ ::grpc::channelz::experimental::InitChannelzService();
+
+ // We set up a proxy server with channelz enabled.
+ proxy_port_ = grpc_pick_unused_port_or_die();
+ ServerBuilder proxy_builder;
TString proxy_server_address = "localhost:" + to_string(proxy_port_);
- proxy_builder.AddListeningPort(proxy_server_address,
- InsecureServerCredentials());
- // forces channelz and channel tracing to be enabled.
- proxy_builder.AddChannelArgument(GRPC_ARG_ENABLE_CHANNELZ, 1);
- proxy_builder.AddChannelArgument(
- GRPC_ARG_MAX_CHANNEL_TRACE_EVENT_MEMORY_PER_NODE, 1024);
- proxy_builder.RegisterService(&proxy_service_);
- proxy_server_ = proxy_builder.BuildAndStart();
- }
-
- // Sets the proxy up to have an arbitrary number of backends.
- void ConfigureProxy(size_t num_backends) {
- backends_.resize(num_backends);
- for (size_t i = 0; i < num_backends; ++i) {
- // create a new backend.
- backends_[i].port = grpc_pick_unused_port_or_die();
- ServerBuilder backend_builder;
+ proxy_builder.AddListeningPort(proxy_server_address,
+ InsecureServerCredentials());
+ // forces channelz and channel tracing to be enabled.
+ proxy_builder.AddChannelArgument(GRPC_ARG_ENABLE_CHANNELZ, 1);
+ proxy_builder.AddChannelArgument(
+ GRPC_ARG_MAX_CHANNEL_TRACE_EVENT_MEMORY_PER_NODE, 1024);
+ proxy_builder.RegisterService(&proxy_service_);
+ proxy_server_ = proxy_builder.BuildAndStart();
+ }
+
+ // Sets the proxy up to have an arbitrary number of backends.
+ void ConfigureProxy(size_t num_backends) {
+ backends_.resize(num_backends);
+ for (size_t i = 0; i < num_backends; ++i) {
+ // create a new backend.
+ backends_[i].port = grpc_pick_unused_port_or_die();
+ ServerBuilder backend_builder;
TString backend_server_address =
- "localhost:" + to_string(backends_[i].port);
- backend_builder.AddListeningPort(backend_server_address,
- InsecureServerCredentials());
- backends_[i].service.reset(new TestServiceImpl);
- // ensure that the backend itself has channelz disabled.
- backend_builder.AddChannelArgument(GRPC_ARG_ENABLE_CHANNELZ, 0);
- backend_builder.RegisterService(backends_[i].service.get());
- backends_[i].server = backend_builder.BuildAndStart();
- // set up a channel to the backend. We ensure that this channel has
- // channelz enabled since these channels (proxy outbound to backends)
- // are the ones that our test will actually be validating.
- ChannelArguments args;
- args.SetInt(GRPC_ARG_ENABLE_CHANNELZ, 1);
- args.SetInt(GRPC_ARG_MAX_CHANNEL_TRACE_EVENT_MEMORY_PER_NODE, 1024);
- std::shared_ptr<Channel> channel_to_backend = ::grpc::CreateCustomChannel(
- backend_server_address, InsecureChannelCredentials(), args);
- proxy_service_.AddChannelToBackend(channel_to_backend);
- }
- }
-
- void ResetStubs() {
- string target = "dns:localhost:" + to_string(proxy_port_);
- ChannelArguments args;
- // disable channelz. We only want to focus on proxy to backend outbound.
- args.SetInt(GRPC_ARG_ENABLE_CHANNELZ, 0);
- std::shared_ptr<Channel> channel =
- ::grpc::CreateCustomChannel(target, InsecureChannelCredentials(), args);
- channelz_stub_ = grpc::channelz::v1::Channelz::NewStub(channel);
- echo_stub_ = grpc::testing::EchoTestService::NewStub(channel);
- }
-
- std::unique_ptr<grpc::testing::EchoTestService::Stub> NewEchoStub() {
- string target = "dns:localhost:" + to_string(proxy_port_);
- ChannelArguments args;
- // disable channelz. We only want to focus on proxy to backend outbound.
- args.SetInt(GRPC_ARG_ENABLE_CHANNELZ, 0);
- // This ensures that gRPC will not do connection sharing.
- args.SetInt(GRPC_ARG_USE_LOCAL_SUBCHANNEL_POOL, true);
- std::shared_ptr<Channel> channel =
- ::grpc::CreateCustomChannel(target, InsecureChannelCredentials(), args);
- return grpc::testing::EchoTestService::NewStub(channel);
- }
-
- void SendSuccessfulEcho(int channel_idx) {
- EchoRequest request;
- EchoResponse response;
- request.set_message("Hello channelz");
- request.mutable_param()->set_backend_channel_idx(channel_idx);
- ClientContext context;
- Status s = echo_stub_->Echo(&context, request, &response);
- EXPECT_EQ(response.message(), request.message());
- EXPECT_TRUE(s.ok()) << "s.error_message() = " << s.error_message();
- }
-
- void SendSuccessfulStream(int num_messages) {
- EchoRequest request;
- EchoResponse response;
- request.set_message("Hello channelz");
- ClientContext context;
- auto stream_to_proxy = echo_stub_->BidiStream(&context);
- for (int i = 0; i < num_messages; ++i) {
- EXPECT_TRUE(stream_to_proxy->Write(request));
- EXPECT_TRUE(stream_to_proxy->Read(&response));
- }
- stream_to_proxy->WritesDone();
- Status s = stream_to_proxy->Finish();
- EXPECT_TRUE(s.ok()) << "s.error_message() = " << s.error_message();
- }
-
- void SendFailedEcho(int channel_idx) {
- EchoRequest request;
- EchoResponse response;
- request.set_message("Hello channelz");
- request.mutable_param()->set_backend_channel_idx(channel_idx);
- auto* error = request.mutable_param()->mutable_expected_error();
- error->set_code(13); // INTERNAL
- error->set_error_message("error");
- ClientContext context;
- Status s = echo_stub_->Echo(&context, request, &response);
- EXPECT_FALSE(s.ok());
- }
-
- // Uses GetTopChannels to look up the channel_id of a particular channel,
- // so that the unit tests can exercise the GetChannel call.
- intptr_t GetChannelId(int channel_idx) {
- GetTopChannelsRequest request;
- GetTopChannelsResponse response;
- request.set_start_channel_id(0);
- ClientContext context;
- Status s = channelz_stub_->GetTopChannels(&context, request, &response);
- EXPECT_TRUE(s.ok()) << "s.error_message() = " << s.error_message();
- EXPECT_GT(response.channel_size(), channel_idx);
- return response.channel(channel_idx).ref().channel_id();
- }
-
- static string to_string(const int number) {
- std::stringstream strs;
- strs << number;
- return strs.str();
- }
-
- protected:
- // package of data needed for each backend server.
- struct BackendData {
- std::unique_ptr<Server> server;
- int port;
- std::unique_ptr<TestServiceImpl> service;
- };
-
- std::unique_ptr<grpc::channelz::v1::Channelz::Stub> channelz_stub_;
- std::unique_ptr<grpc::testing::EchoTestService::Stub> echo_stub_;
-
- // proxy server to ping with channelz requests.
- std::unique_ptr<Server> proxy_server_;
- int proxy_port_;
- Proxy proxy_service_;
-
- // backends. All implement the echo service.
- std::vector<BackendData> backends_;
-};
-
-TEST_F(ChannelzServerTest, BasicTest) {
- ResetStubs();
- ConfigureProxy(1);
- GetTopChannelsRequest request;
- GetTopChannelsResponse response;
- request.set_start_channel_id(0);
- ClientContext context;
- Status s = channelz_stub_->GetTopChannels(&context, request, &response);
- EXPECT_TRUE(s.ok()) << "s.error_message() = " << s.error_message();
- EXPECT_EQ(response.channel_size(), 1);
-}
-
-TEST_F(ChannelzServerTest, HighStartId) {
- ResetStubs();
- ConfigureProxy(1);
- GetTopChannelsRequest request;
- GetTopChannelsResponse response;
- request.set_start_channel_id(10000);
- ClientContext context;
- Status s = channelz_stub_->GetTopChannels(&context, request, &response);
- EXPECT_TRUE(s.ok()) << "s.error_message() = " << s.error_message();
- EXPECT_EQ(response.channel_size(), 0);
-}
-
-TEST_F(ChannelzServerTest, SuccessfulRequestTest) {
- ResetStubs();
- ConfigureProxy(1);
- SendSuccessfulEcho(0);
- GetChannelRequest request;
- GetChannelResponse response;
- request.set_channel_id(GetChannelId(0));
- ClientContext context;
- Status s = channelz_stub_->GetChannel(&context, request, &response);
- EXPECT_TRUE(s.ok()) << "s.error_message() = " << s.error_message();
- EXPECT_EQ(response.channel().data().calls_started(), 1);
- EXPECT_EQ(response.channel().data().calls_succeeded(), 1);
- EXPECT_EQ(response.channel().data().calls_failed(), 0);
-}
-
-TEST_F(ChannelzServerTest, FailedRequestTest) {
- ResetStubs();
- ConfigureProxy(1);
- SendFailedEcho(0);
- GetChannelRequest request;
- GetChannelResponse response;
- request.set_channel_id(GetChannelId(0));
- ClientContext context;
- Status s = channelz_stub_->GetChannel(&context, request, &response);
- EXPECT_TRUE(s.ok()) << "s.error_message() = " << s.error_message();
- EXPECT_EQ(response.channel().data().calls_started(), 1);
- EXPECT_EQ(response.channel().data().calls_succeeded(), 0);
- EXPECT_EQ(response.channel().data().calls_failed(), 1);
-}
-
-TEST_F(ChannelzServerTest, ManyRequestsTest) {
- ResetStubs();
- ConfigureProxy(1);
- // send some RPCs
- const int kNumSuccess = 10;
- const int kNumFailed = 11;
- for (int i = 0; i < kNumSuccess; ++i) {
- SendSuccessfulEcho(0);
- }
- for (int i = 0; i < kNumFailed; ++i) {
- SendFailedEcho(0);
- }
- GetChannelRequest request;
- GetChannelResponse response;
- request.set_channel_id(GetChannelId(0));
- ClientContext context;
- Status s = channelz_stub_->GetChannel(&context, request, &response);
- EXPECT_TRUE(s.ok()) << "s.error_message() = " << s.error_message();
- EXPECT_EQ(response.channel().data().calls_started(),
- kNumSuccess + kNumFailed);
- EXPECT_EQ(response.channel().data().calls_succeeded(), kNumSuccess);
- EXPECT_EQ(response.channel().data().calls_failed(), kNumFailed);
-}
-
-TEST_F(ChannelzServerTest, ManyChannels) {
- ResetStubs();
- const int kNumChannels = 4;
- ConfigureProxy(kNumChannels);
- GetTopChannelsRequest request;
- GetTopChannelsResponse response;
- request.set_start_channel_id(0);
- ClientContext context;
- Status s = channelz_stub_->GetTopChannels(&context, request, &response);
- EXPECT_TRUE(s.ok()) << "s.error_message() = " << s.error_message();
- EXPECT_EQ(response.channel_size(), kNumChannels);
-}
-
-TEST_F(ChannelzServerTest, ManyRequestsManyChannels) {
- ResetStubs();
- const int kNumChannels = 4;
- ConfigureProxy(kNumChannels);
- const int kNumSuccess = 10;
- const int kNumFailed = 11;
- for (int i = 0; i < kNumSuccess; ++i) {
- SendSuccessfulEcho(0);
- SendSuccessfulEcho(2);
- }
- for (int i = 0; i < kNumFailed; ++i) {
- SendFailedEcho(1);
- SendFailedEcho(2);
- }
-
- // the first channel saw only successes
- {
- GetChannelRequest request;
- GetChannelResponse response;
- request.set_channel_id(GetChannelId(0));
- ClientContext context;
- Status s = channelz_stub_->GetChannel(&context, request, &response);
- EXPECT_TRUE(s.ok()) << "s.error_message() = " << s.error_message();
- EXPECT_EQ(response.channel().data().calls_started(), kNumSuccess);
- EXPECT_EQ(response.channel().data().calls_succeeded(), kNumSuccess);
- EXPECT_EQ(response.channel().data().calls_failed(), 0);
- }
-
- // the second channel saw only failures
- {
- GetChannelRequest request;
- GetChannelResponse response;
- request.set_channel_id(GetChannelId(1));
- ClientContext context;
- Status s = channelz_stub_->GetChannel(&context, request, &response);
- EXPECT_TRUE(s.ok()) << "s.error_message() = " << s.error_message();
- EXPECT_EQ(response.channel().data().calls_started(), kNumFailed);
- EXPECT_EQ(response.channel().data().calls_succeeded(), 0);
- EXPECT_EQ(response.channel().data().calls_failed(), kNumFailed);
- }
-
- // the third channel saw both
- {
- GetChannelRequest request;
- GetChannelResponse response;
- request.set_channel_id(GetChannelId(2));
- ClientContext context;
- Status s = channelz_stub_->GetChannel(&context, request, &response);
- EXPECT_TRUE(s.ok()) << "s.error_message() = " << s.error_message();
- EXPECT_EQ(response.channel().data().calls_started(),
- kNumSuccess + kNumFailed);
- EXPECT_EQ(response.channel().data().calls_succeeded(), kNumSuccess);
- EXPECT_EQ(response.channel().data().calls_failed(), kNumFailed);
- }
-
- // the fourth channel saw nothing
- {
- GetChannelRequest request;
- GetChannelResponse response;
- request.set_channel_id(GetChannelId(3));
- ClientContext context;
- Status s = channelz_stub_->GetChannel(&context, request, &response);
- EXPECT_TRUE(s.ok()) << "s.error_message() = " << s.error_message();
- EXPECT_EQ(response.channel().data().calls_started(), 0);
- EXPECT_EQ(response.channel().data().calls_succeeded(), 0);
- EXPECT_EQ(response.channel().data().calls_failed(), 0);
- }
-}
-
-TEST_F(ChannelzServerTest, ManySubchannels) {
- ResetStubs();
- const int kNumChannels = 4;
- ConfigureProxy(kNumChannels);
- const int kNumSuccess = 10;
- const int kNumFailed = 11;
- for (int i = 0; i < kNumSuccess; ++i) {
- SendSuccessfulEcho(0);
- SendSuccessfulEcho(2);
- }
- for (int i = 0; i < kNumFailed; ++i) {
- SendFailedEcho(1);
- SendFailedEcho(2);
- }
- GetTopChannelsRequest gtc_request;
- GetTopChannelsResponse gtc_response;
- gtc_request.set_start_channel_id(0);
- ClientContext context;
- Status s =
- channelz_stub_->GetTopChannels(&context, gtc_request, &gtc_response);
- EXPECT_TRUE(s.ok()) << s.error_message();
- EXPECT_EQ(gtc_response.channel_size(), kNumChannels);
- for (int i = 0; i < gtc_response.channel_size(); ++i) {
- // if the channel sent no RPCs, then expect no subchannels to have been
- // created.
- if (gtc_response.channel(i).data().calls_started() == 0) {
- EXPECT_EQ(gtc_response.channel(i).subchannel_ref_size(), 0);
- continue;
- }
- // The resolver must return at least one address.
- ASSERT_GT(gtc_response.channel(i).subchannel_ref_size(), 0);
- GetSubchannelRequest gsc_request;
- GetSubchannelResponse gsc_response;
- gsc_request.set_subchannel_id(
- gtc_response.channel(i).subchannel_ref(0).subchannel_id());
- ClientContext context;
- Status s =
- channelz_stub_->GetSubchannel(&context, gsc_request, &gsc_response);
- EXPECT_TRUE(s.ok()) << s.error_message();
- EXPECT_EQ(gtc_response.channel(i).data().calls_started(),
- gsc_response.subchannel().data().calls_started());
- EXPECT_EQ(gtc_response.channel(i).data().calls_succeeded(),
- gsc_response.subchannel().data().calls_succeeded());
- EXPECT_EQ(gtc_response.channel(i).data().calls_failed(),
- gsc_response.subchannel().data().calls_failed());
- }
-}
-
-TEST_F(ChannelzServerTest, BasicServerTest) {
- ResetStubs();
- ConfigureProxy(1);
- GetServersRequest request;
- GetServersResponse response;
- request.set_start_server_id(0);
- ClientContext context;
- Status s = channelz_stub_->GetServers(&context, request, &response);
- EXPECT_TRUE(s.ok()) << "s.error_message() = " << s.error_message();
- EXPECT_EQ(response.server_size(), 1);
-}
-
-TEST_F(ChannelzServerTest, BasicGetServerTest) {
- ResetStubs();
- ConfigureProxy(1);
- GetServersRequest get_servers_request;
- GetServersResponse get_servers_response;
- get_servers_request.set_start_server_id(0);
- ClientContext get_servers_context;
- Status s = channelz_stub_->GetServers(
- &get_servers_context, get_servers_request, &get_servers_response);
- EXPECT_TRUE(s.ok()) << "s.error_message() = " << s.error_message();
- EXPECT_EQ(get_servers_response.server_size(), 1);
- GetServerRequest get_server_request;
- GetServerResponse get_server_response;
- get_server_request.set_server_id(
- get_servers_response.server(0).ref().server_id());
- ClientContext get_server_context;
- s = channelz_stub_->GetServer(&get_server_context, get_server_request,
- &get_server_response);
- EXPECT_TRUE(s.ok()) << "s.error_message() = " << s.error_message();
- EXPECT_EQ(get_servers_response.server(0).ref().server_id(),
- get_server_response.server().ref().server_id());
-}
-
-TEST_F(ChannelzServerTest, ServerCallTest) {
- ResetStubs();
- ConfigureProxy(1);
- const int kNumSuccess = 10;
- const int kNumFailed = 11;
- for (int i = 0; i < kNumSuccess; ++i) {
- SendSuccessfulEcho(0);
- }
- for (int i = 0; i < kNumFailed; ++i) {
- SendFailedEcho(0);
- }
- GetServersRequest request;
- GetServersResponse response;
- request.set_start_server_id(0);
- ClientContext context;
- Status s = channelz_stub_->GetServers(&context, request, &response);
- EXPECT_TRUE(s.ok()) << "s.error_message() = " << s.error_message();
- EXPECT_EQ(response.server_size(), 1);
- EXPECT_EQ(response.server(0).data().calls_succeeded(), kNumSuccess);
- EXPECT_EQ(response.server(0).data().calls_failed(), kNumFailed);
- // This is success+failure+1 because the call that retrieved this information
- // will be counted as started. It will not track success/failure until after
- // it has returned, so that is not included in the response.
- EXPECT_EQ(response.server(0).data().calls_started(),
- kNumSuccess + kNumFailed + 1);
-}
-
-TEST_F(ChannelzServerTest, ManySubchannelsAndSockets) {
- ResetStubs();
- const int kNumChannels = 4;
- ConfigureProxy(kNumChannels);
- const int kNumSuccess = 10;
- const int kNumFailed = 11;
- for (int i = 0; i < kNumSuccess; ++i) {
- SendSuccessfulEcho(0);
- SendSuccessfulEcho(2);
- }
- for (int i = 0; i < kNumFailed; ++i) {
- SendFailedEcho(1);
- SendFailedEcho(2);
- }
- GetTopChannelsRequest gtc_request;
- GetTopChannelsResponse gtc_response;
- gtc_request.set_start_channel_id(0);
- ClientContext context;
- Status s =
- channelz_stub_->GetTopChannels(&context, gtc_request, &gtc_response);
- EXPECT_TRUE(s.ok()) << s.error_message();
- EXPECT_EQ(gtc_response.channel_size(), kNumChannels);
- for (int i = 0; i < gtc_response.channel_size(); ++i) {
- // if the channel sent no RPCs, then expect no subchannels to have been
- // created.
- if (gtc_response.channel(i).data().calls_started() == 0) {
- EXPECT_EQ(gtc_response.channel(i).subchannel_ref_size(), 0);
- continue;
- }
- // The resolver must return at least one address.
- ASSERT_GT(gtc_response.channel(i).subchannel_ref_size(), 0);
- // First grab the subchannel
- GetSubchannelRequest get_subchannel_req;
- GetSubchannelResponse get_subchannel_resp;
- get_subchannel_req.set_subchannel_id(
- gtc_response.channel(i).subchannel_ref(0).subchannel_id());
- ClientContext get_subchannel_ctx;
- Status s = channelz_stub_->GetSubchannel(
- &get_subchannel_ctx, get_subchannel_req, &get_subchannel_resp);
- EXPECT_TRUE(s.ok()) << s.error_message();
- EXPECT_EQ(get_subchannel_resp.subchannel().socket_ref_size(), 1);
- // Now grab the socket.
- GetSocketRequest get_socket_req;
- GetSocketResponse get_socket_resp;
- ClientContext get_socket_ctx;
- get_socket_req.set_socket_id(
- get_subchannel_resp.subchannel().socket_ref(0).socket_id());
- s = channelz_stub_->GetSocket(&get_socket_ctx, get_socket_req,
- &get_socket_resp);
- EXPECT_TRUE(
- get_subchannel_resp.subchannel().socket_ref(0).name().find("http"));
- EXPECT_TRUE(s.ok()) << s.error_message();
- // calls started == streams started AND streams succeeded. Since none of
- // these RPCs were canceled, all of the streams will have succeeded even
- // though the RPCs they represent might have failed.
- EXPECT_EQ(get_subchannel_resp.subchannel().data().calls_started(),
- get_socket_resp.socket().data().streams_started());
- EXPECT_EQ(get_subchannel_resp.subchannel().data().calls_started(),
- get_socket_resp.socket().data().streams_succeeded());
- // All of the calls were unary, so calls started == messages sent.
- EXPECT_EQ(get_subchannel_resp.subchannel().data().calls_started(),
- get_socket_resp.socket().data().messages_sent());
- // We only get responses when the RPC was successful, so
- // calls succeeded == messages received.
- EXPECT_EQ(get_subchannel_resp.subchannel().data().calls_succeeded(),
- get_socket_resp.socket().data().messages_received());
- }
-}
-
-TEST_F(ChannelzServerTest, StreamingRPC) {
- ResetStubs();
- ConfigureProxy(1);
- const int kNumMessages = 5;
- SendSuccessfulStream(kNumMessages);
- // Get the channel
- GetChannelRequest get_channel_request;
- GetChannelResponse get_channel_response;
- get_channel_request.set_channel_id(GetChannelId(0));
- ClientContext get_channel_context;
- Status s = channelz_stub_->GetChannel(
- &get_channel_context, get_channel_request, &get_channel_response);
- EXPECT_TRUE(s.ok()) << "s.error_message() = " << s.error_message();
- EXPECT_EQ(get_channel_response.channel().data().calls_started(), 1);
- EXPECT_EQ(get_channel_response.channel().data().calls_succeeded(), 1);
- EXPECT_EQ(get_channel_response.channel().data().calls_failed(), 0);
- // Get the subchannel
- ASSERT_GT(get_channel_response.channel().subchannel_ref_size(), 0);
- GetSubchannelRequest get_subchannel_request;
- GetSubchannelResponse get_subchannel_response;
- ClientContext get_subchannel_context;
- get_subchannel_request.set_subchannel_id(
- get_channel_response.channel().subchannel_ref(0).subchannel_id());
- s = channelz_stub_->GetSubchannel(&get_subchannel_context,
- get_subchannel_request,
- &get_subchannel_response);
- EXPECT_TRUE(s.ok()) << "s.error_message() = " << s.error_message();
- EXPECT_EQ(get_subchannel_response.subchannel().data().calls_started(), 1);
- EXPECT_EQ(get_subchannel_response.subchannel().data().calls_succeeded(), 1);
- EXPECT_EQ(get_subchannel_response.subchannel().data().calls_failed(), 0);
- // Get the socket
- ASSERT_GT(get_subchannel_response.subchannel().socket_ref_size(), 0);
- GetSocketRequest get_socket_request;
- GetSocketResponse get_socket_response;
- ClientContext get_socket_context;
- get_socket_request.set_socket_id(
- get_subchannel_response.subchannel().socket_ref(0).socket_id());
- EXPECT_TRUE(
- get_subchannel_response.subchannel().socket_ref(0).name().find("http"));
- s = channelz_stub_->GetSocket(&get_socket_context, get_socket_request,
- &get_socket_response);
- EXPECT_TRUE(s.ok()) << "s.error_message() = " << s.error_message();
- EXPECT_EQ(get_socket_response.socket().data().streams_started(), 1);
- EXPECT_EQ(get_socket_response.socket().data().streams_succeeded(), 1);
- EXPECT_EQ(get_socket_response.socket().data().streams_failed(), 0);
- EXPECT_EQ(get_socket_response.socket().data().messages_sent(), kNumMessages);
- EXPECT_EQ(get_socket_response.socket().data().messages_received(),
- kNumMessages);
-}
-
-TEST_F(ChannelzServerTest, GetServerSocketsTest) {
- ResetStubs();
- ConfigureProxy(1);
- GetServersRequest get_server_request;
- GetServersResponse get_server_response;
- get_server_request.set_start_server_id(0);
- ClientContext get_server_context;
- Status s = channelz_stub_->GetServers(&get_server_context, get_server_request,
- &get_server_response);
- EXPECT_TRUE(s.ok()) << "s.error_message() = " << s.error_message();
- EXPECT_EQ(get_server_response.server_size(), 1);
- GetServerSocketsRequest get_server_sockets_request;
- GetServerSocketsResponse get_server_sockets_response;
- get_server_sockets_request.set_server_id(
- get_server_response.server(0).ref().server_id());
- get_server_sockets_request.set_start_socket_id(0);
- ClientContext get_server_sockets_context;
- s = channelz_stub_->GetServerSockets(&get_server_sockets_context,
- get_server_sockets_request,
- &get_server_sockets_response);
- EXPECT_TRUE(s.ok()) << "s.error_message() = " << s.error_message();
- EXPECT_EQ(get_server_sockets_response.socket_ref_size(), 1);
- EXPECT_TRUE(get_server_sockets_response.socket_ref(0).name().find("http"));
-}
-
-TEST_F(ChannelzServerTest, GetServerSocketsPaginationTest) {
- ResetStubs();
- ConfigureProxy(1);
- std::vector<std::unique_ptr<grpc::testing::EchoTestService::Stub>> stubs;
- const int kNumServerSocketsCreated = 20;
- for (int i = 0; i < kNumServerSocketsCreated; ++i) {
- stubs.push_back(NewEchoStub());
- EchoRequest request;
- EchoResponse response;
- request.set_message("Hello channelz");
- request.mutable_param()->set_backend_channel_idx(0);
- ClientContext context;
- Status s = stubs.back()->Echo(&context, request, &response);
- EXPECT_EQ(response.message(), request.message());
- EXPECT_TRUE(s.ok()) << "s.error_message() = " << s.error_message();
- }
- GetServersRequest get_server_request;
- GetServersResponse get_server_response;
- get_server_request.set_start_server_id(0);
- ClientContext get_server_context;
- Status s = channelz_stub_->GetServers(&get_server_context, get_server_request,
- &get_server_response);
- EXPECT_TRUE(s.ok()) << "s.error_message() = " << s.error_message();
- EXPECT_EQ(get_server_response.server_size(), 1);
- // Make a request that gets all of the server sockets
- {
- GetServerSocketsRequest get_server_sockets_request;
- GetServerSocketsResponse get_server_sockets_response;
- get_server_sockets_request.set_server_id(
- get_server_response.server(0).ref().server_id());
- get_server_sockets_request.set_start_socket_id(0);
- ClientContext get_server_sockets_context;
- s = channelz_stub_->GetServerSockets(&get_server_sockets_context,
- get_server_sockets_request,
- &get_server_sockets_response);
- EXPECT_TRUE(s.ok()) << "s.error_message() = " << s.error_message();
- // We add one to account for the channelz stub, which also ends up
- // creating a server socket.
- EXPECT_EQ(get_server_sockets_response.socket_ref_size(),
- kNumServerSocketsCreated + 1);
- EXPECT_TRUE(get_server_sockets_response.end());
- }
- // Now we make a request that exercises pagination.
- {
- GetServerSocketsRequest get_server_sockets_request;
- GetServerSocketsResponse get_server_sockets_response;
- get_server_sockets_request.set_server_id(
- get_server_response.server(0).ref().server_id());
- get_server_sockets_request.set_start_socket_id(0);
- const int kMaxResults = 10;
- get_server_sockets_request.set_max_results(kMaxResults);
- ClientContext get_server_sockets_context;
- s = channelz_stub_->GetServerSockets(&get_server_sockets_context,
- get_server_sockets_request,
- &get_server_sockets_response);
- EXPECT_TRUE(s.ok()) << "s.error_message() = " << s.error_message();
- EXPECT_EQ(get_server_sockets_response.socket_ref_size(), kMaxResults);
- EXPECT_FALSE(get_server_sockets_response.end());
- }
-}
-
-TEST_F(ChannelzServerTest, GetServerListenSocketsTest) {
- ResetStubs();
- ConfigureProxy(1);
- GetServersRequest get_server_request;
- GetServersResponse get_server_response;
- get_server_request.set_start_server_id(0);
- ClientContext get_server_context;
- Status s = channelz_stub_->GetServers(&get_server_context, get_server_request,
- &get_server_response);
- EXPECT_TRUE(s.ok()) << "s.error_message() = " << s.error_message();
- EXPECT_EQ(get_server_response.server_size(), 1);
- EXPECT_EQ(get_server_response.server(0).listen_socket_size(), 1);
- GetSocketRequest get_socket_request;
- GetSocketResponse get_socket_response;
- get_socket_request.set_socket_id(
- get_server_response.server(0).listen_socket(0).socket_id());
- EXPECT_TRUE(
- get_server_response.server(0).listen_socket(0).name().find("http"));
- ClientContext get_socket_context;
- s = channelz_stub_->GetSocket(&get_socket_context, get_socket_request,
- &get_socket_response);
- EXPECT_TRUE(s.ok()) << "s.error_message() = " << s.error_message();
-}
-
-} // namespace testing
-} // namespace grpc
-
-int main(int argc, char** argv) {
- grpc::testing::TestEnvironment env(argc, argv);
- ::testing::InitGoogleTest(&argc, argv);
- return RUN_ALL_TESTS();
-}
+ "localhost:" + to_string(backends_[i].port);
+ backend_builder.AddListeningPort(backend_server_address,
+ InsecureServerCredentials());
+ backends_[i].service.reset(new TestServiceImpl);
+ // ensure that the backend itself has channelz disabled.
+ backend_builder.AddChannelArgument(GRPC_ARG_ENABLE_CHANNELZ, 0);
+ backend_builder.RegisterService(backends_[i].service.get());
+ backends_[i].server = backend_builder.BuildAndStart();
+ // set up a channel to the backend. We ensure that this channel has
+ // channelz enabled since these channels (proxy outbound to backends)
+ // are the ones that our test will actually be validating.
+ ChannelArguments args;
+ args.SetInt(GRPC_ARG_ENABLE_CHANNELZ, 1);
+ args.SetInt(GRPC_ARG_MAX_CHANNEL_TRACE_EVENT_MEMORY_PER_NODE, 1024);
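+ // Note: a non-zero trace-event memory cap also turns on channel tracing for
+ // these channels, matching the channelz/tracing settings on the proxy above.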
+ std::shared_ptr<Channel> channel_to_backend = ::grpc::CreateCustomChannel(
+ backend_server_address, InsecureChannelCredentials(), args);
+ proxy_service_.AddChannelToBackend(channel_to_backend);
+ }
+ }
+
+ void ResetStubs() {
+ string target = "dns:localhost:" + to_string(proxy_port_);
+ ChannelArguments args;
+ // disable channelz. We only want to focus on proxy to backend outbound.
+ args.SetInt(GRPC_ARG_ENABLE_CHANNELZ, 0);
+ std::shared_ptr<Channel> channel =
+ ::grpc::CreateCustomChannel(target, InsecureChannelCredentials(), args);
+ channelz_stub_ = grpc::channelz::v1::Channelz::NewStub(channel);
+ echo_stub_ = grpc::testing::EchoTestService::NewStub(channel);
+ }
+
+ std::unique_ptr<grpc::testing::EchoTestService::Stub> NewEchoStub() {
+ string target = "dns:localhost:" + to_string(proxy_port_);
+ ChannelArguments args;
+ // disable channelz. We only want to focus on proxy to backend outbound.
+ args.SetInt(GRPC_ARG_ENABLE_CHANNELZ, 0);
+ // This ensures that gRPC will not do connection sharing.
+ args.SetInt(GRPC_ARG_USE_LOCAL_SUBCHANNEL_POOL, true);
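+ // With a per-channel subchannel pool, every stub created here gets its own
+ // connection, and therefore its own server-side socket; the GetServerSockets
+ // pagination test below relies on this one-socket-per-stub behavior.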
+ std::shared_ptr<Channel> channel =
+ ::grpc::CreateCustomChannel(target, InsecureChannelCredentials(), args);
+ return grpc::testing::EchoTestService::NewStub(channel);
+ }
+
+ void SendSuccessfulEcho(int channel_idx) {
+ EchoRequest request;
+ EchoResponse response;
+ request.set_message("Hello channelz");
+ request.mutable_param()->set_backend_channel_idx(channel_idx);
+ ClientContext context;
+ Status s = echo_stub_->Echo(&context, request, &response);
+ EXPECT_EQ(response.message(), request.message());
+ EXPECT_TRUE(s.ok()) << "s.error_message() = " << s.error_message();
+ }
+
+ void SendSuccessfulStream(int num_messages) {
+ EchoRequest request;
+ EchoResponse response;
+ request.set_message("Hello channelz");
+ ClientContext context;
+ auto stream_to_proxy = echo_stub_->BidiStream(&context);
+ for (int i = 0; i < num_messages; ++i) {
+ EXPECT_TRUE(stream_to_proxy->Write(request));
+ EXPECT_TRUE(stream_to_proxy->Read(&response));
+ }
+ stream_to_proxy->WritesDone();
+ Status s = stream_to_proxy->Finish();
+ EXPECT_TRUE(s.ok()) << "s.error_message() = " << s.error_message();
+ }
+
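+ // Issues an Echo RPC that is expected to fail: the expected_error request
+ // param (honored by TestServiceImpl on the backend) makes the backend return
+ // INTERNAL, so the call is recorded as failed by channelz.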
+ void SendFailedEcho(int channel_idx) {
+ EchoRequest request;
+ EchoResponse response;
+ request.set_message("Hello channelz");
+ request.mutable_param()->set_backend_channel_idx(channel_idx);
+ auto* error = request.mutable_param()->mutable_expected_error();
+ error->set_code(13); // INTERNAL
+ error->set_error_message("error");
+ ClientContext context;
+ Status s = echo_stub_->Echo(&context, request, &response);
+ EXPECT_FALSE(s.ok());
+ }
+
+ // Uses GetTopChannels to look up the channel_id of a particular channel,
+ // so that the unit tests can exercise the GetChannel call.
+ intptr_t GetChannelId(int channel_idx) {
+ GetTopChannelsRequest request;
+ GetTopChannelsResponse response;
+ request.set_start_channel_id(0);
+ ClientContext context;
+ Status s = channelz_stub_->GetTopChannels(&context, request, &response);
+ EXPECT_TRUE(s.ok()) << "s.error_message() = " << s.error_message();
+ EXPECT_GT(response.channel_size(), channel_idx);
+ return response.channel(channel_idx).ref().channel_id();
+ }
+
+ static string to_string(const int number) {
+ std::stringstream strs;
+ strs << number;
+ return strs.str();
+ }
+
+ protected:
+ // package of data needed for each backend server.
+ struct BackendData {
+ std::unique_ptr<Server> server;
+ int port;
+ std::unique_ptr<TestServiceImpl> service;
+ };
+
+ std::unique_ptr<grpc::channelz::v1::Channelz::Stub> channelz_stub_;
+ std::unique_ptr<grpc::testing::EchoTestService::Stub> echo_stub_;
+
+ // proxy server to ping with channelz requests.
+ std::unique_ptr<Server> proxy_server_;
+ int proxy_port_;
+ Proxy proxy_service_;
+
+ // backends. All implement the echo service.
+ std::vector<BackendData> backends_;
+};
+
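+// The tests below drive RPCs through the channelz-enabled proxy to the
+// channelz-disabled backends, then query the proxy's channelz service to
+// validate the channel, subchannel, socket, and server stats it reports.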
+TEST_F(ChannelzServerTest, BasicTest) {
+ ResetStubs();
+ ConfigureProxy(1);
+ GetTopChannelsRequest request;
+ GetTopChannelsResponse response;
+ request.set_start_channel_id(0);
+ ClientContext context;
+ Status s = channelz_stub_->GetTopChannels(&context, request, &response);
+ EXPECT_TRUE(s.ok()) << "s.error_message() = " << s.error_message();
+ EXPECT_EQ(response.channel_size(), 1);
+}
+
+TEST_F(ChannelzServerTest, HighStartId) {
+ ResetStubs();
+ ConfigureProxy(1);
+ GetTopChannelsRequest request;
+ GetTopChannelsResponse response;
+ request.set_start_channel_id(10000);
+ ClientContext context;
+ Status s = channelz_stub_->GetTopChannels(&context, request, &response);
+ EXPECT_TRUE(s.ok()) << "s.error_message() = " << s.error_message();
+ EXPECT_EQ(response.channel_size(), 0);
+}
+
+TEST_F(ChannelzServerTest, SuccessfulRequestTest) {
+ ResetStubs();
+ ConfigureProxy(1);
+ SendSuccessfulEcho(0);
+ GetChannelRequest request;
+ GetChannelResponse response;
+ request.set_channel_id(GetChannelId(0));
+ ClientContext context;
+ Status s = channelz_stub_->GetChannel(&context, request, &response);
+ EXPECT_TRUE(s.ok()) << "s.error_message() = " << s.error_message();
+ EXPECT_EQ(response.channel().data().calls_started(), 1);
+ EXPECT_EQ(response.channel().data().calls_succeeded(), 1);
+ EXPECT_EQ(response.channel().data().calls_failed(), 0);
+}
+
+TEST_F(ChannelzServerTest, FailedRequestTest) {
+ ResetStubs();
+ ConfigureProxy(1);
+ SendFailedEcho(0);
+ GetChannelRequest request;
+ GetChannelResponse response;
+ request.set_channel_id(GetChannelId(0));
+ ClientContext context;
+ Status s = channelz_stub_->GetChannel(&context, request, &response);
+ EXPECT_TRUE(s.ok()) << "s.error_message() = " << s.error_message();
+ EXPECT_EQ(response.channel().data().calls_started(), 1);
+ EXPECT_EQ(response.channel().data().calls_succeeded(), 0);
+ EXPECT_EQ(response.channel().data().calls_failed(), 1);
+}
+
+TEST_F(ChannelzServerTest, ManyRequestsTest) {
+ ResetStubs();
+ ConfigureProxy(1);
+ // send some RPCs
+ const int kNumSuccess = 10;
+ const int kNumFailed = 11;
+ for (int i = 0; i < kNumSuccess; ++i) {
+ SendSuccessfulEcho(0);
+ }
+ for (int i = 0; i < kNumFailed; ++i) {
+ SendFailedEcho(0);
+ }
+ GetChannelRequest request;
+ GetChannelResponse response;
+ request.set_channel_id(GetChannelId(0));
+ ClientContext context;
+ Status s = channelz_stub_->GetChannel(&context, request, &response);
+ EXPECT_TRUE(s.ok()) << "s.error_message() = " << s.error_message();
+ EXPECT_EQ(response.channel().data().calls_started(),
+ kNumSuccess + kNumFailed);
+ EXPECT_EQ(response.channel().data().calls_succeeded(), kNumSuccess);
+ EXPECT_EQ(response.channel().data().calls_failed(), kNumFailed);
+}
+
+TEST_F(ChannelzServerTest, ManyChannels) {
+ ResetStubs();
+ const int kNumChannels = 4;
+ ConfigureProxy(kNumChannels);
+ GetTopChannelsRequest request;
+ GetTopChannelsResponse response;
+ request.set_start_channel_id(0);
+ ClientContext context;
+ Status s = channelz_stub_->GetTopChannels(&context, request, &response);
+ EXPECT_TRUE(s.ok()) << "s.error_message() = " << s.error_message();
+ EXPECT_EQ(response.channel_size(), kNumChannels);
+}
+
+TEST_F(ChannelzServerTest, ManyRequestsManyChannels) {
+ ResetStubs();
+ const int kNumChannels = 4;
+ ConfigureProxy(kNumChannels);
+ const int kNumSuccess = 10;
+ const int kNumFailed = 11;
+ for (int i = 0; i < kNumSuccess; ++i) {
+ SendSuccessfulEcho(0);
+ SendSuccessfulEcho(2);
+ }
+ for (int i = 0; i < kNumFailed; ++i) {
+ SendFailedEcho(1);
+ SendFailedEcho(2);
+ }
+
+ // the first channel saw only successes
+ {
+ GetChannelRequest request;
+ GetChannelResponse response;
+ request.set_channel_id(GetChannelId(0));
+ ClientContext context;
+ Status s = channelz_stub_->GetChannel(&context, request, &response);
+ EXPECT_TRUE(s.ok()) << "s.error_message() = " << s.error_message();
+ EXPECT_EQ(response.channel().data().calls_started(), kNumSuccess);
+ EXPECT_EQ(response.channel().data().calls_succeeded(), kNumSuccess);
+ EXPECT_EQ(response.channel().data().calls_failed(), 0);
+ }
+
+ // the second channel saw only failures
+ {
+ GetChannelRequest request;
+ GetChannelResponse response;
+ request.set_channel_id(GetChannelId(1));
+ ClientContext context;
+ Status s = channelz_stub_->GetChannel(&context, request, &response);
+ EXPECT_TRUE(s.ok()) << "s.error_message() = " << s.error_message();
+ EXPECT_EQ(response.channel().data().calls_started(), kNumFailed);
+ EXPECT_EQ(response.channel().data().calls_succeeded(), 0);
+ EXPECT_EQ(response.channel().data().calls_failed(), kNumFailed);
+ }
+
+ // the third channel saw both
+ {
+ GetChannelRequest request;
+ GetChannelResponse response;
+ request.set_channel_id(GetChannelId(2));
+ ClientContext context;
+ Status s = channelz_stub_->GetChannel(&context, request, &response);
+ EXPECT_TRUE(s.ok()) << "s.error_message() = " << s.error_message();
+ EXPECT_EQ(response.channel().data().calls_started(),
+ kNumSuccess + kNumFailed);
+ EXPECT_EQ(response.channel().data().calls_succeeded(), kNumSuccess);
+ EXPECT_EQ(response.channel().data().calls_failed(), kNumFailed);
+ }
+
+ // the fourth channel saw nothing
+ {
+ GetChannelRequest request;
+ GetChannelResponse response;
+ request.set_channel_id(GetChannelId(3));
+ ClientContext context;
+ Status s = channelz_stub_->GetChannel(&context, request, &response);
+ EXPECT_TRUE(s.ok()) << "s.error_message() = " << s.error_message();
+ EXPECT_EQ(response.channel().data().calls_started(), 0);
+ EXPECT_EQ(response.channel().data().calls_succeeded(), 0);
+ EXPECT_EQ(response.channel().data().calls_failed(), 0);
+ }
+}
+
+TEST_F(ChannelzServerTest, ManySubchannels) {
+ ResetStubs();
+ const int kNumChannels = 4;
+ ConfigureProxy(kNumChannels);
+ const int kNumSuccess = 10;
+ const int kNumFailed = 11;
+ for (int i = 0; i < kNumSuccess; ++i) {
+ SendSuccessfulEcho(0);
+ SendSuccessfulEcho(2);
+ }
+ for (int i = 0; i < kNumFailed; ++i) {
+ SendFailedEcho(1);
+ SendFailedEcho(2);
+ }
+ GetTopChannelsRequest gtc_request;
+ GetTopChannelsResponse gtc_response;
+ gtc_request.set_start_channel_id(0);
+ ClientContext context;
+ Status s =
+ channelz_stub_->GetTopChannels(&context, gtc_request, &gtc_response);
+ EXPECT_TRUE(s.ok()) << s.error_message();
+ EXPECT_EQ(gtc_response.channel_size(), kNumChannels);
+ for (int i = 0; i < gtc_response.channel_size(); ++i) {
+ // if the channel sent no RPCs, then expect no subchannels to have been
+ // created.
+ if (gtc_response.channel(i).data().calls_started() == 0) {
+ EXPECT_EQ(gtc_response.channel(i).subchannel_ref_size(), 0);
+ continue;
+ }
+ // The resolver must return at least one address.
+ ASSERT_GT(gtc_response.channel(i).subchannel_ref_size(), 0);
+ GetSubchannelRequest gsc_request;
+ GetSubchannelResponse gsc_response;
+ gsc_request.set_subchannel_id(
+ gtc_response.channel(i).subchannel_ref(0).subchannel_id());
+ ClientContext context;
+ Status s =
+ channelz_stub_->GetSubchannel(&context, gsc_request, &gsc_response);
+ EXPECT_TRUE(s.ok()) << s.error_message();
+ EXPECT_EQ(gtc_response.channel(i).data().calls_started(),
+ gsc_response.subchannel().data().calls_started());
+ EXPECT_EQ(gtc_response.channel(i).data().calls_succeeded(),
+ gsc_response.subchannel().data().calls_succeeded());
+ EXPECT_EQ(gtc_response.channel(i).data().calls_failed(),
+ gsc_response.subchannel().data().calls_failed());
+ }
+}
+
+TEST_F(ChannelzServerTest, BasicServerTest) {
+ ResetStubs();
+ ConfigureProxy(1);
+ GetServersRequest request;
+ GetServersResponse response;
+ request.set_start_server_id(0);
+ ClientContext context;
+ Status s = channelz_stub_->GetServers(&context, request, &response);
+ EXPECT_TRUE(s.ok()) << "s.error_message() = " << s.error_message();
+ EXPECT_EQ(response.server_size(), 1);
+}
+
+TEST_F(ChannelzServerTest, BasicGetServerTest) {
+ ResetStubs();
+ ConfigureProxy(1);
+ GetServersRequest get_servers_request;
+ GetServersResponse get_servers_response;
+ get_servers_request.set_start_server_id(0);
+ ClientContext get_servers_context;
+ Status s = channelz_stub_->GetServers(
+ &get_servers_context, get_servers_request, &get_servers_response);
+ EXPECT_TRUE(s.ok()) << "s.error_message() = " << s.error_message();
+ EXPECT_EQ(get_servers_response.server_size(), 1);
+ GetServerRequest get_server_request;
+ GetServerResponse get_server_response;
+ get_server_request.set_server_id(
+ get_servers_response.server(0).ref().server_id());
+ ClientContext get_server_context;
+ s = channelz_stub_->GetServer(&get_server_context, get_server_request,
+ &get_server_response);
+ EXPECT_TRUE(s.ok()) << "s.error_message() = " << s.error_message();
+ EXPECT_EQ(get_servers_response.server(0).ref().server_id(),
+ get_server_response.server().ref().server_id());
+}
+
+TEST_F(ChannelzServerTest, ServerCallTest) {
+ ResetStubs();
+ ConfigureProxy(1);
+ const int kNumSuccess = 10;
+ const int kNumFailed = 11;
+ for (int i = 0; i < kNumSuccess; ++i) {
+ SendSuccessfulEcho(0);
+ }
+ for (int i = 0; i < kNumFailed; ++i) {
+ SendFailedEcho(0);
+ }
+ GetServersRequest request;
+ GetServersResponse response;
+ request.set_start_server_id(0);
+ ClientContext context;
+ Status s = channelz_stub_->GetServers(&context, request, &response);
+ EXPECT_TRUE(s.ok()) << "s.error_message() = " << s.error_message();
+ EXPECT_EQ(response.server_size(), 1);
+ EXPECT_EQ(response.server(0).data().calls_succeeded(), kNumSuccess);
+ EXPECT_EQ(response.server(0).data().calls_failed(), kNumFailed);
+ // This is success+failure+1 because the call that retrieved this information
+ // will be counted as started. It will not track success/failure until after
+ // it has returned, so that is not included in the response.
+ EXPECT_EQ(response.server(0).data().calls_started(),
+ kNumSuccess + kNumFailed + 1);
+}
+
+TEST_F(ChannelzServerTest, ManySubchannelsAndSockets) {
+ ResetStubs();
+ const int kNumChannels = 4;
+ ConfigureProxy(kNumChannels);
+ const int kNumSuccess = 10;
+ const int kNumFailed = 11;
+ for (int i = 0; i < kNumSuccess; ++i) {
+ SendSuccessfulEcho(0);
+ SendSuccessfulEcho(2);
+ }
+ for (int i = 0; i < kNumFailed; ++i) {
+ SendFailedEcho(1);
+ SendFailedEcho(2);
+ }
+ GetTopChannelsRequest gtc_request;
+ GetTopChannelsResponse gtc_response;
+ gtc_request.set_start_channel_id(0);
+ ClientContext context;
+ Status s =
+ channelz_stub_->GetTopChannels(&context, gtc_request, &gtc_response);
+ EXPECT_TRUE(s.ok()) << s.error_message();
+ EXPECT_EQ(gtc_response.channel_size(), kNumChannels);
+ for (int i = 0; i < gtc_response.channel_size(); ++i) {
+ // if the channel sent no RPCs, then expect no subchannels to have been
+ // created.
+ if (gtc_response.channel(i).data().calls_started() == 0) {
+ EXPECT_EQ(gtc_response.channel(i).subchannel_ref_size(), 0);
+ continue;
+ }
+ // The resolver must return at least one address.
+ ASSERT_GT(gtc_response.channel(i).subchannel_ref_size(), 0);
+ // First grab the subchannel
+ GetSubchannelRequest get_subchannel_req;
+ GetSubchannelResponse get_subchannel_resp;
+ get_subchannel_req.set_subchannel_id(
+ gtc_response.channel(i).subchannel_ref(0).subchannel_id());
+ ClientContext get_subchannel_ctx;
+ Status s = channelz_stub_->GetSubchannel(
+ &get_subchannel_ctx, get_subchannel_req, &get_subchannel_resp);
+ EXPECT_TRUE(s.ok()) << s.error_message();
+ EXPECT_EQ(get_subchannel_resp.subchannel().socket_ref_size(), 1);
+ // Now grab the socket.
+ GetSocketRequest get_socket_req;
+ GetSocketResponse get_socket_resp;
+ ClientContext get_socket_ctx;
+ get_socket_req.set_socket_id(
+ get_subchannel_resp.subchannel().socket_ref(0).socket_id());
+ s = channelz_stub_->GetSocket(&get_socket_ctx, get_socket_req,
+ &get_socket_resp);
+ EXPECT_TRUE(
+ get_subchannel_resp.subchannel().socket_ref(0).name().find("http"));
+ EXPECT_TRUE(s.ok()) << s.error_message();
+ // calls started == streams started AND streams succeeded. Since none of
+ // these RPCs were canceled, all of the streams will have succeeded even
+ // though the RPCs they represent might have failed.
+ EXPECT_EQ(get_subchannel_resp.subchannel().data().calls_started(),
+ get_socket_resp.socket().data().streams_started());
+ EXPECT_EQ(get_subchannel_resp.subchannel().data().calls_started(),
+ get_socket_resp.socket().data().streams_succeeded());
+ // All of the calls were unary, so calls started == messages sent.
+ EXPECT_EQ(get_subchannel_resp.subchannel().data().calls_started(),
+ get_socket_resp.socket().data().messages_sent());
+ // We only get responses when the RPC was successful, so
+ // calls succeeded == messages received.
+ EXPECT_EQ(get_subchannel_resp.subchannel().data().calls_succeeded(),
+ get_socket_resp.socket().data().messages_received());
+ }
+}
+
+TEST_F(ChannelzServerTest, StreamingRPC) {
+ ResetStubs();
+ ConfigureProxy(1);
+ const int kNumMessages = 5;
+ SendSuccessfulStream(kNumMessages);
+ // Get the channel
+ GetChannelRequest get_channel_request;
+ GetChannelResponse get_channel_response;
+ get_channel_request.set_channel_id(GetChannelId(0));
+ ClientContext get_channel_context;
+ Status s = channelz_stub_->GetChannel(
+ &get_channel_context, get_channel_request, &get_channel_response);
+ EXPECT_TRUE(s.ok()) << "s.error_message() = " << s.error_message();
+ EXPECT_EQ(get_channel_response.channel().data().calls_started(), 1);
+ EXPECT_EQ(get_channel_response.channel().data().calls_succeeded(), 1);
+ EXPECT_EQ(get_channel_response.channel().data().calls_failed(), 0);
+ // Get the subchannel
+ ASSERT_GT(get_channel_response.channel().subchannel_ref_size(), 0);
+ GetSubchannelRequest get_subchannel_request;
+ GetSubchannelResponse get_subchannel_response;
+ ClientContext get_subchannel_context;
+ get_subchannel_request.set_subchannel_id(
+ get_channel_response.channel().subchannel_ref(0).subchannel_id());
+ s = channelz_stub_->GetSubchannel(&get_subchannel_context,
+ get_subchannel_request,
+ &get_subchannel_response);
+ EXPECT_TRUE(s.ok()) << "s.error_message() = " << s.error_message();
+ EXPECT_EQ(get_subchannel_response.subchannel().data().calls_started(), 1);
+ EXPECT_EQ(get_subchannel_response.subchannel().data().calls_succeeded(), 1);
+ EXPECT_EQ(get_subchannel_response.subchannel().data().calls_failed(), 0);
+ // Get the socket
+ ASSERT_GT(get_subchannel_response.subchannel().socket_ref_size(), 0);
+ GetSocketRequest get_socket_request;
+ GetSocketResponse get_socket_response;
+ ClientContext get_socket_context;
+ get_socket_request.set_socket_id(
+ get_subchannel_response.subchannel().socket_ref(0).socket_id());
+ EXPECT_TRUE(
+ get_subchannel_response.subchannel().socket_ref(0).name().find("http"));
+ s = channelz_stub_->GetSocket(&get_socket_context, get_socket_request,
+ &get_socket_response);
+ EXPECT_TRUE(s.ok()) << "s.error_message() = " << s.error_message();
+ EXPECT_EQ(get_socket_response.socket().data().streams_started(), 1);
+ EXPECT_EQ(get_socket_response.socket().data().streams_succeeded(), 1);
+ EXPECT_EQ(get_socket_response.socket().data().streams_failed(), 0);
+ EXPECT_EQ(get_socket_response.socket().data().messages_sent(), kNumMessages);
+ EXPECT_EQ(get_socket_response.socket().data().messages_received(),
+ kNumMessages);
+}
+
+TEST_F(ChannelzServerTest, GetServerSocketsTest) {
+ ResetStubs();
+ ConfigureProxy(1);
+ GetServersRequest get_server_request;
+ GetServersResponse get_server_response;
+ get_server_request.set_start_server_id(0);
+ ClientContext get_server_context;
+ Status s = channelz_stub_->GetServers(&get_server_context, get_server_request,
+ &get_server_response);
+ EXPECT_TRUE(s.ok()) << "s.error_message() = " << s.error_message();
+ EXPECT_EQ(get_server_response.server_size(), 1);
+ GetServerSocketsRequest get_server_sockets_request;
+ GetServerSocketsResponse get_server_sockets_response;
+ get_server_sockets_request.set_server_id(
+ get_server_response.server(0).ref().server_id());
+ get_server_sockets_request.set_start_socket_id(0);
+ ClientContext get_server_sockets_context;
+ s = channelz_stub_->GetServerSockets(&get_server_sockets_context,
+ get_server_sockets_request,
+ &get_server_sockets_response);
+ EXPECT_TRUE(s.ok()) << "s.error_message() = " << s.error_message();
+ EXPECT_EQ(get_server_sockets_response.socket_ref_size(), 1);
+ EXPECT_TRUE(get_server_sockets_response.socket_ref(0).name().find("http"));
+}
+
+TEST_F(ChannelzServerTest, GetServerSocketsPaginationTest) {
+ ResetStubs();
+ ConfigureProxy(1);
+ std::vector<std::unique_ptr<grpc::testing::EchoTestService::Stub>> stubs;
+ const int kNumServerSocketsCreated = 20;
+ for (int i = 0; i < kNumServerSocketsCreated; ++i) {
+ stubs.push_back(NewEchoStub());
+ EchoRequest request;
+ EchoResponse response;
+ request.set_message("Hello channelz");
+ request.mutable_param()->set_backend_channel_idx(0);
+ ClientContext context;
+ Status s = stubs.back()->Echo(&context, request, &response);
+ EXPECT_EQ(response.message(), request.message());
+ EXPECT_TRUE(s.ok()) << "s.error_message() = " << s.error_message();
+ }
+ GetServersRequest get_server_request;
+ GetServersResponse get_server_response;
+ get_server_request.set_start_server_id(0);
+ ClientContext get_server_context;
+ Status s = channelz_stub_->GetServers(&get_server_context, get_server_request,
+ &get_server_response);
+ EXPECT_TRUE(s.ok()) << "s.error_message() = " << s.error_message();
+ EXPECT_EQ(get_server_response.server_size(), 1);
+ // Make a request that gets all of the server sockets
+ {
+ GetServerSocketsRequest get_server_sockets_request;
+ GetServerSocketsResponse get_server_sockets_response;
+ get_server_sockets_request.set_server_id(
+ get_server_response.server(0).ref().server_id());
+ get_server_sockets_request.set_start_socket_id(0);
+ ClientContext get_server_sockets_context;
+ s = channelz_stub_->GetServerSockets(&get_server_sockets_context,
+ get_server_sockets_request,
+ &get_server_sockets_response);
+ EXPECT_TRUE(s.ok()) << "s.error_message() = " << s.error_message();
+ // We add one to account for the channelz stub, which also ends up
+ // creating a server socket.
+ EXPECT_EQ(get_server_sockets_response.socket_ref_size(),
+ kNumServerSocketsCreated + 1);
+ EXPECT_TRUE(get_server_sockets_response.end());
+ }
+ // Now we make a request that exercises pagination.
+ {
+ GetServerSocketsRequest get_server_sockets_request;
+ GetServerSocketsResponse get_server_sockets_response;
+ get_server_sockets_request.set_server_id(
+ get_server_response.server(0).ref().server_id());
+ get_server_sockets_request.set_start_socket_id(0);
+ const int kMaxResults = 10;
+ get_server_sockets_request.set_max_results(kMaxResults);
+ ClientContext get_server_sockets_context;
+ s = channelz_stub_->GetServerSockets(&get_server_sockets_context,
+ get_server_sockets_request,
+ &get_server_sockets_response);
+ EXPECT_TRUE(s.ok()) << "s.error_message() = " << s.error_message();
+ EXPECT_EQ(get_server_sockets_response.socket_ref_size(), kMaxResults);
+ EXPECT_FALSE(get_server_sockets_response.end());
+ }
+}
+
+TEST_F(ChannelzServerTest, GetServerListenSocketsTest) {
+ ResetStubs();
+ ConfigureProxy(1);
+ GetServersRequest get_server_request;
+ GetServersResponse get_server_response;
+ get_server_request.set_start_server_id(0);
+ ClientContext get_server_context;
+ Status s = channelz_stub_->GetServers(&get_server_context, get_server_request,
+ &get_server_response);
+ EXPECT_TRUE(s.ok()) << "s.error_message() = " << s.error_message();
+ EXPECT_EQ(get_server_response.server_size(), 1);
+ EXPECT_EQ(get_server_response.server(0).listen_socket_size(), 1);
+ GetSocketRequest get_socket_request;
+ GetSocketResponse get_socket_response;
+ get_socket_request.set_socket_id(
+ get_server_response.server(0).listen_socket(0).socket_id());
+ EXPECT_TRUE(
+ get_server_response.server(0).listen_socket(0).name().find("http"));
+ ClientContext get_socket_context;
+ s = channelz_stub_->GetSocket(&get_socket_context, get_socket_request,
+ &get_socket_response);
+ EXPECT_TRUE(s.ok()) << "s.error_message() = " << s.error_message();
+}
+
+} // namespace testing
+} // namespace grpc
+
+int main(int argc, char** argv) {
+ grpc::testing::TestEnvironment env(argc, argv);
+ ::testing::InitGoogleTest(&argc, argv);
+ return RUN_ALL_TESTS();
+}
diff --git a/contrib/libs/grpc/test/cpp/end2end/client_callback_end2end_test.cc b/contrib/libs/grpc/test/cpp/end2end/client_callback_end2end_test.cc
index 53efd793d4..12cb40a953 100644
--- a/contrib/libs/grpc/test/cpp/end2end/client_callback_end2end_test.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/client_callback_end2end_test.cc
@@ -1,32 +1,32 @@
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include <grpcpp/channel.h>
-#include <grpcpp/client_context.h>
-#include <grpcpp/create_channel.h>
-#include <grpcpp/generic/generic_stub.h>
-#include <grpcpp/impl/codegen/proto_utils.h>
-#include <grpcpp/server.h>
-#include <grpcpp/server_builder.h>
-#include <grpcpp/server_context.h>
-#include <grpcpp/support/client_callback.h>
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpcpp/channel.h>
+#include <grpcpp/client_context.h>
+#include <grpcpp/create_channel.h>
+#include <grpcpp/generic/generic_stub.h>
+#include <grpcpp/impl/codegen/proto_utils.h>
+#include <grpcpp/server.h>
+#include <grpcpp/server_builder.h>
+#include <grpcpp/server_context.h>
+#include <grpcpp/support/client_callback.h>
#include <gtest/gtest.h>
-
+
#include <algorithm>
#include <condition_variable>
#include <functional>
@@ -34,330 +34,330 @@
#include <sstream>
#include <thread>
-#include "src/core/lib/gpr/env.h"
-#include "src/core/lib/iomgr/iomgr.h"
-#include "src/proto/grpc/testing/echo.grpc.pb.h"
-#include "test/core/util/port.h"
-#include "test/core/util/test_config.h"
-#include "test/cpp/end2end/interceptors_util.h"
-#include "test/cpp/end2end/test_service_impl.h"
-#include "test/cpp/util/byte_buffer_proto_helper.h"
-#include "test/cpp/util/string_ref_helper.h"
-#include "test/cpp/util/test_credentials_provider.h"
-
-// MAYBE_SKIP_TEST is a macro to determine if this particular test configuration
-// should be skipped based on a decision made at SetUp time. In particular, any
-// callback tests can only be run if the iomgr can run in the background or if
-// the transport is in-process.
-#define MAYBE_SKIP_TEST \
- do { \
- if (do_not_test_) { \
- return; \
- } \
- } while (0)
-
-namespace grpc {
-namespace testing {
-namespace {
-
-enum class Protocol { INPROC, TCP };
-
-class TestScenario {
- public:
- TestScenario(bool serve_callback, Protocol protocol, bool intercept,
+#include "src/core/lib/gpr/env.h"
+#include "src/core/lib/iomgr/iomgr.h"
+#include "src/proto/grpc/testing/echo.grpc.pb.h"
+#include "test/core/util/port.h"
+#include "test/core/util/test_config.h"
+#include "test/cpp/end2end/interceptors_util.h"
+#include "test/cpp/end2end/test_service_impl.h"
+#include "test/cpp/util/byte_buffer_proto_helper.h"
+#include "test/cpp/util/string_ref_helper.h"
+#include "test/cpp/util/test_credentials_provider.h"
+
+// MAYBE_SKIP_TEST is a macro to determine if this particular test configuration
+// should be skipped based on a decision made at SetUp time. In particular, any
+// callback tests can only be run if the iomgr can run in the background or if
+// the transport is in-process.
+#define MAYBE_SKIP_TEST \
+ do { \
+ if (do_not_test_) { \
+ return; \
+ } \
+ } while (0)
+
+namespace grpc {
+namespace testing {
+namespace {
+
+enum class Protocol { INPROC, TCP };
+
+class TestScenario {
+ public:
+ TestScenario(bool serve_callback, Protocol protocol, bool intercept,
const TString& creds_type)
- : callback_server(serve_callback),
- protocol(protocol),
- use_interceptors(intercept),
- credentials_type(creds_type) {}
- void Log() const;
- bool callback_server;
- Protocol protocol;
- bool use_interceptors;
+ : callback_server(serve_callback),
+ protocol(protocol),
+ use_interceptors(intercept),
+ credentials_type(creds_type) {}
+ void Log() const;
+ bool callback_server;
+ Protocol protocol;
+ bool use_interceptors;
const TString credentials_type;
-};
-
-static std::ostream& operator<<(std::ostream& out,
- const TestScenario& scenario) {
- return out << "TestScenario{callback_server="
- << (scenario.callback_server ? "true" : "false") << ",protocol="
- << (scenario.protocol == Protocol::INPROC ? "INPROC" : "TCP")
- << ",intercept=" << (scenario.use_interceptors ? "true" : "false")
- << ",creds=" << scenario.credentials_type << "}";
-}
-
-void TestScenario::Log() const {
- std::ostringstream out;
- out << *this;
- gpr_log(GPR_DEBUG, "%s", out.str().c_str());
-}
-
-class ClientCallbackEnd2endTest
- : public ::testing::TestWithParam<TestScenario> {
- protected:
- ClientCallbackEnd2endTest() { GetParam().Log(); }
-
- void SetUp() override {
- ServerBuilder builder;
-
- auto server_creds = GetCredentialsProvider()->GetServerCredentials(
- GetParam().credentials_type);
- // TODO(vjpai): Support testing of AuthMetadataProcessor
-
- if (GetParam().protocol == Protocol::TCP) {
- picked_port_ = grpc_pick_unused_port_or_die();
- server_address_ << "localhost:" << picked_port_;
- builder.AddListeningPort(server_address_.str(), server_creds);
- }
- if (!GetParam().callback_server) {
- builder.RegisterService(&service_);
- } else {
- builder.RegisterService(&callback_service_);
- }
-
- if (GetParam().use_interceptors) {
- std::vector<
- std::unique_ptr<experimental::ServerInterceptorFactoryInterface>>
- creators;
- // Add 20 dummy server interceptors
- creators.reserve(20);
- for (auto i = 0; i < 20; i++) {
- creators.push_back(std::unique_ptr<DummyInterceptorFactory>(
- new DummyInterceptorFactory()));
- }
- builder.experimental().SetInterceptorCreators(std::move(creators));
- }
-
- server_ = builder.BuildAndStart();
- is_server_started_ = true;
- if (GetParam().protocol == Protocol::TCP &&
- !grpc_iomgr_run_in_background()) {
- do_not_test_ = true;
- }
- }
-
- void ResetStub() {
- ChannelArguments args;
- auto channel_creds = GetCredentialsProvider()->GetChannelCredentials(
- GetParam().credentials_type, &args);
- switch (GetParam().protocol) {
- case Protocol::TCP:
- if (!GetParam().use_interceptors) {
- channel_ = ::grpc::CreateCustomChannel(server_address_.str(),
- channel_creds, args);
- } else {
- channel_ = CreateCustomChannelWithInterceptors(
- server_address_.str(), channel_creds, args,
- CreateDummyClientInterceptors());
- }
- break;
- case Protocol::INPROC:
- if (!GetParam().use_interceptors) {
- channel_ = server_->InProcessChannel(args);
- } else {
- channel_ = server_->experimental().InProcessChannelWithInterceptors(
- args, CreateDummyClientInterceptors());
- }
- break;
- default:
- assert(false);
- }
- stub_ = grpc::testing::EchoTestService::NewStub(channel_);
- generic_stub_.reset(new GenericStub(channel_));
- DummyInterceptor::Reset();
- }
-
- void TearDown() override {
- if (is_server_started_) {
- // Although we would normally do an explicit shutdown, the server
- // should also work correctly with just a destructor call. The regular
- // end2end test uses explicit shutdown, so let this one just do reset.
- server_.reset();
- }
- if (picked_port_ > 0) {
- grpc_recycle_unused_port(picked_port_);
- }
- }
-
- void SendRpcs(int num_rpcs, bool with_binary_metadata) {
+};
+
+static std::ostream& operator<<(std::ostream& out,
+ const TestScenario& scenario) {
+ return out << "TestScenario{callback_server="
+ << (scenario.callback_server ? "true" : "false") << ",protocol="
+ << (scenario.protocol == Protocol::INPROC ? "INPROC" : "TCP")
+ << ",intercept=" << (scenario.use_interceptors ? "true" : "false")
+ << ",creds=" << scenario.credentials_type << "}";
+}
+
+void TestScenario::Log() const {
+ std::ostringstream out;
+ out << *this;
+ gpr_log(GPR_DEBUG, "%s", out.str().c_str());
+}
+
+class ClientCallbackEnd2endTest
+ : public ::testing::TestWithParam<TestScenario> {
+ protected:
+ ClientCallbackEnd2endTest() { GetParam().Log(); }
+
+ void SetUp() override {
+ ServerBuilder builder;
+
+ auto server_creds = GetCredentialsProvider()->GetServerCredentials(
+ GetParam().credentials_type);
+ // TODO(vjpai): Support testing of AuthMetadataProcessor
+
+ if (GetParam().protocol == Protocol::TCP) {
+ picked_port_ = grpc_pick_unused_port_or_die();
+ server_address_ << "localhost:" << picked_port_;
+ builder.AddListeningPort(server_address_.str(), server_creds);
+ }
+ if (!GetParam().callback_server) {
+ builder.RegisterService(&service_);
+ } else {
+ builder.RegisterService(&callback_service_);
+ }
+
+ if (GetParam().use_interceptors) {
+ std::vector<
+ std::unique_ptr<experimental::ServerInterceptorFactoryInterface>>
+ creators;
+ // Add 20 dummy server interceptors
+ creators.reserve(20);
+ for (auto i = 0; i < 20; i++) {
+ creators.push_back(std::unique_ptr<DummyInterceptorFactory>(
+ new DummyInterceptorFactory()));
+ }
+ builder.experimental().SetInterceptorCreators(std::move(creators));
+ }
+
+ server_ = builder.BuildAndStart();
+ is_server_started_ = true;
+ if (GetParam().protocol == Protocol::TCP &&
+ !grpc_iomgr_run_in_background()) {
+ do_not_test_ = true;
+ }
+ }
+
+ void ResetStub() {
+ ChannelArguments args;
+ auto channel_creds = GetCredentialsProvider()->GetChannelCredentials(
+ GetParam().credentials_type, &args);
+ switch (GetParam().protocol) {
+ case Protocol::TCP:
+ if (!GetParam().use_interceptors) {
+ channel_ = ::grpc::CreateCustomChannel(server_address_.str(),
+ channel_creds, args);
+ } else {
+ channel_ = CreateCustomChannelWithInterceptors(
+ server_address_.str(), channel_creds, args,
+ CreateDummyClientInterceptors());
+ }
+ break;
+ case Protocol::INPROC:
+ if (!GetParam().use_interceptors) {
+ channel_ = server_->InProcessChannel(args);
+ } else {
+ channel_ = server_->experimental().InProcessChannelWithInterceptors(
+ args, CreateDummyClientInterceptors());
+ }
+ break;
+ default:
+ assert(false);
+ }
+ stub_ = grpc::testing::EchoTestService::NewStub(channel_);
+ generic_stub_.reset(new GenericStub(channel_));
+ DummyInterceptor::Reset();
+ }
+
+ void TearDown() override {
+ if (is_server_started_) {
+ // Although we would normally do an explicit shutdown, the server
+ // should also work correctly with just a destructor call. The regular
+ // end2end test uses explicit shutdown, so let this one just do reset.
+ server_.reset();
+ }
+ if (picked_port_ > 0) {
+ grpc_recycle_unused_port(picked_port_);
+ }
+ }
+
+ void SendRpcs(int num_rpcs, bool with_binary_metadata) {
TString test_string("");
- for (int i = 0; i < num_rpcs; i++) {
- EchoRequest request;
- EchoResponse response;
- ClientContext cli_ctx;
-
- test_string += "Hello world. ";
- request.set_message(test_string);
+ for (int i = 0; i < num_rpcs; i++) {
+ EchoRequest request;
+ EchoResponse response;
+ ClientContext cli_ctx;
+
+ test_string += "Hello world. ";
+ request.set_message(test_string);
TString val;
- if (with_binary_metadata) {
- request.mutable_param()->set_echo_metadata(true);
- char bytes[8] = {'\0', '\1', '\2', '\3',
- '\4', '\5', '\6', static_cast<char>(i)};
+ if (with_binary_metadata) {
+ request.mutable_param()->set_echo_metadata(true);
+ char bytes[8] = {'\0', '\1', '\2', '\3',
+ '\4', '\5', '\6', static_cast<char>(i)};
val = TString(bytes, 8);
- cli_ctx.AddMetadata("custom-bin", val);
- }
-
- cli_ctx.set_compression_algorithm(GRPC_COMPRESS_GZIP);
-
- std::mutex mu;
- std::condition_variable cv;
- bool done = false;
- stub_->experimental_async()->Echo(
- &cli_ctx, &request, &response,
- [&cli_ctx, &request, &response, &done, &mu, &cv, val,
- with_binary_metadata](Status s) {
- GPR_ASSERT(s.ok());
-
- EXPECT_EQ(request.message(), response.message());
- if (with_binary_metadata) {
- EXPECT_EQ(
- 1u, cli_ctx.GetServerTrailingMetadata().count("custom-bin"));
- EXPECT_EQ(val, ToString(cli_ctx.GetServerTrailingMetadata()
- .find("custom-bin")
- ->second));
- }
- std::lock_guard<std::mutex> l(mu);
- done = true;
- cv.notify_one();
- });
- std::unique_lock<std::mutex> l(mu);
- while (!done) {
- cv.wait(l);
- }
- }
- }
-
- void SendRpcsGeneric(int num_rpcs, bool maybe_except) {
+ cli_ctx.AddMetadata("custom-bin", val);
+ }
+
+ cli_ctx.set_compression_algorithm(GRPC_COMPRESS_GZIP);
+
+ std::mutex mu;
+ std::condition_variable cv;
+ bool done = false;
+ stub_->experimental_async()->Echo(
+ &cli_ctx, &request, &response,
+ [&cli_ctx, &request, &response, &done, &mu, &cv, val,
+ with_binary_metadata](Status s) {
+ GPR_ASSERT(s.ok());
+
+ EXPECT_EQ(request.message(), response.message());
+ if (with_binary_metadata) {
+ EXPECT_EQ(
+ 1u, cli_ctx.GetServerTrailingMetadata().count("custom-bin"));
+ EXPECT_EQ(val, ToString(cli_ctx.GetServerTrailingMetadata()
+ .find("custom-bin")
+ ->second));
+ }
+ std::lock_guard<std::mutex> l(mu);
+ done = true;
+ cv.notify_one();
+ });
+ std::unique_lock<std::mutex> l(mu);
+ while (!done) {
+ cv.wait(l);
+ }
+ }
+ }
+
+ void SendRpcsGeneric(int num_rpcs, bool maybe_except) {
const TString kMethodName("/grpc.testing.EchoTestService/Echo");
TString test_string("");
- for (int i = 0; i < num_rpcs; i++) {
- EchoRequest request;
- std::unique_ptr<ByteBuffer> send_buf;
- ByteBuffer recv_buf;
- ClientContext cli_ctx;
-
- test_string += "Hello world. ";
- request.set_message(test_string);
- send_buf = SerializeToByteBuffer(&request);
-
- std::mutex mu;
- std::condition_variable cv;
- bool done = false;
- generic_stub_->experimental().UnaryCall(
- &cli_ctx, kMethodName, send_buf.get(), &recv_buf,
- [&request, &recv_buf, &done, &mu, &cv, maybe_except](Status s) {
- GPR_ASSERT(s.ok());
-
- EchoResponse response;
- EXPECT_TRUE(ParseFromByteBuffer(&recv_buf, &response));
- EXPECT_EQ(request.message(), response.message());
- std::lock_guard<std::mutex> l(mu);
- done = true;
- cv.notify_one();
-#if GRPC_ALLOW_EXCEPTIONS
- if (maybe_except) {
- throw - 1;
- }
-#else
- GPR_ASSERT(!maybe_except);
-#endif
- });
- std::unique_lock<std::mutex> l(mu);
- while (!done) {
- cv.wait(l);
- }
- }
- }
-
+ for (int i = 0; i < num_rpcs; i++) {
+ EchoRequest request;
+ std::unique_ptr<ByteBuffer> send_buf;
+ ByteBuffer recv_buf;
+ ClientContext cli_ctx;
+
+ test_string += "Hello world. ";
+ request.set_message(test_string);
+ send_buf = SerializeToByteBuffer(&request);
+
+ std::mutex mu;
+ std::condition_variable cv;
+ bool done = false;
+ generic_stub_->experimental().UnaryCall(
+ &cli_ctx, kMethodName, send_buf.get(), &recv_buf,
+ [&request, &recv_buf, &done, &mu, &cv, maybe_except](Status s) {
+ GPR_ASSERT(s.ok());
+
+ EchoResponse response;
+ EXPECT_TRUE(ParseFromByteBuffer(&recv_buf, &response));
+ EXPECT_EQ(request.message(), response.message());
+ std::lock_guard<std::mutex> l(mu);
+ done = true;
+ cv.notify_one();
+#if GRPC_ALLOW_EXCEPTIONS
+ if (maybe_except) {
+ throw - 1;
+ }
+#else
+ GPR_ASSERT(!maybe_except);
+#endif
+ });
+ std::unique_lock<std::mutex> l(mu);
+ while (!done) {
+ cv.wait(l);
+ }
+ }
+ }
+
void SendGenericEchoAsBidi(int num_rpcs, int reuses, bool do_writes_done) {
const TString kMethodName("/grpc.testing.EchoTestService/Echo");
TString test_string("");
- for (int i = 0; i < num_rpcs; i++) {
- test_string += "Hello world. ";
- class Client : public grpc::experimental::ClientBidiReactor<ByteBuffer,
- ByteBuffer> {
- public:
+ for (int i = 0; i < num_rpcs; i++) {
+ test_string += "Hello world. ";
+ class Client : public grpc::experimental::ClientBidiReactor<ByteBuffer,
+ ByteBuffer> {
+ public:
Client(ClientCallbackEnd2endTest* test, const TString& method_name,
const TString& test_str, int reuses, bool do_writes_done)
: reuses_remaining_(reuses), do_writes_done_(do_writes_done) {
- activate_ = [this, test, method_name, test_str] {
- if (reuses_remaining_ > 0) {
- cli_ctx_.reset(new ClientContext);
- reuses_remaining_--;
- test->generic_stub_->experimental().PrepareBidiStreamingCall(
- cli_ctx_.get(), method_name, this);
- request_.set_message(test_str);
- send_buf_ = SerializeToByteBuffer(&request_);
- StartWrite(send_buf_.get());
- StartRead(&recv_buf_);
- StartCall();
- } else {
- std::unique_lock<std::mutex> l(mu_);
- done_ = true;
- cv_.notify_one();
- }
- };
- activate_();
- }
+ activate_ = [this, test, method_name, test_str] {
+ if (reuses_remaining_ > 0) {
+ cli_ctx_.reset(new ClientContext);
+ reuses_remaining_--;
+ test->generic_stub_->experimental().PrepareBidiStreamingCall(
+ cli_ctx_.get(), method_name, this);
+ request_.set_message(test_str);
+ send_buf_ = SerializeToByteBuffer(&request_);
+ StartWrite(send_buf_.get());
+ StartRead(&recv_buf_);
+ StartCall();
+ } else {
+ std::unique_lock<std::mutex> l(mu_);
+ done_ = true;
+ cv_.notify_one();
+ }
+ };
+ activate_();
+ }
void OnWriteDone(bool /*ok*/) override {
if (do_writes_done_) {
StartWritesDone();
}
}
- void OnReadDone(bool /*ok*/) override {
- EchoResponse response;
- EXPECT_TRUE(ParseFromByteBuffer(&recv_buf_, &response));
- EXPECT_EQ(request_.message(), response.message());
- };
- void OnDone(const Status& s) override {
- EXPECT_TRUE(s.ok());
- activate_();
- }
- void Await() {
- std::unique_lock<std::mutex> l(mu_);
- while (!done_) {
- cv_.wait(l);
- }
- }
-
- EchoRequest request_;
- std::unique_ptr<ByteBuffer> send_buf_;
- ByteBuffer recv_buf_;
- std::unique_ptr<ClientContext> cli_ctx_;
- int reuses_remaining_;
- std::function<void()> activate_;
- std::mutex mu_;
- std::condition_variable cv_;
- bool done_ = false;
+ void OnReadDone(bool /*ok*/) override {
+ EchoResponse response;
+ EXPECT_TRUE(ParseFromByteBuffer(&recv_buf_, &response));
+ EXPECT_EQ(request_.message(), response.message());
+ };
+ void OnDone(const Status& s) override {
+ EXPECT_TRUE(s.ok());
+ activate_();
+ }
+ void Await() {
+ std::unique_lock<std::mutex> l(mu_);
+ while (!done_) {
+ cv_.wait(l);
+ }
+ }
+
+ EchoRequest request_;
+ std::unique_ptr<ByteBuffer> send_buf_;
+ ByteBuffer recv_buf_;
+ std::unique_ptr<ClientContext> cli_ctx_;
+ int reuses_remaining_;
+ std::function<void()> activate_;
+ std::mutex mu_;
+ std::condition_variable cv_;
+ bool done_ = false;
const bool do_writes_done_;
};
-
+
Client rpc(this, kMethodName, test_string, reuses, do_writes_done);
- rpc.Await();
- }
- }
- bool do_not_test_{false};
- bool is_server_started_{false};
- int picked_port_{0};
- std::shared_ptr<Channel> channel_;
- std::unique_ptr<grpc::testing::EchoTestService::Stub> stub_;
- std::unique_ptr<grpc::GenericStub> generic_stub_;
- TestServiceImpl service_;
- CallbackTestServiceImpl callback_service_;
- std::unique_ptr<Server> server_;
- std::ostringstream server_address_;
-};
-
-TEST_P(ClientCallbackEnd2endTest, SimpleRpc) {
- MAYBE_SKIP_TEST;
- ResetStub();
- SendRpcs(1, false);
-}
-
+ rpc.Await();
+ }
+ }
+ bool do_not_test_{false};
+ bool is_server_started_{false};
+ int picked_port_{0};
+ std::shared_ptr<Channel> channel_;
+ std::unique_ptr<grpc::testing::EchoTestService::Stub> stub_;
+ std::unique_ptr<grpc::GenericStub> generic_stub_;
+ TestServiceImpl service_;
+ CallbackTestServiceImpl callback_service_;
+ std::unique_ptr<Server> server_;
+ std::ostringstream server_address_;
+};
+
+TEST_P(ClientCallbackEnd2endTest, SimpleRpc) {
+ MAYBE_SKIP_TEST;
+ ResetStub();
+ SendRpcs(1, false);
+}
+
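Illustrative sketch, not part of the patch: every unary test in this file follows the same completion pattern. The callback API returns immediately, so the test thread parks on a condition_variable until the completion lambda fires. Distilled into a standalone helper (the name WaitForOneEcho is hypothetical, and the includes already present in this file are assumed), the pattern is:

// Illustrative only: same stub type, message types, and synchronization
// primitives as SendRpcs above.
void WaitForOneEcho(grpc::testing::EchoTestService::Stub* stub) {
  EchoRequest request;
  EchoResponse response;
  ClientContext cli_ctx;
  request.set_message("Hello world.");
  std::mutex mu;
  std::condition_variable cv;
  bool done = false;
  stub->experimental_async()->Echo(
      &cli_ctx, &request, &response, [&](Status s) {
        EXPECT_TRUE(s.ok());
        EXPECT_EQ(request.message(), response.message());
        std::lock_guard<std::mutex> l(mu);
        done = true;
        cv.notify_one();
      });
  // Block until the completion callback has run; only then may the stack
  // objects captured by reference above go out of scope.
  std::unique_lock<std::mutex> l(mu);
  cv.wait(l, [&] { return done; });
}
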
TEST_P(ClientCallbackEnd2endTest, SimpleRpcExpectedError) {
- MAYBE_SKIP_TEST;
- ResetStub();
+ MAYBE_SKIP_TEST;
+ ResetStub();
EchoRequest request;
EchoResponse response;
@@ -370,9 +370,9 @@ TEST_P(ClientCallbackEnd2endTest, SimpleRpcExpectedError) {
*request.mutable_param()->mutable_expected_error() = error_status;
std::mutex mu;
- std::condition_variable cv;
- bool done = false;
-
+ std::condition_variable cv;
+ bool done = false;
+
stub_->experimental_async()->Echo(
&cli_ctx, &request, &response,
[&response, &done, &mu, &cv, &error_status](Status s) {
@@ -385,11 +385,11 @@ TEST_P(ClientCallbackEnd2endTest, SimpleRpcExpectedError) {
});
std::unique_lock<std::mutex> l(mu);
- while (!done) {
- cv.wait(l);
- }
-}
-
+ while (!done) {
+ cv.wait(l);
+ }
+}
+
TEST_P(ClientCallbackEnd2endTest, SimpleRpcUnderLockNested) {
MAYBE_SKIP_TEST;
ResetStub();
@@ -451,415 +451,415 @@ TEST_P(ClientCallbackEnd2endTest, SimpleRpcUnderLockNested) {
}
}
-TEST_P(ClientCallbackEnd2endTest, SimpleRpcUnderLock) {
- MAYBE_SKIP_TEST;
- ResetStub();
- std::mutex mu;
- std::condition_variable cv;
- bool done = false;
- EchoRequest request;
- request.set_message("Hello locked world.");
- EchoResponse response;
- ClientContext cli_ctx;
- {
- std::lock_guard<std::mutex> l(mu);
- stub_->experimental_async()->Echo(
- &cli_ctx, &request, &response,
- [&mu, &cv, &done, &request, &response](Status s) {
- std::lock_guard<std::mutex> l(mu);
- EXPECT_TRUE(s.ok());
- EXPECT_EQ(request.message(), response.message());
- done = true;
- cv.notify_one();
- });
- }
- std::unique_lock<std::mutex> l(mu);
- while (!done) {
- cv.wait(l);
- }
-}
-
-TEST_P(ClientCallbackEnd2endTest, SequentialRpcs) {
- MAYBE_SKIP_TEST;
- ResetStub();
- SendRpcs(10, false);
-}
-
-TEST_P(ClientCallbackEnd2endTest, SendClientInitialMetadata) {
- MAYBE_SKIP_TEST;
- ResetStub();
- SimpleRequest request;
- SimpleResponse response;
- ClientContext cli_ctx;
-
- cli_ctx.AddMetadata(kCheckClientInitialMetadataKey,
- kCheckClientInitialMetadataVal);
-
- std::mutex mu;
- std::condition_variable cv;
- bool done = false;
- stub_->experimental_async()->CheckClientInitialMetadata(
- &cli_ctx, &request, &response, [&done, &mu, &cv](Status s) {
- GPR_ASSERT(s.ok());
-
- std::lock_guard<std::mutex> l(mu);
- done = true;
- cv.notify_one();
- });
- std::unique_lock<std::mutex> l(mu);
- while (!done) {
- cv.wait(l);
- }
-}
-
-TEST_P(ClientCallbackEnd2endTest, SimpleRpcWithBinaryMetadata) {
- MAYBE_SKIP_TEST;
- ResetStub();
- SendRpcs(1, true);
-}
-
-TEST_P(ClientCallbackEnd2endTest, SequentialRpcsWithVariedBinaryMetadataValue) {
- MAYBE_SKIP_TEST;
- ResetStub();
- SendRpcs(10, true);
-}
-
-TEST_P(ClientCallbackEnd2endTest, SequentialGenericRpcs) {
- MAYBE_SKIP_TEST;
- ResetStub();
- SendRpcsGeneric(10, false);
-}
-
-TEST_P(ClientCallbackEnd2endTest, SequentialGenericRpcsAsBidi) {
- MAYBE_SKIP_TEST;
- ResetStub();
+TEST_P(ClientCallbackEnd2endTest, SimpleRpcUnderLock) {
+ MAYBE_SKIP_TEST;
+ ResetStub();
+ std::mutex mu;
+ std::condition_variable cv;
+ bool done = false;
+ EchoRequest request;
+ request.set_message("Hello locked world.");
+ EchoResponse response;
+ ClientContext cli_ctx;
+ {
+ std::lock_guard<std::mutex> l(mu);
+ stub_->experimental_async()->Echo(
+ &cli_ctx, &request, &response,
+ [&mu, &cv, &done, &request, &response](Status s) {
+ std::lock_guard<std::mutex> l(mu);
+ EXPECT_TRUE(s.ok());
+ EXPECT_EQ(request.message(), response.message());
+ done = true;
+ cv.notify_one();
+ });
+ }
+ std::unique_lock<std::mutex> l(mu);
+ while (!done) {
+ cv.wait(l);
+ }
+}
+
+TEST_P(ClientCallbackEnd2endTest, SequentialRpcs) {
+ MAYBE_SKIP_TEST;
+ ResetStub();
+ SendRpcs(10, false);
+}
+
+TEST_P(ClientCallbackEnd2endTest, SendClientInitialMetadata) {
+ MAYBE_SKIP_TEST;
+ ResetStub();
+ SimpleRequest request;
+ SimpleResponse response;
+ ClientContext cli_ctx;
+
+ cli_ctx.AddMetadata(kCheckClientInitialMetadataKey,
+ kCheckClientInitialMetadataVal);
+
+ std::mutex mu;
+ std::condition_variable cv;
+ bool done = false;
+ stub_->experimental_async()->CheckClientInitialMetadata(
+ &cli_ctx, &request, &response, [&done, &mu, &cv](Status s) {
+ GPR_ASSERT(s.ok());
+
+ std::lock_guard<std::mutex> l(mu);
+ done = true;
+ cv.notify_one();
+ });
+ std::unique_lock<std::mutex> l(mu);
+ while (!done) {
+ cv.wait(l);
+ }
+}
+
+TEST_P(ClientCallbackEnd2endTest, SimpleRpcWithBinaryMetadata) {
+ MAYBE_SKIP_TEST;
+ ResetStub();
+ SendRpcs(1, true);
+}
+
+TEST_P(ClientCallbackEnd2endTest, SequentialRpcsWithVariedBinaryMetadataValue) {
+ MAYBE_SKIP_TEST;
+ ResetStub();
+ SendRpcs(10, true);
+}
+
+TEST_P(ClientCallbackEnd2endTest, SequentialGenericRpcs) {
+ MAYBE_SKIP_TEST;
+ ResetStub();
+ SendRpcsGeneric(10, false);
+}
+
+TEST_P(ClientCallbackEnd2endTest, SequentialGenericRpcsAsBidi) {
+ MAYBE_SKIP_TEST;
+ ResetStub();
SendGenericEchoAsBidi(10, 1, /*do_writes_done=*/true);
-}
-
-TEST_P(ClientCallbackEnd2endTest, SequentialGenericRpcsAsBidiWithReactorReuse) {
- MAYBE_SKIP_TEST;
- ResetStub();
+}
+
+TEST_P(ClientCallbackEnd2endTest, SequentialGenericRpcsAsBidiWithReactorReuse) {
+ MAYBE_SKIP_TEST;
+ ResetStub();
SendGenericEchoAsBidi(10, 10, /*do_writes_done=*/true);
-}
-
+}
+
TEST_P(ClientCallbackEnd2endTest, GenericRpcNoWritesDone) {
MAYBE_SKIP_TEST;
ResetStub();
SendGenericEchoAsBidi(1, 1, /*do_writes_done=*/false);
}
-#if GRPC_ALLOW_EXCEPTIONS
-TEST_P(ClientCallbackEnd2endTest, ExceptingRpc) {
- MAYBE_SKIP_TEST;
- ResetStub();
- SendRpcsGeneric(10, true);
-}
-#endif
-
-TEST_P(ClientCallbackEnd2endTest, MultipleRpcsWithVariedBinaryMetadataValue) {
- MAYBE_SKIP_TEST;
- ResetStub();
- std::vector<std::thread> threads;
- threads.reserve(10);
- for (int i = 0; i < 10; ++i) {
- threads.emplace_back([this] { SendRpcs(10, true); });
- }
- for (int i = 0; i < 10; ++i) {
- threads[i].join();
- }
-}
-
-TEST_P(ClientCallbackEnd2endTest, MultipleRpcs) {
- MAYBE_SKIP_TEST;
- ResetStub();
- std::vector<std::thread> threads;
- threads.reserve(10);
- for (int i = 0; i < 10; ++i) {
- threads.emplace_back([this] { SendRpcs(10, false); });
- }
- for (int i = 0; i < 10; ++i) {
- threads[i].join();
- }
-}
-
-TEST_P(ClientCallbackEnd2endTest, CancelRpcBeforeStart) {
- MAYBE_SKIP_TEST;
- ResetStub();
- EchoRequest request;
- EchoResponse response;
- ClientContext context;
- request.set_message("hello");
- context.TryCancel();
-
- std::mutex mu;
- std::condition_variable cv;
- bool done = false;
- stub_->experimental_async()->Echo(
- &context, &request, &response, [&response, &done, &mu, &cv](Status s) {
- EXPECT_EQ("", response.message());
- EXPECT_EQ(grpc::StatusCode::CANCELLED, s.error_code());
- std::lock_guard<std::mutex> l(mu);
- done = true;
- cv.notify_one();
- });
- std::unique_lock<std::mutex> l(mu);
- while (!done) {
- cv.wait(l);
- }
- if (GetParam().use_interceptors) {
- EXPECT_EQ(20, DummyInterceptor::GetNumTimesCancel());
- }
-}
-
-TEST_P(ClientCallbackEnd2endTest, RequestEchoServerCancel) {
- MAYBE_SKIP_TEST;
- ResetStub();
- EchoRequest request;
- EchoResponse response;
- ClientContext context;
- request.set_message("hello");
- context.AddMetadata(kServerTryCancelRequest,
+#if GRPC_ALLOW_EXCEPTIONS
+TEST_P(ClientCallbackEnd2endTest, ExceptingRpc) {
+ MAYBE_SKIP_TEST;
+ ResetStub();
+ SendRpcsGeneric(10, true);
+}
+#endif
+
+TEST_P(ClientCallbackEnd2endTest, MultipleRpcsWithVariedBinaryMetadataValue) {
+ MAYBE_SKIP_TEST;
+ ResetStub();
+ std::vector<std::thread> threads;
+ threads.reserve(10);
+ for (int i = 0; i < 10; ++i) {
+ threads.emplace_back([this] { SendRpcs(10, true); });
+ }
+ for (int i = 0; i < 10; ++i) {
+ threads[i].join();
+ }
+}
+
+TEST_P(ClientCallbackEnd2endTest, MultipleRpcs) {
+ MAYBE_SKIP_TEST;
+ ResetStub();
+ std::vector<std::thread> threads;
+ threads.reserve(10);
+ for (int i = 0; i < 10; ++i) {
+ threads.emplace_back([this] { SendRpcs(10, false); });
+ }
+ for (int i = 0; i < 10; ++i) {
+ threads[i].join();
+ }
+}
+
+TEST_P(ClientCallbackEnd2endTest, CancelRpcBeforeStart) {
+ MAYBE_SKIP_TEST;
+ ResetStub();
+ EchoRequest request;
+ EchoResponse response;
+ ClientContext context;
+ request.set_message("hello");
+ context.TryCancel();
+
+ std::mutex mu;
+ std::condition_variable cv;
+ bool done = false;
+ stub_->experimental_async()->Echo(
+ &context, &request, &response, [&response, &done, &mu, &cv](Status s) {
+ EXPECT_EQ("", response.message());
+ EXPECT_EQ(grpc::StatusCode::CANCELLED, s.error_code());
+ std::lock_guard<std::mutex> l(mu);
+ done = true;
+ cv.notify_one();
+ });
+ std::unique_lock<std::mutex> l(mu);
+ while (!done) {
+ cv.wait(l);
+ }
+ if (GetParam().use_interceptors) {
+ EXPECT_EQ(20, DummyInterceptor::GetNumTimesCancel());
+ }
+}
+
+TEST_P(ClientCallbackEnd2endTest, RequestEchoServerCancel) {
+ MAYBE_SKIP_TEST;
+ ResetStub();
+ EchoRequest request;
+ EchoResponse response;
+ ClientContext context;
+ request.set_message("hello");
+ context.AddMetadata(kServerTryCancelRequest,
ToString(CANCEL_BEFORE_PROCESSING));
-
- std::mutex mu;
- std::condition_variable cv;
- bool done = false;
- stub_->experimental_async()->Echo(
- &context, &request, &response, [&done, &mu, &cv](Status s) {
- EXPECT_FALSE(s.ok());
- EXPECT_EQ(grpc::StatusCode::CANCELLED, s.error_code());
- std::lock_guard<std::mutex> l(mu);
- done = true;
- cv.notify_one();
- });
- std::unique_lock<std::mutex> l(mu);
- while (!done) {
- cv.wait(l);
- }
-}
-
-struct ClientCancelInfo {
- bool cancel{false};
- int ops_before_cancel;
-
- ClientCancelInfo() : cancel{false} {}
- explicit ClientCancelInfo(int ops) : cancel{true}, ops_before_cancel{ops} {}
-};
-
-class WriteClient : public grpc::experimental::ClientWriteReactor<EchoRequest> {
- public:
- WriteClient(grpc::testing::EchoTestService::Stub* stub,
- ServerTryCancelRequestPhase server_try_cancel,
- int num_msgs_to_send, ClientCancelInfo client_cancel = {})
- : server_try_cancel_(server_try_cancel),
- num_msgs_to_send_(num_msgs_to_send),
- client_cancel_{client_cancel} {
+
+ std::mutex mu;
+ std::condition_variable cv;
+ bool done = false;
+ stub_->experimental_async()->Echo(
+ &context, &request, &response, [&done, &mu, &cv](Status s) {
+ EXPECT_FALSE(s.ok());
+ EXPECT_EQ(grpc::StatusCode::CANCELLED, s.error_code());
+ std::lock_guard<std::mutex> l(mu);
+ done = true;
+ cv.notify_one();
+ });
+ std::unique_lock<std::mutex> l(mu);
+ while (!done) {
+ cv.wait(l);
+ }
+}
+
+struct ClientCancelInfo {
+ bool cancel{false};
+ int ops_before_cancel;
+
+ ClientCancelInfo() : cancel{false} {}
+ explicit ClientCancelInfo(int ops) : cancel{true}, ops_before_cancel{ops} {}
+};
+
+class WriteClient : public grpc::experimental::ClientWriteReactor<EchoRequest> {
+ public:
+ WriteClient(grpc::testing::EchoTestService::Stub* stub,
+ ServerTryCancelRequestPhase server_try_cancel,
+ int num_msgs_to_send, ClientCancelInfo client_cancel = {})
+ : server_try_cancel_(server_try_cancel),
+ num_msgs_to_send_(num_msgs_to_send),
+ client_cancel_{client_cancel} {
TString msg{"Hello server."};
- for (int i = 0; i < num_msgs_to_send; i++) {
- desired_ += msg;
- }
- if (server_try_cancel != DO_NOT_CANCEL) {
- // Send server_try_cancel value in the client metadata
- context_.AddMetadata(kServerTryCancelRequest,
+ for (int i = 0; i < num_msgs_to_send; i++) {
+ desired_ += msg;
+ }
+ if (server_try_cancel != DO_NOT_CANCEL) {
+ // Send server_try_cancel value in the client metadata
+ context_.AddMetadata(kServerTryCancelRequest,
ToString(server_try_cancel));
- }
- context_.set_initial_metadata_corked(true);
- stub->experimental_async()->RequestStream(&context_, &response_, this);
- StartCall();
- request_.set_message(msg);
- MaybeWrite();
- }
- void OnWriteDone(bool ok) override {
- if (ok) {
- num_msgs_sent_++;
- MaybeWrite();
- }
- }
- void OnDone(const Status& s) override {
- gpr_log(GPR_INFO, "Sent %d messages", num_msgs_sent_);
- int num_to_send =
- (client_cancel_.cancel)
- ? std::min(num_msgs_to_send_, client_cancel_.ops_before_cancel)
- : num_msgs_to_send_;
- switch (server_try_cancel_) {
- case CANCEL_BEFORE_PROCESSING:
- case CANCEL_DURING_PROCESSING:
- // If the RPC is canceled by server before / during messages from the
- // client, it means that the client most likely did not get a chance to
-          // send all the messages it wanted to send. i.e. num_msgs_sent <=
- // num_msgs_to_send
- EXPECT_LE(num_msgs_sent_, num_to_send);
- break;
- case DO_NOT_CANCEL:
- case CANCEL_AFTER_PROCESSING:
- // If the RPC was not canceled or canceled after all messages were read
- // by the server, the client did get a chance to send all its messages
- EXPECT_EQ(num_msgs_sent_, num_to_send);
- break;
- default:
- assert(false);
- break;
- }
- if ((server_try_cancel_ == DO_NOT_CANCEL) && !client_cancel_.cancel) {
- EXPECT_TRUE(s.ok());
- EXPECT_EQ(response_.message(), desired_);
- } else {
- EXPECT_FALSE(s.ok());
- EXPECT_EQ(grpc::StatusCode::CANCELLED, s.error_code());
- }
- std::unique_lock<std::mutex> l(mu_);
- done_ = true;
- cv_.notify_one();
- }
- void Await() {
- std::unique_lock<std::mutex> l(mu_);
- while (!done_) {
- cv_.wait(l);
- }
- }
-
- private:
- void MaybeWrite() {
- if (client_cancel_.cancel &&
- num_msgs_sent_ == client_cancel_.ops_before_cancel) {
- context_.TryCancel();
- } else if (num_msgs_to_send_ > num_msgs_sent_ + 1) {
- StartWrite(&request_);
- } else if (num_msgs_to_send_ == num_msgs_sent_ + 1) {
- StartWriteLast(&request_, WriteOptions());
- }
- }
- EchoRequest request_;
- EchoResponse response_;
- ClientContext context_;
- const ServerTryCancelRequestPhase server_try_cancel_;
- int num_msgs_sent_{0};
- const int num_msgs_to_send_;
+ }
+ context_.set_initial_metadata_corked(true);
+ stub->experimental_async()->RequestStream(&context_, &response_, this);
+ StartCall();
+ request_.set_message(msg);
+ MaybeWrite();
+ }
+ void OnWriteDone(bool ok) override {
+ if (ok) {
+ num_msgs_sent_++;
+ MaybeWrite();
+ }
+ }
+ void OnDone(const Status& s) override {
+ gpr_log(GPR_INFO, "Sent %d messages", num_msgs_sent_);
+ int num_to_send =
+ (client_cancel_.cancel)
+ ? std::min(num_msgs_to_send_, client_cancel_.ops_before_cancel)
+ : num_msgs_to_send_;
+ switch (server_try_cancel_) {
+ case CANCEL_BEFORE_PROCESSING:
+ case CANCEL_DURING_PROCESSING:
+ // If the RPC is canceled by server before / during messages from the
+ // client, it means that the client most likely did not get a chance to
+          // send all the messages it wanted to send. i.e. num_msgs_sent <=
+ // num_msgs_to_send
+ EXPECT_LE(num_msgs_sent_, num_to_send);
+ break;
+ case DO_NOT_CANCEL:
+ case CANCEL_AFTER_PROCESSING:
+ // If the RPC was not canceled or canceled after all messages were read
+ // by the server, the client did get a chance to send all its messages
+ EXPECT_EQ(num_msgs_sent_, num_to_send);
+ break;
+ default:
+ assert(false);
+ break;
+ }
+ if ((server_try_cancel_ == DO_NOT_CANCEL) && !client_cancel_.cancel) {
+ EXPECT_TRUE(s.ok());
+ EXPECT_EQ(response_.message(), desired_);
+ } else {
+ EXPECT_FALSE(s.ok());
+ EXPECT_EQ(grpc::StatusCode::CANCELLED, s.error_code());
+ }
+ std::unique_lock<std::mutex> l(mu_);
+ done_ = true;
+ cv_.notify_one();
+ }
+ void Await() {
+ std::unique_lock<std::mutex> l(mu_);
+ while (!done_) {
+ cv_.wait(l);
+ }
+ }
+
+ private:
+ void MaybeWrite() {
+ if (client_cancel_.cancel &&
+ num_msgs_sent_ == client_cancel_.ops_before_cancel) {
+ context_.TryCancel();
+ } else if (num_msgs_to_send_ > num_msgs_sent_ + 1) {
+ StartWrite(&request_);
+ } else if (num_msgs_to_send_ == num_msgs_sent_ + 1) {
+ StartWriteLast(&request_, WriteOptions());
+ }
+ }
+ EchoRequest request_;
+ EchoResponse response_;
+ ClientContext context_;
+ const ServerTryCancelRequestPhase server_try_cancel_;
+ int num_msgs_sent_{0};
+ const int num_msgs_to_send_;
TString desired_;
- const ClientCancelInfo client_cancel_;
- std::mutex mu_;
- std::condition_variable cv_;
- bool done_ = false;
-};
-
-TEST_P(ClientCallbackEnd2endTest, RequestStream) {
- MAYBE_SKIP_TEST;
- ResetStub();
- WriteClient test{stub_.get(), DO_NOT_CANCEL, 3};
- test.Await();
- // Make sure that the server interceptors were not notified to cancel
- if (GetParam().use_interceptors) {
- EXPECT_EQ(0, DummyInterceptor::GetNumTimesCancel());
- }
-}
-
-TEST_P(ClientCallbackEnd2endTest, ClientCancelsRequestStream) {
- MAYBE_SKIP_TEST;
- ResetStub();
- WriteClient test{stub_.get(), DO_NOT_CANCEL, 3, ClientCancelInfo{2}};
- test.Await();
- // Make sure that the server interceptors got the cancel
- if (GetParam().use_interceptors) {
- EXPECT_EQ(20, DummyInterceptor::GetNumTimesCancel());
- }
-}
-
-// Server to cancel before reading the request
-TEST_P(ClientCallbackEnd2endTest, RequestStreamServerCancelBeforeReads) {
- MAYBE_SKIP_TEST;
- ResetStub();
- WriteClient test{stub_.get(), CANCEL_BEFORE_PROCESSING, 1};
- test.Await();
- // Make sure that the server interceptors were notified
- if (GetParam().use_interceptors) {
- EXPECT_EQ(20, DummyInterceptor::GetNumTimesCancel());
- }
-}
-
-// Server to cancel while reading a request from the stream in parallel
-TEST_P(ClientCallbackEnd2endTest, RequestStreamServerCancelDuringRead) {
- MAYBE_SKIP_TEST;
- ResetStub();
- WriteClient test{stub_.get(), CANCEL_DURING_PROCESSING, 10};
- test.Await();
- // Make sure that the server interceptors were notified
- if (GetParam().use_interceptors) {
- EXPECT_EQ(20, DummyInterceptor::GetNumTimesCancel());
- }
-}
-
-// Server to cancel after reading all the requests but before returning to the
-// client
-TEST_P(ClientCallbackEnd2endTest, RequestStreamServerCancelAfterReads) {
- MAYBE_SKIP_TEST;
- ResetStub();
- WriteClient test{stub_.get(), CANCEL_AFTER_PROCESSING, 4};
- test.Await();
- // Make sure that the server interceptors were notified
- if (GetParam().use_interceptors) {
- EXPECT_EQ(20, DummyInterceptor::GetNumTimesCancel());
- }
-}
-
-TEST_P(ClientCallbackEnd2endTest, UnaryReactor) {
- MAYBE_SKIP_TEST;
- ResetStub();
- class UnaryClient : public grpc::experimental::ClientUnaryReactor {
- public:
- UnaryClient(grpc::testing::EchoTestService::Stub* stub) {
- cli_ctx_.AddMetadata("key1", "val1");
- cli_ctx_.AddMetadata("key2", "val2");
- request_.mutable_param()->set_echo_metadata_initially(true);
- request_.set_message("Hello metadata");
- stub->experimental_async()->Echo(&cli_ctx_, &request_, &response_, this);
- StartCall();
- }
- void OnReadInitialMetadataDone(bool ok) override {
- EXPECT_TRUE(ok);
- EXPECT_EQ(1u, cli_ctx_.GetServerInitialMetadata().count("key1"));
- EXPECT_EQ(
- "val1",
- ToString(cli_ctx_.GetServerInitialMetadata().find("key1")->second));
- EXPECT_EQ(1u, cli_ctx_.GetServerInitialMetadata().count("key2"));
- EXPECT_EQ(
- "val2",
- ToString(cli_ctx_.GetServerInitialMetadata().find("key2")->second));
- initial_metadata_done_ = true;
- }
- void OnDone(const Status& s) override {
- EXPECT_TRUE(initial_metadata_done_);
- EXPECT_EQ(0u, cli_ctx_.GetServerTrailingMetadata().size());
- EXPECT_TRUE(s.ok());
- EXPECT_EQ(request_.message(), response_.message());
- std::unique_lock<std::mutex> l(mu_);
- done_ = true;
- cv_.notify_one();
- }
- void Await() {
- std::unique_lock<std::mutex> l(mu_);
- while (!done_) {
- cv_.wait(l);
- }
- }
-
- private:
- EchoRequest request_;
- EchoResponse response_;
- ClientContext cli_ctx_;
- std::mutex mu_;
- std::condition_variable cv_;
- bool done_{false};
- bool initial_metadata_done_{false};
- };
-
- UnaryClient test{stub_.get()};
- test.Await();
- // Make sure that the server interceptors were not notified of a cancel
- if (GetParam().use_interceptors) {
- EXPECT_EQ(0, DummyInterceptor::GetNumTimesCancel());
- }
-}
-
+ const ClientCancelInfo client_cancel_;
+ std::mutex mu_;
+ std::condition_variable cv_;
+ bool done_ = false;
+};
+
+TEST_P(ClientCallbackEnd2endTest, RequestStream) {
+ MAYBE_SKIP_TEST;
+ ResetStub();
+ WriteClient test{stub_.get(), DO_NOT_CANCEL, 3};
+ test.Await();
+ // Make sure that the server interceptors were not notified to cancel
+ if (GetParam().use_interceptors) {
+ EXPECT_EQ(0, DummyInterceptor::GetNumTimesCancel());
+ }
+}
+
+TEST_P(ClientCallbackEnd2endTest, ClientCancelsRequestStream) {
+ MAYBE_SKIP_TEST;
+ ResetStub();
+ WriteClient test{stub_.get(), DO_NOT_CANCEL, 3, ClientCancelInfo{2}};
+ test.Await();
+ // Make sure that the server interceptors got the cancel
+ if (GetParam().use_interceptors) {
+ EXPECT_EQ(20, DummyInterceptor::GetNumTimesCancel());
+ }
+}
+
+// Server to cancel before reading the request
+TEST_P(ClientCallbackEnd2endTest, RequestStreamServerCancelBeforeReads) {
+ MAYBE_SKIP_TEST;
+ ResetStub();
+ WriteClient test{stub_.get(), CANCEL_BEFORE_PROCESSING, 1};
+ test.Await();
+ // Make sure that the server interceptors were notified
+ if (GetParam().use_interceptors) {
+ EXPECT_EQ(20, DummyInterceptor::GetNumTimesCancel());
+ }
+}
+
+// Server to cancel while reading a request from the stream in parallel
+TEST_P(ClientCallbackEnd2endTest, RequestStreamServerCancelDuringRead) {
+ MAYBE_SKIP_TEST;
+ ResetStub();
+ WriteClient test{stub_.get(), CANCEL_DURING_PROCESSING, 10};
+ test.Await();
+ // Make sure that the server interceptors were notified
+ if (GetParam().use_interceptors) {
+ EXPECT_EQ(20, DummyInterceptor::GetNumTimesCancel());
+ }
+}
+
+// Server to cancel after reading all the requests but before returning to the
+// client
+TEST_P(ClientCallbackEnd2endTest, RequestStreamServerCancelAfterReads) {
+ MAYBE_SKIP_TEST;
+ ResetStub();
+ WriteClient test{stub_.get(), CANCEL_AFTER_PROCESSING, 4};
+ test.Await();
+ // Make sure that the server interceptors were notified
+ if (GetParam().use_interceptors) {
+ EXPECT_EQ(20, DummyInterceptor::GetNumTimesCancel());
+ }
+}
+
+TEST_P(ClientCallbackEnd2endTest, UnaryReactor) {
+ MAYBE_SKIP_TEST;
+ ResetStub();
+ class UnaryClient : public grpc::experimental::ClientUnaryReactor {
+ public:
+ UnaryClient(grpc::testing::EchoTestService::Stub* stub) {
+ cli_ctx_.AddMetadata("key1", "val1");
+ cli_ctx_.AddMetadata("key2", "val2");
+ request_.mutable_param()->set_echo_metadata_initially(true);
+ request_.set_message("Hello metadata");
+ stub->experimental_async()->Echo(&cli_ctx_, &request_, &response_, this);
+ StartCall();
+ }
+ void OnReadInitialMetadataDone(bool ok) override {
+ EXPECT_TRUE(ok);
+ EXPECT_EQ(1u, cli_ctx_.GetServerInitialMetadata().count("key1"));
+ EXPECT_EQ(
+ "val1",
+ ToString(cli_ctx_.GetServerInitialMetadata().find("key1")->second));
+ EXPECT_EQ(1u, cli_ctx_.GetServerInitialMetadata().count("key2"));
+ EXPECT_EQ(
+ "val2",
+ ToString(cli_ctx_.GetServerInitialMetadata().find("key2")->second));
+ initial_metadata_done_ = true;
+ }
+ void OnDone(const Status& s) override {
+ EXPECT_TRUE(initial_metadata_done_);
+ EXPECT_EQ(0u, cli_ctx_.GetServerTrailingMetadata().size());
+ EXPECT_TRUE(s.ok());
+ EXPECT_EQ(request_.message(), response_.message());
+ std::unique_lock<std::mutex> l(mu_);
+ done_ = true;
+ cv_.notify_one();
+ }
+ void Await() {
+ std::unique_lock<std::mutex> l(mu_);
+ while (!done_) {
+ cv_.wait(l);
+ }
+ }
+
+ private:
+ EchoRequest request_;
+ EchoResponse response_;
+ ClientContext cli_ctx_;
+ std::mutex mu_;
+ std::condition_variable cv_;
+ bool done_{false};
+ bool initial_metadata_done_{false};
+ };
+
+ UnaryClient test{stub_.get()};
+ test.Await();
+ // Make sure that the server interceptors were not notified of a cancel
+ if (GetParam().use_interceptors) {
+ EXPECT_EQ(0, DummyInterceptor::GetNumTimesCancel());
+ }
+}
+
TEST_P(ClientCallbackEnd2endTest, GenericUnaryReactor) {
MAYBE_SKIP_TEST;
ResetStub();
@@ -926,270 +926,270 @@ TEST_P(ClientCallbackEnd2endTest, GenericUnaryReactor) {
}
}
-class ReadClient : public grpc::experimental::ClientReadReactor<EchoResponse> {
- public:
- ReadClient(grpc::testing::EchoTestService::Stub* stub,
- ServerTryCancelRequestPhase server_try_cancel,
- ClientCancelInfo client_cancel = {})
- : server_try_cancel_(server_try_cancel), client_cancel_{client_cancel} {
- if (server_try_cancel_ != DO_NOT_CANCEL) {
- // Send server_try_cancel value in the client metadata
- context_.AddMetadata(kServerTryCancelRequest,
+class ReadClient : public grpc::experimental::ClientReadReactor<EchoResponse> {
+ public:
+ ReadClient(grpc::testing::EchoTestService::Stub* stub,
+ ServerTryCancelRequestPhase server_try_cancel,
+ ClientCancelInfo client_cancel = {})
+ : server_try_cancel_(server_try_cancel), client_cancel_{client_cancel} {
+ if (server_try_cancel_ != DO_NOT_CANCEL) {
+ // Send server_try_cancel value in the client metadata
+ context_.AddMetadata(kServerTryCancelRequest,
ToString(server_try_cancel));
- }
- request_.set_message("Hello client ");
- stub->experimental_async()->ResponseStream(&context_, &request_, this);
- if (client_cancel_.cancel &&
- reads_complete_ == client_cancel_.ops_before_cancel) {
- context_.TryCancel();
- }
- // Even if we cancel, read until failure because there might be responses
- // pending
- StartRead(&response_);
- StartCall();
- }
- void OnReadDone(bool ok) override {
- if (!ok) {
- if (server_try_cancel_ == DO_NOT_CANCEL && !client_cancel_.cancel) {
- EXPECT_EQ(reads_complete_, kServerDefaultResponseStreamsToSend);
- }
- } else {
- EXPECT_LE(reads_complete_, kServerDefaultResponseStreamsToSend);
- EXPECT_EQ(response_.message(),
+ }
+ request_.set_message("Hello client ");
+ stub->experimental_async()->ResponseStream(&context_, &request_, this);
+ if (client_cancel_.cancel &&
+ reads_complete_ == client_cancel_.ops_before_cancel) {
+ context_.TryCancel();
+ }
+ // Even if we cancel, read until failure because there might be responses
+ // pending
+ StartRead(&response_);
+ StartCall();
+ }
+ void OnReadDone(bool ok) override {
+ if (!ok) {
+ if (server_try_cancel_ == DO_NOT_CANCEL && !client_cancel_.cancel) {
+ EXPECT_EQ(reads_complete_, kServerDefaultResponseStreamsToSend);
+ }
+ } else {
+ EXPECT_LE(reads_complete_, kServerDefaultResponseStreamsToSend);
+ EXPECT_EQ(response_.message(),
request_.message() + ToString(reads_complete_));
- reads_complete_++;
- if (client_cancel_.cancel &&
- reads_complete_ == client_cancel_.ops_before_cancel) {
- context_.TryCancel();
- }
- // Even if we cancel, read until failure because there might be responses
- // pending
- StartRead(&response_);
- }
- }
- void OnDone(const Status& s) override {
- gpr_log(GPR_INFO, "Read %d messages", reads_complete_);
- switch (server_try_cancel_) {
- case DO_NOT_CANCEL:
- if (!client_cancel_.cancel || client_cancel_.ops_before_cancel >
- kServerDefaultResponseStreamsToSend) {
- EXPECT_TRUE(s.ok());
- EXPECT_EQ(reads_complete_, kServerDefaultResponseStreamsToSend);
- } else {
- EXPECT_GE(reads_complete_, client_cancel_.ops_before_cancel);
- EXPECT_LE(reads_complete_, kServerDefaultResponseStreamsToSend);
- // Status might be ok or cancelled depending on whether server
- // sent status before client cancel went through
- if (!s.ok()) {
- EXPECT_EQ(grpc::StatusCode::CANCELLED, s.error_code());
- }
- }
- break;
- case CANCEL_BEFORE_PROCESSING:
- EXPECT_FALSE(s.ok());
- EXPECT_EQ(grpc::StatusCode::CANCELLED, s.error_code());
- EXPECT_EQ(reads_complete_, 0);
- break;
- case CANCEL_DURING_PROCESSING:
- case CANCEL_AFTER_PROCESSING:
- // If server canceled while writing messages, client must have read
- // less than or equal to the expected number of messages. Even if the
- // server canceled after writing all messages, the RPC may be canceled
- // before the Client got a chance to read all the messages.
- EXPECT_FALSE(s.ok());
- EXPECT_EQ(grpc::StatusCode::CANCELLED, s.error_code());
- EXPECT_LE(reads_complete_, kServerDefaultResponseStreamsToSend);
- break;
- default:
- assert(false);
- }
- std::unique_lock<std::mutex> l(mu_);
- done_ = true;
- cv_.notify_one();
- }
- void Await() {
- std::unique_lock<std::mutex> l(mu_);
- while (!done_) {
- cv_.wait(l);
- }
- }
-
- private:
- EchoRequest request_;
- EchoResponse response_;
- ClientContext context_;
- const ServerTryCancelRequestPhase server_try_cancel_;
- int reads_complete_{0};
- const ClientCancelInfo client_cancel_;
- std::mutex mu_;
- std::condition_variable cv_;
- bool done_ = false;
-};
-
-TEST_P(ClientCallbackEnd2endTest, ResponseStream) {
- MAYBE_SKIP_TEST;
- ResetStub();
- ReadClient test{stub_.get(), DO_NOT_CANCEL};
- test.Await();
- // Make sure that the server interceptors were not notified of a cancel
- if (GetParam().use_interceptors) {
- EXPECT_EQ(0, DummyInterceptor::GetNumTimesCancel());
- }
-}
-
-TEST_P(ClientCallbackEnd2endTest, ClientCancelsResponseStream) {
- MAYBE_SKIP_TEST;
- ResetStub();
- ReadClient test{stub_.get(), DO_NOT_CANCEL, ClientCancelInfo{2}};
- test.Await();
- // Because cancel in this case races with server finish, we can't be sure that
- // server interceptors even see cancellation
-}
-
-// Server to cancel before sending any response messages
-TEST_P(ClientCallbackEnd2endTest, ResponseStreamServerCancelBefore) {
- MAYBE_SKIP_TEST;
- ResetStub();
- ReadClient test{stub_.get(), CANCEL_BEFORE_PROCESSING};
- test.Await();
- // Make sure that the server interceptors were notified
- if (GetParam().use_interceptors) {
- EXPECT_EQ(20, DummyInterceptor::GetNumTimesCancel());
- }
-}
-
-// Server to cancel while writing a response to the stream in parallel
-TEST_P(ClientCallbackEnd2endTest, ResponseStreamServerCancelDuring) {
- MAYBE_SKIP_TEST;
- ResetStub();
- ReadClient test{stub_.get(), CANCEL_DURING_PROCESSING};
- test.Await();
- // Make sure that the server interceptors were notified
- if (GetParam().use_interceptors) {
- EXPECT_EQ(20, DummyInterceptor::GetNumTimesCancel());
- }
-}
-
-// Server to cancel after writing all the responses to the stream but before
-// returning to the client
-TEST_P(ClientCallbackEnd2endTest, ResponseStreamServerCancelAfter) {
- MAYBE_SKIP_TEST;
- ResetStub();
- ReadClient test{stub_.get(), CANCEL_AFTER_PROCESSING};
- test.Await();
- // Make sure that the server interceptors were notified
- if (GetParam().use_interceptors) {
- EXPECT_EQ(20, DummyInterceptor::GetNumTimesCancel());
- }
-}
-
-class BidiClient
- : public grpc::experimental::ClientBidiReactor<EchoRequest, EchoResponse> {
- public:
- BidiClient(grpc::testing::EchoTestService::Stub* stub,
- ServerTryCancelRequestPhase server_try_cancel,
+ reads_complete_++;
+ if (client_cancel_.cancel &&
+ reads_complete_ == client_cancel_.ops_before_cancel) {
+ context_.TryCancel();
+ }
+ // Even if we cancel, read until failure because there might be responses
+ // pending
+ StartRead(&response_);
+ }
+ }
+ void OnDone(const Status& s) override {
+ gpr_log(GPR_INFO, "Read %d messages", reads_complete_);
+ switch (server_try_cancel_) {
+ case DO_NOT_CANCEL:
+ if (!client_cancel_.cancel || client_cancel_.ops_before_cancel >
+ kServerDefaultResponseStreamsToSend) {
+ EXPECT_TRUE(s.ok());
+ EXPECT_EQ(reads_complete_, kServerDefaultResponseStreamsToSend);
+ } else {
+ EXPECT_GE(reads_complete_, client_cancel_.ops_before_cancel);
+ EXPECT_LE(reads_complete_, kServerDefaultResponseStreamsToSend);
+ // Status might be ok or cancelled depending on whether server
+ // sent status before client cancel went through
+ if (!s.ok()) {
+ EXPECT_EQ(grpc::StatusCode::CANCELLED, s.error_code());
+ }
+ }
+ break;
+ case CANCEL_BEFORE_PROCESSING:
+ EXPECT_FALSE(s.ok());
+ EXPECT_EQ(grpc::StatusCode::CANCELLED, s.error_code());
+ EXPECT_EQ(reads_complete_, 0);
+ break;
+ case CANCEL_DURING_PROCESSING:
+ case CANCEL_AFTER_PROCESSING:
+ // If server canceled while writing messages, client must have read
+ // less than or equal to the expected number of messages. Even if the
+ // server canceled after writing all messages, the RPC may be canceled
+ // before the Client got a chance to read all the messages.
+ EXPECT_FALSE(s.ok());
+ EXPECT_EQ(grpc::StatusCode::CANCELLED, s.error_code());
+ EXPECT_LE(reads_complete_, kServerDefaultResponseStreamsToSend);
+ break;
+ default:
+ assert(false);
+ }
+ std::unique_lock<std::mutex> l(mu_);
+ done_ = true;
+ cv_.notify_one();
+ }
+ void Await() {
+ std::unique_lock<std::mutex> l(mu_);
+ while (!done_) {
+ cv_.wait(l);
+ }
+ }
+
+ private:
+ EchoRequest request_;
+ EchoResponse response_;
+ ClientContext context_;
+ const ServerTryCancelRequestPhase server_try_cancel_;
+ int reads_complete_{0};
+ const ClientCancelInfo client_cancel_;
+ std::mutex mu_;
+ std::condition_variable cv_;
+ bool done_ = false;
+};
+
+TEST_P(ClientCallbackEnd2endTest, ResponseStream) {
+ MAYBE_SKIP_TEST;
+ ResetStub();
+ ReadClient test{stub_.get(), DO_NOT_CANCEL};
+ test.Await();
+ // Make sure that the server interceptors were not notified of a cancel
+ if (GetParam().use_interceptors) {
+ EXPECT_EQ(0, DummyInterceptor::GetNumTimesCancel());
+ }
+}
+
+TEST_P(ClientCallbackEnd2endTest, ClientCancelsResponseStream) {
+ MAYBE_SKIP_TEST;
+ ResetStub();
+ ReadClient test{stub_.get(), DO_NOT_CANCEL, ClientCancelInfo{2}};
+ test.Await();
+ // Because cancel in this case races with server finish, we can't be sure that
+ // server interceptors even see cancellation
+}
+
+// Server to cancel before sending any response messages
+TEST_P(ClientCallbackEnd2endTest, ResponseStreamServerCancelBefore) {
+ MAYBE_SKIP_TEST;
+ ResetStub();
+ ReadClient test{stub_.get(), CANCEL_BEFORE_PROCESSING};
+ test.Await();
+ // Make sure that the server interceptors were notified
+ if (GetParam().use_interceptors) {
+ EXPECT_EQ(20, DummyInterceptor::GetNumTimesCancel());
+ }
+}
+
+// Server to cancel while writing a response to the stream in parallel
+TEST_P(ClientCallbackEnd2endTest, ResponseStreamServerCancelDuring) {
+ MAYBE_SKIP_TEST;
+ ResetStub();
+ ReadClient test{stub_.get(), CANCEL_DURING_PROCESSING};
+ test.Await();
+ // Make sure that the server interceptors were notified
+ if (GetParam().use_interceptors) {
+ EXPECT_EQ(20, DummyInterceptor::GetNumTimesCancel());
+ }
+}
+
+// Server to cancel after writing all the responses to the stream but before
+// returning to the client
+TEST_P(ClientCallbackEnd2endTest, ResponseStreamServerCancelAfter) {
+ MAYBE_SKIP_TEST;
+ ResetStub();
+ ReadClient test{stub_.get(), CANCEL_AFTER_PROCESSING};
+ test.Await();
+ // Make sure that the server interceptors were notified
+ if (GetParam().use_interceptors) {
+ EXPECT_EQ(20, DummyInterceptor::GetNumTimesCancel());
+ }
+}
+
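Illustrative sketch, not part of the patch: WriteClient above and BidiClient below request server-side cancellation in the same way, purely through a client metadata entry that TestServiceImpl interprets. Factored into a hypothetical helper (the name RequestServerCancel is an assumption; kServerTryCancelRequest, ServerTryCancelRequestPhase, and ToString are the same helpers the tests already use), the mechanism is just:

// Illustrative only: tags a ClientContext so the test service cancels the RPC
// at the requested phase.
void RequestServerCancel(ClientContext* context,
                         ServerTryCancelRequestPhase phase) {
  if (phase != DO_NOT_CANCEL) {
    // The test service reads this metadata entry and calls TryCancel() on the
    // server context before, during, or after processing, per the phase value.
    context->AddMetadata(kServerTryCancelRequest, ToString(phase));
  }
}
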
+class BidiClient
+ : public grpc::experimental::ClientBidiReactor<EchoRequest, EchoResponse> {
+ public:
+ BidiClient(grpc::testing::EchoTestService::Stub* stub,
+ ServerTryCancelRequestPhase server_try_cancel,
int num_msgs_to_send, bool cork_metadata, bool first_write_async,
ClientCancelInfo client_cancel = {})
- : server_try_cancel_(server_try_cancel),
- msgs_to_send_{num_msgs_to_send},
- client_cancel_{client_cancel} {
- if (server_try_cancel_ != DO_NOT_CANCEL) {
- // Send server_try_cancel value in the client metadata
- context_.AddMetadata(kServerTryCancelRequest,
+ : server_try_cancel_(server_try_cancel),
+ msgs_to_send_{num_msgs_to_send},
+ client_cancel_{client_cancel} {
+ if (server_try_cancel_ != DO_NOT_CANCEL) {
+ // Send server_try_cancel value in the client metadata
+ context_.AddMetadata(kServerTryCancelRequest,
ToString(server_try_cancel));
- }
- request_.set_message("Hello fren ");
+ }
+ request_.set_message("Hello fren ");
context_.set_initial_metadata_corked(cork_metadata);
- stub->experimental_async()->BidiStream(&context_, this);
+ stub->experimental_async()->BidiStream(&context_, this);
MaybeAsyncWrite(first_write_async);
- StartRead(&response_);
- StartCall();
- }
- void OnReadDone(bool ok) override {
- if (!ok) {
- if (server_try_cancel_ == DO_NOT_CANCEL) {
- if (!client_cancel_.cancel) {
- EXPECT_EQ(reads_complete_, msgs_to_send_);
- } else {
- EXPECT_LE(reads_complete_, writes_complete_);
- }
- }
- } else {
- EXPECT_LE(reads_complete_, msgs_to_send_);
- EXPECT_EQ(response_.message(), request_.message());
- reads_complete_++;
- StartRead(&response_);
- }
- }
- void OnWriteDone(bool ok) override {
+ StartRead(&response_);
+ StartCall();
+ }
+ void OnReadDone(bool ok) override {
+ if (!ok) {
+ if (server_try_cancel_ == DO_NOT_CANCEL) {
+ if (!client_cancel_.cancel) {
+ EXPECT_EQ(reads_complete_, msgs_to_send_);
+ } else {
+ EXPECT_LE(reads_complete_, writes_complete_);
+ }
+ }
+ } else {
+ EXPECT_LE(reads_complete_, msgs_to_send_);
+ EXPECT_EQ(response_.message(), request_.message());
+ reads_complete_++;
+ StartRead(&response_);
+ }
+ }
+ void OnWriteDone(bool ok) override {
if (async_write_thread_.joinable()) {
async_write_thread_.join();
RemoveHold();
}
- if (server_try_cancel_ == DO_NOT_CANCEL) {
- EXPECT_TRUE(ok);
- } else if (!ok) {
- return;
- }
- writes_complete_++;
- MaybeWrite();
- }
- void OnDone(const Status& s) override {
- gpr_log(GPR_INFO, "Sent %d messages", writes_complete_);
- gpr_log(GPR_INFO, "Read %d messages", reads_complete_);
- switch (server_try_cancel_) {
- case DO_NOT_CANCEL:
- if (!client_cancel_.cancel ||
- client_cancel_.ops_before_cancel > msgs_to_send_) {
- EXPECT_TRUE(s.ok());
- EXPECT_EQ(writes_complete_, msgs_to_send_);
- EXPECT_EQ(reads_complete_, writes_complete_);
- } else {
- EXPECT_FALSE(s.ok());
- EXPECT_EQ(grpc::StatusCode::CANCELLED, s.error_code());
- EXPECT_EQ(writes_complete_, client_cancel_.ops_before_cancel);
- EXPECT_LE(reads_complete_, writes_complete_);
- }
- break;
- case CANCEL_BEFORE_PROCESSING:
- EXPECT_FALSE(s.ok());
- EXPECT_EQ(grpc::StatusCode::CANCELLED, s.error_code());
- // The RPC is canceled before the server did any work or returned any
- // reads, but it's possible that some writes took place first from the
- // client
- EXPECT_LE(writes_complete_, msgs_to_send_);
- EXPECT_EQ(reads_complete_, 0);
- break;
- case CANCEL_DURING_PROCESSING:
- EXPECT_FALSE(s.ok());
- EXPECT_EQ(grpc::StatusCode::CANCELLED, s.error_code());
- EXPECT_LE(writes_complete_, msgs_to_send_);
- EXPECT_LE(reads_complete_, writes_complete_);
- break;
- case CANCEL_AFTER_PROCESSING:
- EXPECT_FALSE(s.ok());
- EXPECT_EQ(grpc::StatusCode::CANCELLED, s.error_code());
- EXPECT_EQ(writes_complete_, msgs_to_send_);
- // The Server canceled after reading the last message and after writing
- // the message to the client. However, the RPC cancellation might have
- // taken effect before the client actually read the response.
- EXPECT_LE(reads_complete_, writes_complete_);
- break;
- default:
- assert(false);
- }
- std::unique_lock<std::mutex> l(mu_);
- done_ = true;
- cv_.notify_one();
- }
- void Await() {
- std::unique_lock<std::mutex> l(mu_);
- while (!done_) {
- cv_.wait(l);
- }
- }
-
- private:
+ if (server_try_cancel_ == DO_NOT_CANCEL) {
+ EXPECT_TRUE(ok);
+ } else if (!ok) {
+ return;
+ }
+ writes_complete_++;
+ MaybeWrite();
+ }
+ void OnDone(const Status& s) override {
+ gpr_log(GPR_INFO, "Sent %d messages", writes_complete_);
+ gpr_log(GPR_INFO, "Read %d messages", reads_complete_);
+ switch (server_try_cancel_) {
+ case DO_NOT_CANCEL:
+ if (!client_cancel_.cancel ||
+ client_cancel_.ops_before_cancel > msgs_to_send_) {
+ EXPECT_TRUE(s.ok());
+ EXPECT_EQ(writes_complete_, msgs_to_send_);
+ EXPECT_EQ(reads_complete_, writes_complete_);
+ } else {
+ EXPECT_FALSE(s.ok());
+ EXPECT_EQ(grpc::StatusCode::CANCELLED, s.error_code());
+ EXPECT_EQ(writes_complete_, client_cancel_.ops_before_cancel);
+ EXPECT_LE(reads_complete_, writes_complete_);
+ }
+ break;
+ case CANCEL_BEFORE_PROCESSING:
+ EXPECT_FALSE(s.ok());
+ EXPECT_EQ(grpc::StatusCode::CANCELLED, s.error_code());
+ // The RPC is canceled before the server has done any work or returned
+ // any reads, but it is possible that some client writes went out before
+ // the cancellation took effect.
+ EXPECT_LE(writes_complete_, msgs_to_send_);
+ EXPECT_EQ(reads_complete_, 0);
+ break;
+ case CANCEL_DURING_PROCESSING:
+ EXPECT_FALSE(s.ok());
+ EXPECT_EQ(grpc::StatusCode::CANCELLED, s.error_code());
+ EXPECT_LE(writes_complete_, msgs_to_send_);
+ EXPECT_LE(reads_complete_, writes_complete_);
+ break;
+ case CANCEL_AFTER_PROCESSING:
+ EXPECT_FALSE(s.ok());
+ EXPECT_EQ(grpc::StatusCode::CANCELLED, s.error_code());
+ EXPECT_EQ(writes_complete_, msgs_to_send_);
+ // The server canceled after reading the last message and after writing
+ // the response to the client. However, the RPC cancellation might have
+ // taken effect before the client actually read the response.
+ EXPECT_LE(reads_complete_, writes_complete_);
+ break;
+ default:
+ assert(false);
+ }
+ std::unique_lock<std::mutex> l(mu_);
+ done_ = true;
+ cv_.notify_one();
+ }
+ void Await() {
+ std::unique_lock<std::mutex> l(mu_);
+ while (!done_) {
+ cv_.wait(l);
+ }
+ }
+
+ private:
void MaybeAsyncWrite(bool first_write_async) {
if (first_write_async) {
// Make sure that we have a write to issue.
@@ -1210,46 +1210,46 @@ class BidiClient
}
MaybeWrite();
}
- void MaybeWrite() {
- if (client_cancel_.cancel &&
- writes_complete_ == client_cancel_.ops_before_cancel) {
- context_.TryCancel();
- } else if (writes_complete_ == msgs_to_send_) {
- StartWritesDone();
- } else {
- StartWrite(&request_);
- }
- }
- EchoRequest request_;
- EchoResponse response_;
- ClientContext context_;
- const ServerTryCancelRequestPhase server_try_cancel_;
- int reads_complete_{0};
- int writes_complete_{0};
- const int msgs_to_send_;
- const ClientCancelInfo client_cancel_;
- std::mutex mu_;
- std::condition_variable cv_;
- bool done_ = false;
+ void MaybeWrite() {
+ if (client_cancel_.cancel &&
+ writes_complete_ == client_cancel_.ops_before_cancel) {
+ context_.TryCancel();
+ } else if (writes_complete_ == msgs_to_send_) {
+ StartWritesDone();
+ } else {
+ StartWrite(&request_);
+ }
+ }
+ EchoRequest request_;
+ EchoResponse response_;
+ ClientContext context_;
+ const ServerTryCancelRequestPhase server_try_cancel_;
+ int reads_complete_{0};
+ int writes_complete_{0};
+ const int msgs_to_send_;
+ const ClientCancelInfo client_cancel_;
+ std::mutex mu_;
+ std::condition_variable cv_;
+ bool done_ = false;
std::thread async_write_thread_;
bool async_write_thread_start_ = false;
std::mutex async_write_thread_mu_;
std::condition_variable async_write_thread_cv_;
-};
-
-TEST_P(ClientCallbackEnd2endTest, BidiStream) {
- MAYBE_SKIP_TEST;
- ResetStub();
+};
+
+TEST_P(ClientCallbackEnd2endTest, BidiStream) {
+ MAYBE_SKIP_TEST;
+ ResetStub();
BidiClient test(stub_.get(), DO_NOT_CANCEL,
kServerDefaultResponseStreamsToSend,
/*cork_metadata=*/false, /*first_write_async=*/false);
- test.Await();
- // Make sure that the server interceptors were not notified of a cancel
- if (GetParam().use_interceptors) {
- EXPECT_EQ(0, DummyInterceptor::GetNumTimesCancel());
- }
-}
-
+ test.Await();
+ // Make sure that the server interceptors were not notified of a cancel
+ if (GetParam().use_interceptors) {
+ EXPECT_EQ(0, DummyInterceptor::GetNumTimesCancel());
+ }
+}
+
TEST_P(ClientCallbackEnd2endTest, BidiStreamFirstWriteAsync) {
MAYBE_SKIP_TEST;
ResetStub();
@@ -1289,277 +1289,277 @@ TEST_P(ClientCallbackEnd2endTest, BidiStreamCorkedFirstWriteAsync) {
}
}
-TEST_P(ClientCallbackEnd2endTest, ClientCancelsBidiStream) {
- MAYBE_SKIP_TEST;
- ResetStub();
+TEST_P(ClientCallbackEnd2endTest, ClientCancelsBidiStream) {
+ MAYBE_SKIP_TEST;
+ ResetStub();
BidiClient test(stub_.get(), DO_NOT_CANCEL,
kServerDefaultResponseStreamsToSend,
/*cork_metadata=*/false, /*first_write_async=*/false,
ClientCancelInfo(2));
- test.Await();
- // Make sure that the server interceptors were notified of a cancel
- if (GetParam().use_interceptors) {
- EXPECT_EQ(20, DummyInterceptor::GetNumTimesCancel());
- }
-}
-
-// Server to cancel before reading/writing any requests/responses on the stream
-TEST_P(ClientCallbackEnd2endTest, BidiStreamServerCancelBefore) {
- MAYBE_SKIP_TEST;
- ResetStub();
+ test.Await();
+ // Make sure that the server interceptors were notified of a cancel
+ if (GetParam().use_interceptors) {
+ EXPECT_EQ(20, DummyInterceptor::GetNumTimesCancel());
+ }
+}
+
+// Server to cancel before reading/writing any requests/responses on the stream
+TEST_P(ClientCallbackEnd2endTest, BidiStreamServerCancelBefore) {
+ MAYBE_SKIP_TEST;
+ ResetStub();
BidiClient test(stub_.get(), CANCEL_BEFORE_PROCESSING, /*num_msgs_to_send=*/2,
/*cork_metadata=*/false, /*first_write_async=*/false);
- test.Await();
- // Make sure that the server interceptors were notified
- if (GetParam().use_interceptors) {
- EXPECT_EQ(20, DummyInterceptor::GetNumTimesCancel());
- }
-}
-
-// Server to cancel while reading/writing requests/responses on the stream in
-// parallel
-TEST_P(ClientCallbackEnd2endTest, BidiStreamServerCancelDuring) {
- MAYBE_SKIP_TEST;
- ResetStub();
+ test.Await();
+ // Make sure that the server interceptors were notified
+ if (GetParam().use_interceptors) {
+ EXPECT_EQ(20, DummyInterceptor::GetNumTimesCancel());
+ }
+}
+
+// Server to cancel while reading/writing requests/responses on the stream in
+// parallel
+TEST_P(ClientCallbackEnd2endTest, BidiStreamServerCancelDuring) {
+ MAYBE_SKIP_TEST;
+ ResetStub();
BidiClient test(stub_.get(), CANCEL_DURING_PROCESSING,
/*num_msgs_to_send=*/10, /*cork_metadata=*/false,
/*first_write_async=*/false);
- test.Await();
- // Make sure that the server interceptors were notified
- if (GetParam().use_interceptors) {
- EXPECT_EQ(20, DummyInterceptor::GetNumTimesCancel());
- }
-}
-
-// Server to cancel after reading/writing all requests/responses on the stream
-// but before returning to the client
-TEST_P(ClientCallbackEnd2endTest, BidiStreamServerCancelAfter) {
- MAYBE_SKIP_TEST;
- ResetStub();
+ test.Await();
+ // Make sure that the server interceptors were notified
+ if (GetParam().use_interceptors) {
+ EXPECT_EQ(20, DummyInterceptor::GetNumTimesCancel());
+ }
+}
+
+// Server to cancel after reading/writing all requests/responses on the stream
+// but before returning to the client
+TEST_P(ClientCallbackEnd2endTest, BidiStreamServerCancelAfter) {
+ MAYBE_SKIP_TEST;
+ ResetStub();
BidiClient test(stub_.get(), CANCEL_AFTER_PROCESSING, /*num_msgs_to_send=*/5,
/*cork_metadata=*/false, /*first_write_async=*/false);
- test.Await();
- // Make sure that the server interceptors were notified
- if (GetParam().use_interceptors) {
- EXPECT_EQ(20, DummyInterceptor::GetNumTimesCancel());
- }
-}
-
-TEST_P(ClientCallbackEnd2endTest, SimultaneousReadAndWritesDone) {
- MAYBE_SKIP_TEST;
- ResetStub();
- class Client : public grpc::experimental::ClientBidiReactor<EchoRequest,
- EchoResponse> {
- public:
- Client(grpc::testing::EchoTestService::Stub* stub) {
- request_.set_message("Hello bidi ");
- stub->experimental_async()->BidiStream(&context_, this);
- StartWrite(&request_);
- StartCall();
- }
- void OnReadDone(bool ok) override {
- EXPECT_TRUE(ok);
- EXPECT_EQ(response_.message(), request_.message());
- }
- void OnWriteDone(bool ok) override {
- EXPECT_TRUE(ok);
- // Now send out the simultaneous Read and WritesDone
- StartWritesDone();
- StartRead(&response_);
- }
- void OnDone(const Status& s) override {
- EXPECT_TRUE(s.ok());
- EXPECT_EQ(response_.message(), request_.message());
- std::unique_lock<std::mutex> l(mu_);
- done_ = true;
- cv_.notify_one();
- }
- void Await() {
- std::unique_lock<std::mutex> l(mu_);
- while (!done_) {
- cv_.wait(l);
- }
- }
-
- private:
- EchoRequest request_;
- EchoResponse response_;
- ClientContext context_;
- std::mutex mu_;
- std::condition_variable cv_;
- bool done_ = false;
- } test{stub_.get()};
-
- test.Await();
-}
-
-TEST_P(ClientCallbackEnd2endTest, UnimplementedRpc) {
- MAYBE_SKIP_TEST;
- ChannelArguments args;
- const auto& channel_creds = GetCredentialsProvider()->GetChannelCredentials(
- GetParam().credentials_type, &args);
- std::shared_ptr<Channel> channel =
- (GetParam().protocol == Protocol::TCP)
- ? ::grpc::CreateCustomChannel(server_address_.str(), channel_creds,
- args)
- : server_->InProcessChannel(args);
- std::unique_ptr<grpc::testing::UnimplementedEchoService::Stub> stub;
- stub = grpc::testing::UnimplementedEchoService::NewStub(channel);
- EchoRequest request;
- EchoResponse response;
- ClientContext cli_ctx;
- request.set_message("Hello world.");
- std::mutex mu;
- std::condition_variable cv;
- bool done = false;
- stub->experimental_async()->Unimplemented(
- &cli_ctx, &request, &response, [&done, &mu, &cv](Status s) {
- EXPECT_EQ(StatusCode::UNIMPLEMENTED, s.error_code());
- EXPECT_EQ("", s.error_message());
-
- std::lock_guard<std::mutex> l(mu);
- done = true;
- cv.notify_one();
- });
- std::unique_lock<std::mutex> l(mu);
- while (!done) {
- cv.wait(l);
- }
-}
-
-TEST_P(ClientCallbackEnd2endTest,
- ResponseStreamExtraReactionFlowReadsUntilDone) {
- MAYBE_SKIP_TEST;
- ResetStub();
- class ReadAllIncomingDataClient
- : public grpc::experimental::ClientReadReactor<EchoResponse> {
- public:
- ReadAllIncomingDataClient(grpc::testing::EchoTestService::Stub* stub) {
- request_.set_message("Hello client ");
- stub->experimental_async()->ResponseStream(&context_, &request_, this);
- }
- bool WaitForReadDone() {
- std::unique_lock<std::mutex> l(mu_);
- while (!read_done_) {
- read_cv_.wait(l);
- }
- read_done_ = false;
- return read_ok_;
- }
- void Await() {
- std::unique_lock<std::mutex> l(mu_);
- while (!done_) {
- done_cv_.wait(l);
- }
- }
+ test.Await();
+ // Make sure that the server interceptors were notified
+ if (GetParam().use_interceptors) {
+ EXPECT_EQ(20, DummyInterceptor::GetNumTimesCancel());
+ }
+}
+
+TEST_P(ClientCallbackEnd2endTest, SimultaneousReadAndWritesDone) {
+ MAYBE_SKIP_TEST;
+ ResetStub();
+ class Client : public grpc::experimental::ClientBidiReactor<EchoRequest,
+ EchoResponse> {
+ public:
+ Client(grpc::testing::EchoTestService::Stub* stub) {
+ request_.set_message("Hello bidi ");
+ stub->experimental_async()->BidiStream(&context_, this);
+ StartWrite(&request_);
+ StartCall();
+ }
+ void OnReadDone(bool ok) override {
+ EXPECT_TRUE(ok);
+ EXPECT_EQ(response_.message(), request_.message());
+ }
+ void OnWriteDone(bool ok) override {
+ EXPECT_TRUE(ok);
+ // Now send out the simultaneous Read and WritesDone
+ StartWritesDone();
+ StartRead(&response_);
+ }
+ void OnDone(const Status& s) override {
+ EXPECT_TRUE(s.ok());
+ EXPECT_EQ(response_.message(), request_.message());
+ std::unique_lock<std::mutex> l(mu_);
+ done_ = true;
+ cv_.notify_one();
+ }
+ void Await() {
+ std::unique_lock<std::mutex> l(mu_);
+ while (!done_) {
+ cv_.wait(l);
+ }
+ }
+
+ private:
+ EchoRequest request_;
+ EchoResponse response_;
+ ClientContext context_;
+ std::mutex mu_;
+ std::condition_variable cv_;
+ bool done_ = false;
+ } test{stub_.get()};
+
+ test.Await();
+}
+
+TEST_P(ClientCallbackEnd2endTest, UnimplementedRpc) {
+ MAYBE_SKIP_TEST;
+ ChannelArguments args;
+ const auto& channel_creds = GetCredentialsProvider()->GetChannelCredentials(
+ GetParam().credentials_type, &args);
+ std::shared_ptr<Channel> channel =
+ (GetParam().protocol == Protocol::TCP)
+ ? ::grpc::CreateCustomChannel(server_address_.str(), channel_creds,
+ args)
+ : server_->InProcessChannel(args);
+ std::unique_ptr<grpc::testing::UnimplementedEchoService::Stub> stub;
+ stub = grpc::testing::UnimplementedEchoService::NewStub(channel);
+ EchoRequest request;
+ EchoResponse response;
+ ClientContext cli_ctx;
+ request.set_message("Hello world.");
+ std::mutex mu;
+ std::condition_variable cv;
+ bool done = false;
+ stub->experimental_async()->Unimplemented(
+ &cli_ctx, &request, &response, [&done, &mu, &cv](Status s) {
+ EXPECT_EQ(StatusCode::UNIMPLEMENTED, s.error_code());
+ EXPECT_EQ("", s.error_message());
+
+ std::lock_guard<std::mutex> l(mu);
+ done = true;
+ cv.notify_one();
+ });
+ std::unique_lock<std::mutex> l(mu);
+ while (!done) {
+ cv.wait(l);
+ }
+}
+
+TEST_P(ClientCallbackEnd2endTest,
+ ResponseStreamExtraReactionFlowReadsUntilDone) {
+ MAYBE_SKIP_TEST;
+ ResetStub();
+ class ReadAllIncomingDataClient
+ : public grpc::experimental::ClientReadReactor<EchoResponse> {
+ public:
+ ReadAllIncomingDataClient(grpc::testing::EchoTestService::Stub* stub) {
+ request_.set_message("Hello client ");
+ stub->experimental_async()->ResponseStream(&context_, &request_, this);
+ }
+ bool WaitForReadDone() {
+ std::unique_lock<std::mutex> l(mu_);
+ while (!read_done_) {
+ read_cv_.wait(l);
+ }
+ read_done_ = false;
+ return read_ok_;
+ }
+ void Await() {
+ std::unique_lock<std::mutex> l(mu_);
+ while (!done_) {
+ done_cv_.wait(l);
+ }
+ }
// RemoveHold under the same lock used for OnDone to make sure that we don't
// call OnDone directly or indirectly from the RemoveHold function.
void RemoveHoldUnderLock() {
std::unique_lock<std::mutex> l(mu_);
RemoveHold();
}
- const Status& status() {
- std::unique_lock<std::mutex> l(mu_);
- return status_;
- }
-
- private:
- void OnReadDone(bool ok) override {
- std::unique_lock<std::mutex> l(mu_);
- read_ok_ = ok;
- read_done_ = true;
- read_cv_.notify_one();
- }
- void OnDone(const Status& s) override {
- std::unique_lock<std::mutex> l(mu_);
- done_ = true;
- status_ = s;
- done_cv_.notify_one();
- }
-
- EchoRequest request_;
- EchoResponse response_;
- ClientContext context_;
- bool read_ok_ = false;
- bool read_done_ = false;
- std::mutex mu_;
- std::condition_variable read_cv_;
- std::condition_variable done_cv_;
- bool done_ = false;
- Status status_;
- } client{stub_.get()};
-
- int reads_complete = 0;
- client.AddHold();
- client.StartCall();
-
- EchoResponse response;
- bool read_ok = true;
- while (read_ok) {
- client.StartRead(&response);
- read_ok = client.WaitForReadDone();
- if (read_ok) {
- ++reads_complete;
- }
- }
+ const Status& status() {
+ std::unique_lock<std::mutex> l(mu_);
+ return status_;
+ }
+
+ private:
+ void OnReadDone(bool ok) override {
+ std::unique_lock<std::mutex> l(mu_);
+ read_ok_ = ok;
+ read_done_ = true;
+ read_cv_.notify_one();
+ }
+ void OnDone(const Status& s) override {
+ std::unique_lock<std::mutex> l(mu_);
+ done_ = true;
+ status_ = s;
+ done_cv_.notify_one();
+ }
+
+ EchoRequest request_;
+ EchoResponse response_;
+ ClientContext context_;
+ bool read_ok_ = false;
+ bool read_done_ = false;
+ std::mutex mu_;
+ std::condition_variable read_cv_;
+ std::condition_variable done_cv_;
+ bool done_ = false;
+ Status status_;
+ } client{stub_.get()};
+
+ int reads_complete = 0;
+ client.AddHold();
+ client.StartCall();
+
+ EchoResponse response;
+ bool read_ok = true;
+ while (read_ok) {
+ client.StartRead(&response);
+ read_ok = client.WaitForReadDone();
+ if (read_ok) {
+ ++reads_complete;
+ }
+ }
client.RemoveHoldUnderLock();
- client.Await();
-
- EXPECT_EQ(kServerDefaultResponseStreamsToSend, reads_complete);
- EXPECT_EQ(client.status().error_code(), grpc::StatusCode::OK);
-}
-
-std::vector<TestScenario> CreateTestScenarios(bool test_insecure) {
-#if TARGET_OS_IPHONE
- // Workaround Apple CFStream bug
- gpr_setenv("grpc_cfstream", "0");
-#endif
-
- std::vector<TestScenario> scenarios;
+ client.Await();
+
+ EXPECT_EQ(kServerDefaultResponseStreamsToSend, reads_complete);
+ EXPECT_EQ(client.status().error_code(), grpc::StatusCode::OK);
+}
+
+std::vector<TestScenario> CreateTestScenarios(bool test_insecure) {
+#if TARGET_OS_IPHONE
+ // Workaround Apple CFStream bug
+ gpr_setenv("grpc_cfstream", "0");
+#endif
+
+ std::vector<TestScenario> scenarios;
std::vector<TString> credentials_types{
- GetCredentialsProvider()->GetSecureCredentialsTypeList()};
- auto insec_ok = [] {
- // Only allow the insecure credentials type when it is registered with the
- // provider; users may create providers that do not support insecure.
- return GetCredentialsProvider()->GetChannelCredentials(
- kInsecureCredentialsType, nullptr) != nullptr;
- };
- if (test_insecure && insec_ok()) {
- credentials_types.push_back(kInsecureCredentialsType);
- }
- GPR_ASSERT(!credentials_types.empty());
-
- bool barr[]{false, true};
- Protocol parr[]{Protocol::INPROC, Protocol::TCP};
- for (Protocol p : parr) {
- for (const auto& cred : credentials_types) {
- // TODO(vjpai): Test inproc with secure credentials when feasible
- if (p == Protocol::INPROC &&
- (cred != kInsecureCredentialsType || !insec_ok())) {
- continue;
- }
- for (bool callback_server : barr) {
- for (bool use_interceptors : barr) {
- scenarios.emplace_back(callback_server, p, use_interceptors, cred);
- }
- }
- }
- }
- return scenarios;
-}
-
-INSTANTIATE_TEST_SUITE_P(ClientCallbackEnd2endTest, ClientCallbackEnd2endTest,
- ::testing::ValuesIn(CreateTestScenarios(true)));
-
-} // namespace
-} // namespace testing
-} // namespace grpc
-
-int main(int argc, char** argv) {
+ GetCredentialsProvider()->GetSecureCredentialsTypeList()};
+ auto insec_ok = [] {
+ // Only allow the insecure credentials type when it is registered with the
+ // provider; users may create providers that do not support insecure.
+ return GetCredentialsProvider()->GetChannelCredentials(
+ kInsecureCredentialsType, nullptr) != nullptr;
+ };
+ if (test_insecure && insec_ok()) {
+ credentials_types.push_back(kInsecureCredentialsType);
+ }
+ GPR_ASSERT(!credentials_types.empty());
+
+ bool barr[]{false, true};
+ Protocol parr[]{Protocol::INPROC, Protocol::TCP};
+ for (Protocol p : parr) {
+ for (const auto& cred : credentials_types) {
+ // TODO(vjpai): Test inproc with secure credentials when feasible
+ if (p == Protocol::INPROC &&
+ (cred != kInsecureCredentialsType || !insec_ok())) {
+ continue;
+ }
+ for (bool callback_server : barr) {
+ for (bool use_interceptors : barr) {
+ scenarios.emplace_back(callback_server, p, use_interceptors, cred);
+ }
+ }
+ }
+ }
+ return scenarios;
+}
+
+INSTANTIATE_TEST_SUITE_P(ClientCallbackEnd2endTest, ClientCallbackEnd2endTest,
+ ::testing::ValuesIn(CreateTestScenarios(true)));
+
+} // namespace
+} // namespace testing
+} // namespace grpc
+
+int main(int argc, char** argv) {
::testing::InitGoogleTest(&argc, argv);
grpc::testing::TestEnvironment env(argc, argv);
- grpc_init();
- int ret = RUN_ALL_TESTS();
- grpc_shutdown();
- return ret;
-}
+ grpc_init();
+ int ret = RUN_ALL_TESTS();
+ grpc_shutdown();
+ return ret;
+}
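A minimal sketch of the callback (reactor) bidi pattern that the tests above exercise, using the generated Echo stubs from "src/proto/grpc/testing/echo.grpc.pb.h". MinimalBidiClient is an illustrative name rather than code from the file above, and it drives a single write where the tests drive many:

#include <condition_variable>
#include <mutex>

#include <grpcpp/grpcpp.h>

#include "src/proto/grpc/testing/echo.grpc.pb.h"

class MinimalBidiClient
    : public grpc::experimental::ClientBidiReactor<grpc::testing::EchoRequest,
                                                   grpc::testing::EchoResponse> {
 public:
  explicit MinimalBidiClient(grpc::testing::EchoTestService::Stub* stub) {
    request_.set_message("ping");
    stub->experimental_async()->BidiStream(&context_, this);
    StartWrite(&request_);  // first (and only) write
    StartRead(&response_);  // keep exactly one read outstanding
    StartCall();            // activate the reactor; callbacks may fire now
  }
  void OnWriteDone(bool ok) override {
    if (ok) StartWritesDone();  // half-close after the single write
  }
  void OnReadDone(bool ok) override {
    if (ok) StartRead(&response_);  // keep reading until the stream ends
  }
  void OnDone(const grpc::Status& /*s*/) override {
    std::lock_guard<std::mutex> l(mu_);
    done_ = true;
    cv_.notify_one();
  }
  void Await() {
    std::unique_lock<std::mutex> l(mu_);
    cv_.wait(l, [this] { return done_; });
  }

 private:
  grpc::testing::EchoRequest request_;
  grpc::testing::EchoResponse response_;
  grpc::ClientContext context_;
  std::mutex mu_;
  std::condition_variable cv_;
  bool done_ = false;
};

As in the tests above, OnDone is the only point at which the RPC is known to be fully complete, so completion is handed back to the caller through a condition variable and Await().
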
diff --git a/contrib/libs/grpc/test/cpp/end2end/client_crash_test.cc b/contrib/libs/grpc/test/cpp/end2end/client_crash_test.cc
index 93131f8104..80e1869396 100644
--- a/contrib/libs/grpc/test/cpp/end2end/client_crash_test.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/client_crash_test.cc
@@ -1,147 +1,147 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include <grpc/grpc.h>
-#include <grpc/support/log.h>
-#include <grpc/support/time.h>
-#include <grpcpp/channel.h>
-#include <grpcpp/client_context.h>
-#include <grpcpp/create_channel.h>
-#include <grpcpp/server.h>
-#include <grpcpp/server_builder.h>
-#include <grpcpp/server_context.h>
-
-#include "src/proto/grpc/testing/duplicate/echo_duplicate.grpc.pb.h"
-#include "src/proto/grpc/testing/echo.grpc.pb.h"
-#include "test/core/util/port.h"
-#include "test/core/util/test_config.h"
-#include "test/cpp/util/subprocess.h"
-
-#include <gtest/gtest.h>
-
-using grpc::testing::EchoRequest;
-using grpc::testing::EchoResponse;
-using std::chrono::system_clock;
-
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpc/grpc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/time.h>
+#include <grpcpp/channel.h>
+#include <grpcpp/client_context.h>
+#include <grpcpp/create_channel.h>
+#include <grpcpp/server.h>
+#include <grpcpp/server_builder.h>
+#include <grpcpp/server_context.h>
+
+#include "src/proto/grpc/testing/duplicate/echo_duplicate.grpc.pb.h"
+#include "src/proto/grpc/testing/echo.grpc.pb.h"
+#include "test/core/util/port.h"
+#include "test/core/util/test_config.h"
+#include "test/cpp/util/subprocess.h"
+
+#include <gtest/gtest.h>
+
+using grpc::testing::EchoRequest;
+using grpc::testing::EchoResponse;
+using std::chrono::system_clock;
+
static TString g_root;
-
-namespace grpc {
-namespace testing {
-
-namespace {
-
-class CrashTest : public ::testing::Test {
- protected:
- CrashTest() {}
-
- std::unique_ptr<grpc::testing::EchoTestService::Stub> CreateServerAndStub() {
- auto port = grpc_pick_unused_port_or_die();
- std::ostringstream addr_stream;
- addr_stream << "localhost:" << port;
- auto addr = addr_stream.str();
- server_.reset(new SubProcess({
- g_root + "/client_crash_test_server",
- "--address=" + addr,
- }));
- GPR_ASSERT(server_);
- return grpc::testing::EchoTestService::NewStub(
- grpc::CreateChannel(addr, InsecureChannelCredentials()));
- }
-
- void KillServer() { server_.reset(); }
-
- private:
- std::unique_ptr<SubProcess> server_;
-};
-
-TEST_F(CrashTest, KillBeforeWrite) {
- auto stub = CreateServerAndStub();
-
- EchoRequest request;
- EchoResponse response;
- ClientContext context;
- context.set_wait_for_ready(true);
-
- auto stream = stub->BidiStream(&context);
-
- request.set_message("Hello");
- EXPECT_TRUE(stream->Write(request));
- EXPECT_TRUE(stream->Read(&response));
- EXPECT_EQ(response.message(), request.message());
-
- KillServer();
-
- request.set_message("You should be dead");
- // This may succeed or fail depending on the state of the TCP connection
- stream->Write(request);
- // But the read will definitely fail
- EXPECT_FALSE(stream->Read(&response));
-
- EXPECT_FALSE(stream->Finish().ok());
-}
-
-TEST_F(CrashTest, KillAfterWrite) {
- auto stub = CreateServerAndStub();
-
- EchoRequest request;
- EchoResponse response;
- ClientContext context;
- context.set_wait_for_ready(true);
-
- auto stream = stub->BidiStream(&context);
-
- request.set_message("Hello");
- EXPECT_TRUE(stream->Write(request));
- EXPECT_TRUE(stream->Read(&response));
- EXPECT_EQ(response.message(), request.message());
-
- request.set_message("I'm going to kill you");
- EXPECT_TRUE(stream->Write(request));
-
- KillServer();
-
- // This may succeed or fail depending on how quick the server was
- stream->Read(&response);
-
- EXPECT_FALSE(stream->Finish().ok());
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace grpc
-
-int main(int argc, char** argv) {
+
+namespace grpc {
+namespace testing {
+
+namespace {
+
+class CrashTest : public ::testing::Test {
+ protected:
+ CrashTest() {}
+
+ std::unique_ptr<grpc::testing::EchoTestService::Stub> CreateServerAndStub() {
+ auto port = grpc_pick_unused_port_or_die();
+ std::ostringstream addr_stream;
+ addr_stream << "localhost:" << port;
+ auto addr = addr_stream.str();
+ server_.reset(new SubProcess({
+ g_root + "/client_crash_test_server",
+ "--address=" + addr,
+ }));
+ GPR_ASSERT(server_);
+ return grpc::testing::EchoTestService::NewStub(
+ grpc::CreateChannel(addr, InsecureChannelCredentials()));
+ }
+
+ void KillServer() { server_.reset(); }
+
+ private:
+ std::unique_ptr<SubProcess> server_;
+};
+
+TEST_F(CrashTest, KillBeforeWrite) {
+ auto stub = CreateServerAndStub();
+
+ EchoRequest request;
+ EchoResponse response;
+ ClientContext context;
+ context.set_wait_for_ready(true);
+
+ auto stream = stub->BidiStream(&context);
+
+ request.set_message("Hello");
+ EXPECT_TRUE(stream->Write(request));
+ EXPECT_TRUE(stream->Read(&response));
+ EXPECT_EQ(response.message(), request.message());
+
+ KillServer();
+
+ request.set_message("You should be dead");
+ // This may succeed or fail depending on the state of the TCP connection
+ stream->Write(request);
+ // But the read will definitely fail
+ EXPECT_FALSE(stream->Read(&response));
+
+ EXPECT_FALSE(stream->Finish().ok());
+}
+
+TEST_F(CrashTest, KillAfterWrite) {
+ auto stub = CreateServerAndStub();
+
+ EchoRequest request;
+ EchoResponse response;
+ ClientContext context;
+ context.set_wait_for_ready(true);
+
+ auto stream = stub->BidiStream(&context);
+
+ request.set_message("Hello");
+ EXPECT_TRUE(stream->Write(request));
+ EXPECT_TRUE(stream->Read(&response));
+ EXPECT_EQ(response.message(), request.message());
+
+ request.set_message("I'm going to kill you");
+ EXPECT_TRUE(stream->Write(request));
+
+ KillServer();
+
+ // This may succeed or fail depending on how quick the server was
+ stream->Read(&response);
+
+ EXPECT_FALSE(stream->Finish().ok());
+}
+
+} // namespace
+
+} // namespace testing
+} // namespace grpc
+
+int main(int argc, char** argv) {
TString me = argv[0];
- auto lslash = me.rfind('/');
+ auto lslash = me.rfind('/');
if (lslash != TString::npos) {
- g_root = me.substr(0, lslash);
- } else {
- g_root = ".";
- }
-
- grpc::testing::TestEnvironment env(argc, argv);
- ::testing::InitGoogleTest(&argc, argv);
- // Test order seems to matter here; run the suite three times to rule that out
- for (int i = 0; i < 3; i++) {
- if (RUN_ALL_TESTS() != 0) {
- return 1;
- }
- }
- return 0;
-}
+ g_root = me.substr(0, lslash);
+ } else {
+ g_root = ".";
+ }
+
+ grpc::testing::TestEnvironment env(argc, argv);
+ ::testing::InitGoogleTest(&argc, argv);
+ // Test order seems to matter here; run the suite three times to rule that out
+ for (int i = 0; i < 3; i++) {
+ if (RUN_ALL_TESTS() != 0) {
+ return 1;
+ }
+ }
+ return 0;
+}
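The crash test above leans on one property of the synchronous streaming API: after the peer dies, Write() may still appear to succeed because the bytes can sit in TCP buffers, while a subsequent Read() returns false and Finish() returns a non-OK status. Below is a small sketch of that pattern using the same Echo stubs; EchoOnce is an illustrative helper, and the UNAVAILABLE remark describes a common outcome rather than a guarantee (the test itself only asserts that the status is not OK):

#include <grpcpp/grpcpp.h>

#include "src/proto/grpc/testing/echo.grpc.pb.h"

// Runs one echo round trip over the bidi stream and returns the final status.
grpc::Status EchoOnce(grpc::testing::EchoTestService::Stub* stub) {
  grpc::ClientContext context;
  context.set_wait_for_ready(true);  // tolerate a server that is still starting

  auto stream = stub->BidiStream(&context);

  grpc::testing::EchoRequest request;
  grpc::testing::EchoResponse response;
  request.set_message("Hello");

  // Write() returning true only means the message was accepted locally; it can
  // still "succeed" against a dead peer while the bytes sit in TCP buffers.
  const bool wrote = stream->Write(request);
  // Read() returning true is the real signal that the server answered.
  const bool got_reply = wrote && stream->Read(&response);
  (void)got_reply;

  // Finish() implicitly declares writes done and surfaces the final status;
  // after a peer crash it is non-OK (often, though not always, UNAVAILABLE).
  return stream->Finish();
}
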
diff --git a/contrib/libs/grpc/test/cpp/end2end/client_crash_test_server.cc b/contrib/libs/grpc/test/cpp/end2end/client_crash_test_server.cc
index a7e0dc3aa0..2d5be420f2 100644
--- a/contrib/libs/grpc/test/cpp/end2end/client_crash_test_server.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/client_crash_test_server.cc
@@ -1,80 +1,80 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include <gflags/gflags.h>
-#include <iostream>
-#include <memory>
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <gflags/gflags.h>
+#include <iostream>
+#include <memory>
#include <util/generic/string.h>
-
-#include <grpc/support/log.h>
-#include <grpcpp/server.h>
-#include <grpcpp/server_builder.h>
-#include <grpcpp/server_context.h>
-
-#include "src/proto/grpc/testing/echo.grpc.pb.h"
-#include "test/cpp/util/test_config.h"
-
-DEFINE_string(address, "", "Address to bind to");
-
-using grpc::testing::EchoRequest;
-using grpc::testing::EchoResponse;
-
-// In some distros, gflags lives in the google namespace, and in others in
-// gflags. This hack lets us find it in either case.
-namespace google {}
-namespace gflags {}
-using namespace google;
-using namespace gflags;
-
-namespace grpc {
-namespace testing {
-
-class ServiceImpl final : public ::grpc::testing::EchoTestService::Service {
- Status BidiStream(
- ServerContext* /*context*/,
- ServerReaderWriter<EchoResponse, EchoRequest>* stream) override {
- EchoRequest request;
- EchoResponse response;
- while (stream->Read(&request)) {
- gpr_log(GPR_INFO, "recv msg %s", request.message().c_str());
- response.set_message(request.message());
- stream->Write(response);
- }
- return Status::OK;
- }
-};
-
-void RunServer() {
- ServiceImpl service;
-
- ServerBuilder builder;
- builder.AddListeningPort(FLAGS_address, grpc::InsecureServerCredentials());
- builder.RegisterService(&service);
- std::unique_ptr<Server> server(builder.BuildAndStart());
- std::cout << "Server listening on " << FLAGS_address << std::endl;
- server->Wait();
-}
-} // namespace testing
-} // namespace grpc
-
-int main(int argc, char** argv) {
- grpc::testing::InitTest(&argc, &argv, true);
- grpc::testing::RunServer();
-
- return 0;
-}
+
+#include <grpc/support/log.h>
+#include <grpcpp/server.h>
+#include <grpcpp/server_builder.h>
+#include <grpcpp/server_context.h>
+
+#include "src/proto/grpc/testing/echo.grpc.pb.h"
+#include "test/cpp/util/test_config.h"
+
+DEFINE_string(address, "", "Address to bind to");
+
+using grpc::testing::EchoRequest;
+using grpc::testing::EchoResponse;
+
+// In some distros, gflags lives in the google namespace, and in others in
+// gflags. This hack lets us find it in either case.
+namespace google {}
+namespace gflags {}
+using namespace google;
+using namespace gflags;
+
+namespace grpc {
+namespace testing {
+
+class ServiceImpl final : public ::grpc::testing::EchoTestService::Service {
+ Status BidiStream(
+ ServerContext* /*context*/,
+ ServerReaderWriter<EchoResponse, EchoRequest>* stream) override {
+ EchoRequest request;
+ EchoResponse response;
+ while (stream->Read(&request)) {
+ gpr_log(GPR_INFO, "recv msg %s", request.message().c_str());
+ response.set_message(request.message());
+ stream->Write(response);
+ }
+ return Status::OK;
+ }
+};
+
+void RunServer() {
+ ServiceImpl service;
+
+ ServerBuilder builder;
+ builder.AddListeningPort(FLAGS_address, grpc::InsecureServerCredentials());
+ builder.RegisterService(&service);
+ std::unique_ptr<Server> server(builder.BuildAndStart());
+ std::cout << "Server listening on " << FLAGS_address << std::endl;
+ server->Wait();
+}
+} // namespace testing
+} // namespace grpc
+
+int main(int argc, char** argv) {
+ grpc::testing::InitTest(&argc, &argv, true);
+ grpc::testing::RunServer();
+
+ return 0;
+}
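Stripped of the gflags plumbing, the server binary above reduces to a synchronous echo-back BidiStream handler registered with a ServerBuilder; this is the process that the crash test launches and later kills. EchoBackService and RunEchoServer are illustrative names, and the real binary takes its listening address from --address:

#include <memory>
#include <string>

#include <grpcpp/grpcpp.h>

#include "src/proto/grpc/testing/echo.grpc.pb.h"

class EchoBackService final : public grpc::testing::EchoTestService::Service {
  grpc::Status BidiStream(
      grpc::ServerContext* /*context*/,
      grpc::ServerReaderWriter<grpc::testing::EchoResponse,
                               grpc::testing::EchoRequest>* stream) override {
    grpc::testing::EchoRequest request;
    grpc::testing::EchoResponse response;
    // Echo every message back until the client half-closes the stream.
    while (stream->Read(&request)) {
      response.set_message(request.message());
      stream->Write(response);
    }
    return grpc::Status::OK;
  }
};

void RunEchoServer(const std::string& address) {
  EchoBackService service;
  grpc::ServerBuilder builder;
  builder.AddListeningPort(address, grpc::InsecureServerCredentials());
  builder.RegisterService(&service);
  std::unique_ptr<grpc::Server> server = builder.BuildAndStart();
  // Wait() blocks until Shutdown(); the crash test kills the process instead.
  server->Wait();
}
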
diff --git a/contrib/libs/grpc/test/cpp/end2end/client_interceptors_end2end_test.cc b/contrib/libs/grpc/test/cpp/end2end/client_interceptors_end2end_test.cc
index 18a9e98b71..956876d9f6 100644
--- a/contrib/libs/grpc/test/cpp/end2end/client_interceptors_end2end_test.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/client_interceptors_end2end_test.cc
@@ -1,48 +1,48 @@
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include <memory>
-#include <vector>
-
-#include <grpcpp/channel.h>
-#include <grpcpp/client_context.h>
-#include <grpcpp/create_channel.h>
-#include <grpcpp/generic/generic_stub.h>
-#include <grpcpp/impl/codegen/proto_utils.h>
-#include <grpcpp/server.h>
-#include <grpcpp/server_builder.h>
-#include <grpcpp/server_context.h>
-#include <grpcpp/support/client_interceptor.h>
-
-#include "src/proto/grpc/testing/echo.grpc.pb.h"
-#include "test/core/util/port.h"
-#include "test/core/util/test_config.h"
-#include "test/cpp/end2end/interceptors_util.h"
-#include "test/cpp/end2end/test_service_impl.h"
-#include "test/cpp/util/byte_buffer_proto_helper.h"
-#include "test/cpp/util/string_ref_helper.h"
-
-#include <gtest/gtest.h>
-
-namespace grpc {
-namespace testing {
-namespace {
-
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <memory>
+#include <vector>
+
+#include <grpcpp/channel.h>
+#include <grpcpp/client_context.h>
+#include <grpcpp/create_channel.h>
+#include <grpcpp/generic/generic_stub.h>
+#include <grpcpp/impl/codegen/proto_utils.h>
+#include <grpcpp/server.h>
+#include <grpcpp/server_builder.h>
+#include <grpcpp/server_context.h>
+#include <grpcpp/support/client_interceptor.h>
+
+#include "src/proto/grpc/testing/echo.grpc.pb.h"
+#include "test/core/util/port.h"
+#include "test/core/util/test_config.h"
+#include "test/cpp/end2end/interceptors_util.h"
+#include "test/cpp/end2end/test_service_impl.h"
+#include "test/cpp/util/byte_buffer_proto_helper.h"
+#include "test/cpp/util/string_ref_helper.h"
+
+#include <gtest/gtest.h>
+
+namespace grpc {
+namespace testing {
+namespace {
+
enum class RPCType {
kSyncUnary,
kSyncClientStreaming,
@@ -54,495 +54,495 @@ enum class RPCType {
kAsyncCQBidiStreaming,
};
-/* Hijacks Echo RPC and fills in the expected values */
-class HijackingInterceptor : public experimental::Interceptor {
- public:
- HijackingInterceptor(experimental::ClientRpcInfo* info) {
- info_ = info;
- // Make sure it is the right method
- EXPECT_EQ(strcmp("/grpc.testing.EchoTestService/Echo", info->method()), 0);
- EXPECT_EQ(info->type(), experimental::ClientRpcInfo::Type::UNARY);
- }
-
- virtual void Intercept(experimental::InterceptorBatchMethods* methods) {
- bool hijack = false;
- if (methods->QueryInterceptionHookPoint(
- experimental::InterceptionHookPoints::PRE_SEND_INITIAL_METADATA)) {
- auto* map = methods->GetSendInitialMetadata();
- // Check that we can see the test metadata
- ASSERT_EQ(map->size(), static_cast<unsigned>(1));
- auto iterator = map->begin();
- EXPECT_EQ("testkey", iterator->first);
- EXPECT_EQ("testvalue", iterator->second);
- hijack = true;
- }
- if (methods->QueryInterceptionHookPoint(
- experimental::InterceptionHookPoints::PRE_SEND_MESSAGE)) {
- EchoRequest req;
- auto* buffer = methods->GetSerializedSendMessage();
- auto copied_buffer = *buffer;
- EXPECT_TRUE(
- SerializationTraits<EchoRequest>::Deserialize(&copied_buffer, &req)
- .ok());
- EXPECT_EQ(req.message(), "Hello");
- }
- if (methods->QueryInterceptionHookPoint(
- experimental::InterceptionHookPoints::PRE_SEND_CLOSE)) {
- // Got nothing to do here for now
- }
- if (methods->QueryInterceptionHookPoint(
- experimental::InterceptionHookPoints::POST_RECV_INITIAL_METADATA)) {
- auto* map = methods->GetRecvInitialMetadata();
- // Got nothing better to do here for now
- EXPECT_EQ(map->size(), static_cast<unsigned>(0));
- }
- if (methods->QueryInterceptionHookPoint(
- experimental::InterceptionHookPoints::POST_RECV_MESSAGE)) {
- EchoResponse* resp =
- static_cast<EchoResponse*>(methods->GetRecvMessage());
- // Check that we got the hijacked message, and re-insert the expected
- // message
- EXPECT_EQ(resp->message(), "Hello1");
- resp->set_message("Hello");
- }
- if (methods->QueryInterceptionHookPoint(
- experimental::InterceptionHookPoints::POST_RECV_STATUS)) {
- auto* map = methods->GetRecvTrailingMetadata();
- bool found = false;
- // Check that we received the metadata as an echo
- for (const auto& pair : *map) {
- found = pair.first.starts_with("testkey") &&
- pair.second.starts_with("testvalue");
- if (found) break;
- }
- EXPECT_EQ(found, true);
- auto* status = methods->GetRecvStatus();
- EXPECT_EQ(status->ok(), true);
- }
- if (methods->QueryInterceptionHookPoint(
- experimental::InterceptionHookPoints::PRE_RECV_INITIAL_METADATA)) {
- auto* map = methods->GetRecvInitialMetadata();
- // Got nothing better to do here at the moment
- EXPECT_EQ(map->size(), static_cast<unsigned>(0));
- }
- if (methods->QueryInterceptionHookPoint(
- experimental::InterceptionHookPoints::PRE_RECV_MESSAGE)) {
- // Insert a different message than expected
- EchoResponse* resp =
- static_cast<EchoResponse*>(methods->GetRecvMessage());
- resp->set_message("Hello1");
- }
- if (methods->QueryInterceptionHookPoint(
- experimental::InterceptionHookPoints::PRE_RECV_STATUS)) {
- auto* map = methods->GetRecvTrailingMetadata();
- // insert the metadata that we want
- EXPECT_EQ(map->size(), static_cast<unsigned>(0));
- map->insert(std::make_pair("testkey", "testvalue"));
- auto* status = methods->GetRecvStatus();
- *status = Status(StatusCode::OK, "");
- }
- if (hijack) {
- methods->Hijack();
- } else {
- methods->Proceed();
- }
- }
-
- private:
- experimental::ClientRpcInfo* info_;
-};
-
-class HijackingInterceptorFactory
- : public experimental::ClientInterceptorFactoryInterface {
- public:
- virtual experimental::Interceptor* CreateClientInterceptor(
- experimental::ClientRpcInfo* info) override {
- return new HijackingInterceptor(info);
- }
-};
-
-class HijackingInterceptorMakesAnotherCall : public experimental::Interceptor {
- public:
- HijackingInterceptorMakesAnotherCall(experimental::ClientRpcInfo* info) {
- info_ = info;
- // Make sure it is the right method
- EXPECT_EQ(strcmp("/grpc.testing.EchoTestService/Echo", info->method()), 0);
- }
-
- virtual void Intercept(experimental::InterceptorBatchMethods* methods) {
- if (methods->QueryInterceptionHookPoint(
- experimental::InterceptionHookPoints::PRE_SEND_INITIAL_METADATA)) {
- auto* map = methods->GetSendInitialMetadata();
- // Check that we can see the test metadata
- ASSERT_EQ(map->size(), static_cast<unsigned>(1));
- auto iterator = map->begin();
- EXPECT_EQ("testkey", iterator->first);
- EXPECT_EQ("testvalue", iterator->second);
- // Make a copy of the map
- metadata_map_ = *map;
- }
- if (methods->QueryInterceptionHookPoint(
- experimental::InterceptionHookPoints::PRE_SEND_MESSAGE)) {
- EchoRequest req;
- auto* buffer = methods->GetSerializedSendMessage();
- auto copied_buffer = *buffer;
- EXPECT_TRUE(
- SerializationTraits<EchoRequest>::Deserialize(&copied_buffer, &req)
- .ok());
- EXPECT_EQ(req.message(), "Hello");
- req_ = req;
- stub_ = grpc::testing::EchoTestService::NewStub(
- methods->GetInterceptedChannel());
- ctx_.AddMetadata(metadata_map_.begin()->first,
- metadata_map_.begin()->second);
- stub_->experimental_async()->Echo(&ctx_, &req_, &resp_,
- [this, methods](Status s) {
- EXPECT_EQ(s.ok(), true);
- EXPECT_EQ(resp_.message(), "Hello");
- methods->Hijack();
- });
- // This is a Unary RPC and we have got nothing interesting to do in the
- // PRE_SEND_CLOSE interception hook point for this interceptor, so let's
- // return here. (We do not want to call methods->Proceed(). When the new
- // RPC returns, we will call methods->Hijack() instead.)
- return;
- }
- if (methods->QueryInterceptionHookPoint(
- experimental::InterceptionHookPoints::PRE_SEND_CLOSE)) {
- // Got nothing to do here for now
- }
- if (methods->QueryInterceptionHookPoint(
- experimental::InterceptionHookPoints::POST_RECV_INITIAL_METADATA)) {
- auto* map = methods->GetRecvInitialMetadata();
- // Got nothing better to do here for now
- EXPECT_EQ(map->size(), static_cast<unsigned>(0));
- }
- if (methods->QueryInterceptionHookPoint(
- experimental::InterceptionHookPoints::POST_RECV_MESSAGE)) {
- EchoResponse* resp =
- static_cast<EchoResponse*>(methods->GetRecvMessage());
- // Check that we got the hijacked message, and re-insert the expected
- // message
- EXPECT_EQ(resp->message(), "Hello");
- }
- if (methods->QueryInterceptionHookPoint(
- experimental::InterceptionHookPoints::POST_RECV_STATUS)) {
- auto* map = methods->GetRecvTrailingMetadata();
- bool found = false;
- // Check that we received the metadata as an echo
- for (const auto& pair : *map) {
- found = pair.first.starts_with("testkey") &&
- pair.second.starts_with("testvalue");
- if (found) break;
- }
- EXPECT_EQ(found, true);
- auto* status = methods->GetRecvStatus();
- EXPECT_EQ(status->ok(), true);
- }
- if (methods->QueryInterceptionHookPoint(
- experimental::InterceptionHookPoints::PRE_RECV_INITIAL_METADATA)) {
- auto* map = methods->GetRecvInitialMetadata();
- // Got nothing better to do here at the moment
- EXPECT_EQ(map->size(), static_cast<unsigned>(0));
- }
- if (methods->QueryInterceptionHookPoint(
- experimental::InterceptionHookPoints::PRE_RECV_MESSAGE)) {
- // Insert a different message than expected
- EchoResponse* resp =
- static_cast<EchoResponse*>(methods->GetRecvMessage());
- resp->set_message(resp_.message());
- }
- if (methods->QueryInterceptionHookPoint(
- experimental::InterceptionHookPoints::PRE_RECV_STATUS)) {
- auto* map = methods->GetRecvTrailingMetadata();
- // insert the metadata that we want
- EXPECT_EQ(map->size(), static_cast<unsigned>(0));
- map->insert(std::make_pair("testkey", "testvalue"));
- auto* status = methods->GetRecvStatus();
- *status = Status(StatusCode::OK, "");
- }
-
- methods->Proceed();
- }
-
- private:
- experimental::ClientRpcInfo* info_;
+/* Hijacks Echo RPC and fills in the expected values */
+class HijackingInterceptor : public experimental::Interceptor {
+ public:
+ HijackingInterceptor(experimental::ClientRpcInfo* info) {
+ info_ = info;
+ // Make sure it is the right method
+ EXPECT_EQ(strcmp("/grpc.testing.EchoTestService/Echo", info->method()), 0);
+ EXPECT_EQ(info->type(), experimental::ClientRpcInfo::Type::UNARY);
+ }
+
+ virtual void Intercept(experimental::InterceptorBatchMethods* methods) {
+ bool hijack = false;
+ if (methods->QueryInterceptionHookPoint(
+ experimental::InterceptionHookPoints::PRE_SEND_INITIAL_METADATA)) {
+ auto* map = methods->GetSendInitialMetadata();
+ // Check that we can see the test metadata
+ ASSERT_EQ(map->size(), static_cast<unsigned>(1));
+ auto iterator = map->begin();
+ EXPECT_EQ("testkey", iterator->first);
+ EXPECT_EQ("testvalue", iterator->second);
+ hijack = true;
+ }
+ if (methods->QueryInterceptionHookPoint(
+ experimental::InterceptionHookPoints::PRE_SEND_MESSAGE)) {
+ EchoRequest req;
+ auto* buffer = methods->GetSerializedSendMessage();
+ auto copied_buffer = *buffer;
+ EXPECT_TRUE(
+ SerializationTraits<EchoRequest>::Deserialize(&copied_buffer, &req)
+ .ok());
+ EXPECT_EQ(req.message(), "Hello");
+ }
+ if (methods->QueryInterceptionHookPoint(
+ experimental::InterceptionHookPoints::PRE_SEND_CLOSE)) {
+ // Got nothing to do here for now
+ }
+ if (methods->QueryInterceptionHookPoint(
+ experimental::InterceptionHookPoints::POST_RECV_INITIAL_METADATA)) {
+ auto* map = methods->GetRecvInitialMetadata();
+ // Got nothing better to do here for now
+ EXPECT_EQ(map->size(), static_cast<unsigned>(0));
+ }
+ if (methods->QueryInterceptionHookPoint(
+ experimental::InterceptionHookPoints::POST_RECV_MESSAGE)) {
+ EchoResponse* resp =
+ static_cast<EchoResponse*>(methods->GetRecvMessage());
+ // Check that we got the hijacked message, and re-insert the expected
+ // message
+ EXPECT_EQ(resp->message(), "Hello1");
+ resp->set_message("Hello");
+ }
+ if (methods->QueryInterceptionHookPoint(
+ experimental::InterceptionHookPoints::POST_RECV_STATUS)) {
+ auto* map = methods->GetRecvTrailingMetadata();
+ bool found = false;
+ // Check that we received the metadata as an echo
+ for (const auto& pair : *map) {
+ found = pair.first.starts_with("testkey") &&
+ pair.second.starts_with("testvalue");
+ if (found) break;
+ }
+ EXPECT_EQ(found, true);
+ auto* status = methods->GetRecvStatus();
+ EXPECT_EQ(status->ok(), true);
+ }
+ if (methods->QueryInterceptionHookPoint(
+ experimental::InterceptionHookPoints::PRE_RECV_INITIAL_METADATA)) {
+ auto* map = methods->GetRecvInitialMetadata();
+ // Got nothing better to do here at the moment
+ EXPECT_EQ(map->size(), static_cast<unsigned>(0));
+ }
+ if (methods->QueryInterceptionHookPoint(
+ experimental::InterceptionHookPoints::PRE_RECV_MESSAGE)) {
+ // Insert a different message than expected
+ EchoResponse* resp =
+ static_cast<EchoResponse*>(methods->GetRecvMessage());
+ resp->set_message("Hello1");
+ }
+ if (methods->QueryInterceptionHookPoint(
+ experimental::InterceptionHookPoints::PRE_RECV_STATUS)) {
+ auto* map = methods->GetRecvTrailingMetadata();
+ // insert the metadata that we want
+ EXPECT_EQ(map->size(), static_cast<unsigned>(0));
+ map->insert(std::make_pair("testkey", "testvalue"));
+ auto* status = methods->GetRecvStatus();
+ *status = Status(StatusCode::OK, "");
+ }
+ if (hijack) {
+ methods->Hijack();
+ } else {
+ methods->Proceed();
+ }
+ }
+
+ private:
+ experimental::ClientRpcInfo* info_;
+};
+
+class HijackingInterceptorFactory
+ : public experimental::ClientInterceptorFactoryInterface {
+ public:
+ virtual experimental::Interceptor* CreateClientInterceptor(
+ experimental::ClientRpcInfo* info) override {
+ return new HijackingInterceptor(info);
+ }
+};
+
+class HijackingInterceptorMakesAnotherCall : public experimental::Interceptor {
+ public:
+ HijackingInterceptorMakesAnotherCall(experimental::ClientRpcInfo* info) {
+ info_ = info;
+ // Make sure it is the right method
+ EXPECT_EQ(strcmp("/grpc.testing.EchoTestService/Echo", info->method()), 0);
+ }
+
+ virtual void Intercept(experimental::InterceptorBatchMethods* methods) {
+ if (methods->QueryInterceptionHookPoint(
+ experimental::InterceptionHookPoints::PRE_SEND_INITIAL_METADATA)) {
+ auto* map = methods->GetSendInitialMetadata();
+ // Check that we can see the test metadata
+ ASSERT_EQ(map->size(), static_cast<unsigned>(1));
+ auto iterator = map->begin();
+ EXPECT_EQ("testkey", iterator->first);
+ EXPECT_EQ("testvalue", iterator->second);
+ // Make a copy of the map
+ metadata_map_ = *map;
+ }
+ if (methods->QueryInterceptionHookPoint(
+ experimental::InterceptionHookPoints::PRE_SEND_MESSAGE)) {
+ EchoRequest req;
+ auto* buffer = methods->GetSerializedSendMessage();
+ auto copied_buffer = *buffer;
+ EXPECT_TRUE(
+ SerializationTraits<EchoRequest>::Deserialize(&copied_buffer, &req)
+ .ok());
+ EXPECT_EQ(req.message(), "Hello");
+ req_ = req;
+ stub_ = grpc::testing::EchoTestService::NewStub(
+ methods->GetInterceptedChannel());
+ ctx_.AddMetadata(metadata_map_.begin()->first,
+ metadata_map_.begin()->second);
+ stub_->experimental_async()->Echo(&ctx_, &req_, &resp_,
+ [this, methods](Status s) {
+ EXPECT_EQ(s.ok(), true);
+ EXPECT_EQ(resp_.message(), "Hello");
+ methods->Hijack();
+ });
+ // This is a Unary RPC and we have got nothing interesting to do in the
+ // PRE_SEND_CLOSE interception hook point for this interceptor, so let's
+ // return here. (We do not want to call methods->Proceed(). When the new
+ // RPC returns, we will call methods->Hijack() instead.)
+ return;
+ }
+ if (methods->QueryInterceptionHookPoint(
+ experimental::InterceptionHookPoints::PRE_SEND_CLOSE)) {
+ // Got nothing to do here for now
+ }
+ if (methods->QueryInterceptionHookPoint(
+ experimental::InterceptionHookPoints::POST_RECV_INITIAL_METADATA)) {
+ auto* map = methods->GetRecvInitialMetadata();
+ // Got nothing better to do here for now
+ EXPECT_EQ(map->size(), static_cast<unsigned>(0));
+ }
+ if (methods->QueryInterceptionHookPoint(
+ experimental::InterceptionHookPoints::POST_RECV_MESSAGE)) {
+ EchoResponse* resp =
+ static_cast<EchoResponse*>(methods->GetRecvMessage());
+ // Check that we got the hijacked message, and re-insert the expected
+ // message
+ EXPECT_EQ(resp->message(), "Hello");
+ }
+ if (methods->QueryInterceptionHookPoint(
+ experimental::InterceptionHookPoints::POST_RECV_STATUS)) {
+ auto* map = methods->GetRecvTrailingMetadata();
+ bool found = false;
+ // Check that we received the metadata as an echo
+ for (const auto& pair : *map) {
+ found = pair.first.starts_with("testkey") &&
+ pair.second.starts_with("testvalue");
+ if (found) break;
+ }
+ EXPECT_EQ(found, true);
+ auto* status = methods->GetRecvStatus();
+ EXPECT_EQ(status->ok(), true);
+ }
+ if (methods->QueryInterceptionHookPoint(
+ experimental::InterceptionHookPoints::PRE_RECV_INITIAL_METADATA)) {
+ auto* map = methods->GetRecvInitialMetadata();
+ // Got nothing better to do here at the moment
+ EXPECT_EQ(map->size(), static_cast<unsigned>(0));
+ }
+ if (methods->QueryInterceptionHookPoint(
+ experimental::InterceptionHookPoints::PRE_RECV_MESSAGE)) {
+ // Insert a different message than expected
+ EchoResponse* resp =
+ static_cast<EchoResponse*>(methods->GetRecvMessage());
+ resp->set_message(resp_.message());
+ }
+ if (methods->QueryInterceptionHookPoint(
+ experimental::InterceptionHookPoints::PRE_RECV_STATUS)) {
+ auto* map = methods->GetRecvTrailingMetadata();
+ // insert the metadata that we want
+ EXPECT_EQ(map->size(), static_cast<unsigned>(0));
+ map->insert(std::make_pair("testkey", "testvalue"));
+ auto* status = methods->GetRecvStatus();
+ *status = Status(StatusCode::OK, "");
+ }
+
+ methods->Proceed();
+ }
+
+ private:
+ experimental::ClientRpcInfo* info_;
std::multimap<TString, TString> metadata_map_;
- ClientContext ctx_;
- EchoRequest req_;
- EchoResponse resp_;
- std::unique_ptr<grpc::testing::EchoTestService::Stub> stub_;
-};
-
-class HijackingInterceptorMakesAnotherCallFactory
- : public experimental::ClientInterceptorFactoryInterface {
- public:
- virtual experimental::Interceptor* CreateClientInterceptor(
- experimental::ClientRpcInfo* info) override {
- return new HijackingInterceptorMakesAnotherCall(info);
- }
-};
-
-class BidiStreamingRpcHijackingInterceptor : public experimental::Interceptor {
- public:
- BidiStreamingRpcHijackingInterceptor(experimental::ClientRpcInfo* info) {
- info_ = info;
- }
-
- virtual void Intercept(experimental::InterceptorBatchMethods* methods) {
- bool hijack = false;
- if (methods->QueryInterceptionHookPoint(
- experimental::InterceptionHookPoints::PRE_SEND_INITIAL_METADATA)) {
- CheckMetadata(*methods->GetSendInitialMetadata(), "testkey", "testvalue");
- hijack = true;
- }
- if (methods->QueryInterceptionHookPoint(
- experimental::InterceptionHookPoints::PRE_SEND_MESSAGE)) {
- EchoRequest req;
- auto* buffer = methods->GetSerializedSendMessage();
- auto copied_buffer = *buffer;
- EXPECT_TRUE(
- SerializationTraits<EchoRequest>::Deserialize(&copied_buffer, &req)
- .ok());
- EXPECT_EQ(req.message().find("Hello"), 0u);
- msg = req.message();
- }
- if (methods->QueryInterceptionHookPoint(
- experimental::InterceptionHookPoints::PRE_SEND_CLOSE)) {
- // Got nothing to do here for now
- }
- if (methods->QueryInterceptionHookPoint(
- experimental::InterceptionHookPoints::POST_RECV_STATUS)) {
- CheckMetadata(*methods->GetRecvTrailingMetadata(), "testkey",
- "testvalue");
- auto* status = methods->GetRecvStatus();
- EXPECT_EQ(status->ok(), true);
- }
- if (methods->QueryInterceptionHookPoint(
- experimental::InterceptionHookPoints::PRE_RECV_MESSAGE)) {
- EchoResponse* resp =
- static_cast<EchoResponse*>(methods->GetRecvMessage());
- resp->set_message(msg);
- }
- if (methods->QueryInterceptionHookPoint(
- experimental::InterceptionHookPoints::POST_RECV_MESSAGE)) {
- EXPECT_EQ(static_cast<EchoResponse*>(methods->GetRecvMessage())
- ->message()
- .find("Hello"),
- 0u);
- }
- if (methods->QueryInterceptionHookPoint(
- experimental::InterceptionHookPoints::PRE_RECV_STATUS)) {
- auto* map = methods->GetRecvTrailingMetadata();
- // insert the metadata that we want
- EXPECT_EQ(map->size(), static_cast<unsigned>(0));
- map->insert(std::make_pair("testkey", "testvalue"));
- auto* status = methods->GetRecvStatus();
- *status = Status(StatusCode::OK, "");
- }
- if (hijack) {
- methods->Hijack();
- } else {
- methods->Proceed();
- }
- }
-
- private:
- experimental::ClientRpcInfo* info_;
+ ClientContext ctx_;
+ EchoRequest req_;
+ EchoResponse resp_;
+ std::unique_ptr<grpc::testing::EchoTestService::Stub> stub_;
+};
+
+class HijackingInterceptorMakesAnotherCallFactory
+ : public experimental::ClientInterceptorFactoryInterface {
+ public:
+ virtual experimental::Interceptor* CreateClientInterceptor(
+ experimental::ClientRpcInfo* info) override {
+ return new HijackingInterceptorMakesAnotherCall(info);
+ }
+};
+
+class BidiStreamingRpcHijackingInterceptor : public experimental::Interceptor {
+ public:
+ BidiStreamingRpcHijackingInterceptor(experimental::ClientRpcInfo* info) {
+ info_ = info;
+ }
+
+ virtual void Intercept(experimental::InterceptorBatchMethods* methods) {
+ bool hijack = false;
+ if (methods->QueryInterceptionHookPoint(
+ experimental::InterceptionHookPoints::PRE_SEND_INITIAL_METADATA)) {
+ CheckMetadata(*methods->GetSendInitialMetadata(), "testkey", "testvalue");
+ hijack = true;
+ }
+ if (methods->QueryInterceptionHookPoint(
+ experimental::InterceptionHookPoints::PRE_SEND_MESSAGE)) {
+ EchoRequest req;
+ auto* buffer = methods->GetSerializedSendMessage();
+ auto copied_buffer = *buffer;
+ EXPECT_TRUE(
+ SerializationTraits<EchoRequest>::Deserialize(&copied_buffer, &req)
+ .ok());
+ EXPECT_EQ(req.message().find("Hello"), 0u);
+ msg = req.message();
+ }
+ if (methods->QueryInterceptionHookPoint(
+ experimental::InterceptionHookPoints::PRE_SEND_CLOSE)) {
+ // Got nothing to do here for now
+ }
+ if (methods->QueryInterceptionHookPoint(
+ experimental::InterceptionHookPoints::POST_RECV_STATUS)) {
+ CheckMetadata(*methods->GetRecvTrailingMetadata(), "testkey",
+ "testvalue");
+ auto* status = methods->GetRecvStatus();
+ EXPECT_EQ(status->ok(), true);
+ }
+ if (methods->QueryInterceptionHookPoint(
+ experimental::InterceptionHookPoints::PRE_RECV_MESSAGE)) {
+ EchoResponse* resp =
+ static_cast<EchoResponse*>(methods->GetRecvMessage());
+ resp->set_message(msg);
+ }
+ if (methods->QueryInterceptionHookPoint(
+ experimental::InterceptionHookPoints::POST_RECV_MESSAGE)) {
+ EXPECT_EQ(static_cast<EchoResponse*>(methods->GetRecvMessage())
+ ->message()
+ .find("Hello"),
+ 0u);
+ }
+ if (methods->QueryInterceptionHookPoint(
+ experimental::InterceptionHookPoints::PRE_RECV_STATUS)) {
+ auto* map = methods->GetRecvTrailingMetadata();
+ // insert the metadata that we want
+ EXPECT_EQ(map->size(), static_cast<unsigned>(0));
+ map->insert(std::make_pair("testkey", "testvalue"));
+ auto* status = methods->GetRecvStatus();
+ *status = Status(StatusCode::OK, "");
+ }
+ if (hijack) {
+ methods->Hijack();
+ } else {
+ methods->Proceed();
+ }
+ }
+
+ private:
+ experimental::ClientRpcInfo* info_;
TString msg;
-};
-
-class ClientStreamingRpcHijackingInterceptor
- : public experimental::Interceptor {
- public:
- ClientStreamingRpcHijackingInterceptor(experimental::ClientRpcInfo* info) {
- info_ = info;
- }
- virtual void Intercept(experimental::InterceptorBatchMethods* methods) {
- bool hijack = false;
- if (methods->QueryInterceptionHookPoint(
- experimental::InterceptionHookPoints::PRE_SEND_INITIAL_METADATA)) {
- hijack = true;
- }
- if (methods->QueryInterceptionHookPoint(
- experimental::InterceptionHookPoints::PRE_SEND_MESSAGE)) {
- if (++count_ > 10) {
- methods->FailHijackedSendMessage();
- }
- }
- if (methods->QueryInterceptionHookPoint(
- experimental::InterceptionHookPoints::POST_SEND_MESSAGE)) {
- EXPECT_FALSE(got_failed_send_);
- got_failed_send_ = !methods->GetSendMessageStatus();
- }
- if (methods->QueryInterceptionHookPoint(
- experimental::InterceptionHookPoints::PRE_RECV_STATUS)) {
- auto* status = methods->GetRecvStatus();
- *status = Status(StatusCode::UNAVAILABLE, "Done sending 10 messages");
- }
- if (hijack) {
- methods->Hijack();
- } else {
- methods->Proceed();
- }
- }
-
- static bool GotFailedSend() { return got_failed_send_; }
-
- private:
- experimental::ClientRpcInfo* info_;
- int count_ = 0;
- static bool got_failed_send_;
-};
-
-bool ClientStreamingRpcHijackingInterceptor::got_failed_send_ = false;
-
-class ClientStreamingRpcHijackingInterceptorFactory
- : public experimental::ClientInterceptorFactoryInterface {
- public:
- virtual experimental::Interceptor* CreateClientInterceptor(
- experimental::ClientRpcInfo* info) override {
- return new ClientStreamingRpcHijackingInterceptor(info);
- }
-};
-
-class ServerStreamingRpcHijackingInterceptor
- : public experimental::Interceptor {
- public:
- ServerStreamingRpcHijackingInterceptor(experimental::ClientRpcInfo* info) {
- info_ = info;
+};
+
+class ClientStreamingRpcHijackingInterceptor
+ : public experimental::Interceptor {
+ public:
+ ClientStreamingRpcHijackingInterceptor(experimental::ClientRpcInfo* info) {
+ info_ = info;
+ }
+ virtual void Intercept(experimental::InterceptorBatchMethods* methods) {
+ bool hijack = false;
+ if (methods->QueryInterceptionHookPoint(
+ experimental::InterceptionHookPoints::PRE_SEND_INITIAL_METADATA)) {
+ hijack = true;
+ }
+ if (methods->QueryInterceptionHookPoint(
+ experimental::InterceptionHookPoints::PRE_SEND_MESSAGE)) {
+ if (++count_ > 10) {
+ methods->FailHijackedSendMessage();
+ }
+ }
+ if (methods->QueryInterceptionHookPoint(
+ experimental::InterceptionHookPoints::POST_SEND_MESSAGE)) {
+ EXPECT_FALSE(got_failed_send_);
+ got_failed_send_ = !methods->GetSendMessageStatus();
+ }
+ if (methods->QueryInterceptionHookPoint(
+ experimental::InterceptionHookPoints::PRE_RECV_STATUS)) {
+ auto* status = methods->GetRecvStatus();
+ *status = Status(StatusCode::UNAVAILABLE, "Done sending 10 messages");
+ }
+ if (hijack) {
+ methods->Hijack();
+ } else {
+ methods->Proceed();
+ }
+ }
+
+ static bool GotFailedSend() { return got_failed_send_; }
+
+ private:
+ experimental::ClientRpcInfo* info_;
+ int count_ = 0;
+ static bool got_failed_send_;
+};
+
+bool ClientStreamingRpcHijackingInterceptor::got_failed_send_ = false;
+
+class ClientStreamingRpcHijackingInterceptorFactory
+ : public experimental::ClientInterceptorFactoryInterface {
+ public:
+ virtual experimental::Interceptor* CreateClientInterceptor(
+ experimental::ClientRpcInfo* info) override {
+ return new ClientStreamingRpcHijackingInterceptor(info);
+ }
+};
+
+class ServerStreamingRpcHijackingInterceptor
+ : public experimental::Interceptor {
+ public:
+ ServerStreamingRpcHijackingInterceptor(experimental::ClientRpcInfo* info) {
+ info_ = info;
got_failed_message_ = false;
- }
-
- virtual void Intercept(experimental::InterceptorBatchMethods* methods) {
- bool hijack = false;
- if (methods->QueryInterceptionHookPoint(
- experimental::InterceptionHookPoints::PRE_SEND_INITIAL_METADATA)) {
- auto* map = methods->GetSendInitialMetadata();
- // Check that we can see the test metadata
- ASSERT_EQ(map->size(), static_cast<unsigned>(1));
- auto iterator = map->begin();
- EXPECT_EQ("testkey", iterator->first);
- EXPECT_EQ("testvalue", iterator->second);
- hijack = true;
- }
- if (methods->QueryInterceptionHookPoint(
- experimental::InterceptionHookPoints::PRE_SEND_MESSAGE)) {
- EchoRequest req;
- auto* buffer = methods->GetSerializedSendMessage();
- auto copied_buffer = *buffer;
- EXPECT_TRUE(
- SerializationTraits<EchoRequest>::Deserialize(&copied_buffer, &req)
- .ok());
- EXPECT_EQ(req.message(), "Hello");
- }
- if (methods->QueryInterceptionHookPoint(
- experimental::InterceptionHookPoints::PRE_SEND_CLOSE)) {
- // Got nothing to do here for now
- }
- if (methods->QueryInterceptionHookPoint(
- experimental::InterceptionHookPoints::POST_RECV_STATUS)) {
- auto* map = methods->GetRecvTrailingMetadata();
- bool found = false;
- // Check that we received the metadata as an echo
- for (const auto& pair : *map) {
- found = pair.first.starts_with("testkey") &&
- pair.second.starts_with("testvalue");
- if (found) break;
- }
- EXPECT_EQ(found, true);
- auto* status = methods->GetRecvStatus();
- EXPECT_EQ(status->ok(), true);
- }
- if (methods->QueryInterceptionHookPoint(
- experimental::InterceptionHookPoints::PRE_RECV_MESSAGE)) {
- if (++count_ > 10) {
- methods->FailHijackedRecvMessage();
- }
- EchoResponse* resp =
- static_cast<EchoResponse*>(methods->GetRecvMessage());
- resp->set_message("Hello");
- }
- if (methods->QueryInterceptionHookPoint(
- experimental::InterceptionHookPoints::POST_RECV_MESSAGE)) {
- // Only the last message will be a failure
- EXPECT_FALSE(got_failed_message_);
- got_failed_message_ = methods->GetRecvMessage() == nullptr;
- }
- if (methods->QueryInterceptionHookPoint(
- experimental::InterceptionHookPoints::PRE_RECV_STATUS)) {
- auto* map = methods->GetRecvTrailingMetadata();
- // insert the metadata that we want
- EXPECT_EQ(map->size(), static_cast<unsigned>(0));
- map->insert(std::make_pair("testkey", "testvalue"));
- auto* status = methods->GetRecvStatus();
- *status = Status(StatusCode::OK, "");
- }
- if (hijack) {
- methods->Hijack();
- } else {
- methods->Proceed();
- }
- }
-
- static bool GotFailedMessage() { return got_failed_message_; }
-
- private:
- experimental::ClientRpcInfo* info_;
- static bool got_failed_message_;
- int count_ = 0;
-};
-
-bool ServerStreamingRpcHijackingInterceptor::got_failed_message_ = false;
-
-class ServerStreamingRpcHijackingInterceptorFactory
- : public experimental::ClientInterceptorFactoryInterface {
- public:
- virtual experimental::Interceptor* CreateClientInterceptor(
- experimental::ClientRpcInfo* info) override {
- return new ServerStreamingRpcHijackingInterceptor(info);
- }
-};
-
-class BidiStreamingRpcHijackingInterceptorFactory
- : public experimental::ClientInterceptorFactoryInterface {
- public:
- virtual experimental::Interceptor* CreateClientInterceptor(
- experimental::ClientRpcInfo* info) override {
- return new BidiStreamingRpcHijackingInterceptor(info);
- }
-};
-
-// The logging interceptor is for testing purposes only. It is used to verify
-// that all the appropriate hook points are invoked for an RPC. The counts are
-// reset each time a new object of LoggingInterceptor is created, so only a
-// single RPC should be made on the channel before calling the Verify methods.
-class LoggingInterceptor : public experimental::Interceptor {
- public:
- LoggingInterceptor(experimental::ClientRpcInfo* /*info*/) {
- pre_send_initial_metadata_ = false;
- pre_send_message_count_ = 0;
- pre_send_close_ = false;
- post_recv_initial_metadata_ = false;
- post_recv_message_count_ = 0;
- post_recv_status_ = false;
- }
-
- virtual void Intercept(experimental::InterceptorBatchMethods* methods) {
- if (methods->QueryInterceptionHookPoint(
- experimental::InterceptionHookPoints::PRE_SEND_INITIAL_METADATA)) {
- auto* map = methods->GetSendInitialMetadata();
- // Check that we can see the test metadata
- ASSERT_EQ(map->size(), static_cast<unsigned>(1));
- auto iterator = map->begin();
- EXPECT_EQ("testkey", iterator->first);
- EXPECT_EQ("testvalue", iterator->second);
- ASSERT_FALSE(pre_send_initial_metadata_);
- pre_send_initial_metadata_ = true;
- }
- if (methods->QueryInterceptionHookPoint(
- experimental::InterceptionHookPoints::PRE_SEND_MESSAGE)) {
- EchoRequest req;
+ }
+
+ virtual void Intercept(experimental::InterceptorBatchMethods* methods) {
+ bool hijack = false;
+ if (methods->QueryInterceptionHookPoint(
+ experimental::InterceptionHookPoints::PRE_SEND_INITIAL_METADATA)) {
+ auto* map = methods->GetSendInitialMetadata();
+ // Check that we can see the test metadata
+ ASSERT_EQ(map->size(), static_cast<unsigned>(1));
+ auto iterator = map->begin();
+ EXPECT_EQ("testkey", iterator->first);
+ EXPECT_EQ("testvalue", iterator->second);
+ hijack = true;
+ }
+ if (methods->QueryInterceptionHookPoint(
+ experimental::InterceptionHookPoints::PRE_SEND_MESSAGE)) {
+ EchoRequest req;
+ auto* buffer = methods->GetSerializedSendMessage();
+ auto copied_buffer = *buffer;
+ EXPECT_TRUE(
+ SerializationTraits<EchoRequest>::Deserialize(&copied_buffer, &req)
+ .ok());
+ EXPECT_EQ(req.message(), "Hello");
+ }
+ if (methods->QueryInterceptionHookPoint(
+ experimental::InterceptionHookPoints::PRE_SEND_CLOSE)) {
+ // Got nothing to do here for now
+ }
+ if (methods->QueryInterceptionHookPoint(
+ experimental::InterceptionHookPoints::POST_RECV_STATUS)) {
+ auto* map = methods->GetRecvTrailingMetadata();
+ bool found = false;
+ // Check that we received the metadata as an echo
+ for (const auto& pair : *map) {
+ found = pair.first.starts_with("testkey") &&
+ pair.second.starts_with("testvalue");
+ if (found) break;
+ }
+ EXPECT_EQ(found, true);
+ auto* status = methods->GetRecvStatus();
+ EXPECT_EQ(status->ok(), true);
+ }
+ if (methods->QueryInterceptionHookPoint(
+ experimental::InterceptionHookPoints::PRE_RECV_MESSAGE)) {
+ if (++count_ > 10) {
+ methods->FailHijackedRecvMessage();
+ }
+ EchoResponse* resp =
+ static_cast<EchoResponse*>(methods->GetRecvMessage());
+ resp->set_message("Hello");
+ }
+ if (methods->QueryInterceptionHookPoint(
+ experimental::InterceptionHookPoints::POST_RECV_MESSAGE)) {
+ // Only the last message will be a failure
+ EXPECT_FALSE(got_failed_message_);
+ got_failed_message_ = methods->GetRecvMessage() == nullptr;
+ }
+ if (methods->QueryInterceptionHookPoint(
+ experimental::InterceptionHookPoints::PRE_RECV_STATUS)) {
+ auto* map = methods->GetRecvTrailingMetadata();
+ // insert the metadata that we want
+ EXPECT_EQ(map->size(), static_cast<unsigned>(0));
+ map->insert(std::make_pair("testkey", "testvalue"));
+ auto* status = methods->GetRecvStatus();
+ *status = Status(StatusCode::OK, "");
+ }
+ if (hijack) {
+ methods->Hijack();
+ } else {
+ methods->Proceed();
+ }
+ }
+
+ static bool GotFailedMessage() { return got_failed_message_; }
+
+ private:
+ experimental::ClientRpcInfo* info_;
+ static bool got_failed_message_;
+ int count_ = 0;
+};
+
+bool ServerStreamingRpcHijackingInterceptor::got_failed_message_ = false;
+
+class ServerStreamingRpcHijackingInterceptorFactory
+ : public experimental::ClientInterceptorFactoryInterface {
+ public:
+ virtual experimental::Interceptor* CreateClientInterceptor(
+ experimental::ClientRpcInfo* info) override {
+ return new ServerStreamingRpcHijackingInterceptor(info);
+ }
+};
+
+class BidiStreamingRpcHijackingInterceptorFactory
+ : public experimental::ClientInterceptorFactoryInterface {
+ public:
+ virtual experimental::Interceptor* CreateClientInterceptor(
+ experimental::ClientRpcInfo* info) override {
+ return new BidiStreamingRpcHijackingInterceptor(info);
+ }
+};
+
+// The logging interceptor is for testing purposes only. It is used to verify
+// that all the appropriate hook points are invoked for an RPC. The counts are
+// reset each time a new object of LoggingInterceptor is created, so only a
+// single RPC should be made on the channel before calling the Verify methods.
+class LoggingInterceptor : public experimental::Interceptor {
+ public:
+ LoggingInterceptor(experimental::ClientRpcInfo* /*info*/) {
+ pre_send_initial_metadata_ = false;
+ pre_send_message_count_ = 0;
+ pre_send_close_ = false;
+ post_recv_initial_metadata_ = false;
+ post_recv_message_count_ = 0;
+ post_recv_status_ = false;
+ }
+
+ virtual void Intercept(experimental::InterceptorBatchMethods* methods) {
+ if (methods->QueryInterceptionHookPoint(
+ experimental::InterceptionHookPoints::PRE_SEND_INITIAL_METADATA)) {
+ auto* map = methods->GetSendInitialMetadata();
+ // Check that we can see the test metadata
+ ASSERT_EQ(map->size(), static_cast<unsigned>(1));
+ auto iterator = map->begin();
+ EXPECT_EQ("testkey", iterator->first);
+ EXPECT_EQ("testvalue", iterator->second);
+ ASSERT_FALSE(pre_send_initial_metadata_);
+ pre_send_initial_metadata_ = true;
+ }
+ if (methods->QueryInterceptionHookPoint(
+ experimental::InterceptionHookPoints::PRE_SEND_MESSAGE)) {
+ EchoRequest req;
auto* send_msg = methods->GetSendMessage();
if (send_msg == nullptr) {
// We did not get the non-serialized form of the message. Get the
@@ -559,53 +559,53 @@ class LoggingInterceptor : public experimental::Interceptor {
static_cast<const EchoRequest*>(send_msg)->message().find("Hello"),
0u);
}
- auto* buffer = methods->GetSerializedSendMessage();
- auto copied_buffer = *buffer;
- EXPECT_TRUE(
- SerializationTraits<EchoRequest>::Deserialize(&copied_buffer, &req)
- .ok());
- EXPECT_TRUE(req.message().find("Hello") == 0u);
- pre_send_message_count_++;
- }
- if (methods->QueryInterceptionHookPoint(
- experimental::InterceptionHookPoints::PRE_SEND_CLOSE)) {
- // Got nothing to do here for now
- pre_send_close_ = true;
- }
- if (methods->QueryInterceptionHookPoint(
- experimental::InterceptionHookPoints::POST_RECV_INITIAL_METADATA)) {
- auto* map = methods->GetRecvInitialMetadata();
- // Got nothing better to do here for now
- EXPECT_EQ(map->size(), static_cast<unsigned>(0));
- post_recv_initial_metadata_ = true;
- }
- if (methods->QueryInterceptionHookPoint(
- experimental::InterceptionHookPoints::POST_RECV_MESSAGE)) {
- EchoResponse* resp =
- static_cast<EchoResponse*>(methods->GetRecvMessage());
- if (resp != nullptr) {
- EXPECT_TRUE(resp->message().find("Hello") == 0u);
- post_recv_message_count_++;
- }
- }
- if (methods->QueryInterceptionHookPoint(
- experimental::InterceptionHookPoints::POST_RECV_STATUS)) {
- auto* map = methods->GetRecvTrailingMetadata();
- bool found = false;
- // Check that we received the metadata as an echo
- for (const auto& pair : *map) {
- found = pair.first.starts_with("testkey") &&
- pair.second.starts_with("testvalue");
- if (found) break;
- }
- EXPECT_EQ(found, true);
- auto* status = methods->GetRecvStatus();
- EXPECT_EQ(status->ok(), true);
- post_recv_status_ = true;
- }
- methods->Proceed();
- }
-
+ auto* buffer = methods->GetSerializedSendMessage();
+ auto copied_buffer = *buffer;
+ EXPECT_TRUE(
+ SerializationTraits<EchoRequest>::Deserialize(&copied_buffer, &req)
+ .ok());
+ EXPECT_TRUE(req.message().find("Hello") == 0u);
+ pre_send_message_count_++;
+ }
+ if (methods->QueryInterceptionHookPoint(
+ experimental::InterceptionHookPoints::PRE_SEND_CLOSE)) {
+ // Got nothing to do here for now
+ pre_send_close_ = true;
+ }
+ if (methods->QueryInterceptionHookPoint(
+ experimental::InterceptionHookPoints::POST_RECV_INITIAL_METADATA)) {
+ auto* map = methods->GetRecvInitialMetadata();
+ // Got nothing better to do here for now
+ EXPECT_EQ(map->size(), static_cast<unsigned>(0));
+ post_recv_initial_metadata_ = true;
+ }
+ if (methods->QueryInterceptionHookPoint(
+ experimental::InterceptionHookPoints::POST_RECV_MESSAGE)) {
+ EchoResponse* resp =
+ static_cast<EchoResponse*>(methods->GetRecvMessage());
+ if (resp != nullptr) {
+ EXPECT_TRUE(resp->message().find("Hello") == 0u);
+ post_recv_message_count_++;
+ }
+ }
+ if (methods->QueryInterceptionHookPoint(
+ experimental::InterceptionHookPoints::POST_RECV_STATUS)) {
+ auto* map = methods->GetRecvTrailingMetadata();
+ bool found = false;
+ // Check that we received the metadata as an echo
+ for (const auto& pair : *map) {
+ found = pair.first.starts_with("testkey") &&
+ pair.second.starts_with("testvalue");
+ if (found) break;
+ }
+ EXPECT_EQ(found, true);
+ auto* status = methods->GetRecvStatus();
+ EXPECT_EQ(status->ok(), true);
+ post_recv_status_ = true;
+ }
+ methods->Proceed();
+ }
+
static void VerifyCall(RPCType type) {
switch (type) {
case RPCType::kSyncUnary:
@@ -627,62 +627,62 @@ class LoggingInterceptor : public experimental::Interceptor {
}
}
- static void VerifyCallCommon() {
- EXPECT_TRUE(pre_send_initial_metadata_);
- EXPECT_TRUE(pre_send_close_);
- EXPECT_TRUE(post_recv_initial_metadata_);
- EXPECT_TRUE(post_recv_status_);
- }
-
- static void VerifyUnaryCall() {
- VerifyCallCommon();
- EXPECT_EQ(pre_send_message_count_, 1);
- EXPECT_EQ(post_recv_message_count_, 1);
- }
-
- static void VerifyClientStreamingCall() {
- VerifyCallCommon();
- EXPECT_EQ(pre_send_message_count_, kNumStreamingMessages);
- EXPECT_EQ(post_recv_message_count_, 1);
- }
-
- static void VerifyServerStreamingCall() {
- VerifyCallCommon();
- EXPECT_EQ(pre_send_message_count_, 1);
- EXPECT_EQ(post_recv_message_count_, kNumStreamingMessages);
- }
-
- static void VerifyBidiStreamingCall() {
- VerifyCallCommon();
- EXPECT_EQ(pre_send_message_count_, kNumStreamingMessages);
- EXPECT_EQ(post_recv_message_count_, kNumStreamingMessages);
- }
-
- private:
- static bool pre_send_initial_metadata_;
- static int pre_send_message_count_;
- static bool pre_send_close_;
- static bool post_recv_initial_metadata_;
- static int post_recv_message_count_;
- static bool post_recv_status_;
-};
-
-bool LoggingInterceptor::pre_send_initial_metadata_;
-int LoggingInterceptor::pre_send_message_count_;
-bool LoggingInterceptor::pre_send_close_;
-bool LoggingInterceptor::post_recv_initial_metadata_;
-int LoggingInterceptor::post_recv_message_count_;
-bool LoggingInterceptor::post_recv_status_;
-
-class LoggingInterceptorFactory
- : public experimental::ClientInterceptorFactoryInterface {
- public:
- virtual experimental::Interceptor* CreateClientInterceptor(
- experimental::ClientRpcInfo* info) override {
- return new LoggingInterceptor(info);
- }
-};
-
+ static void VerifyCallCommon() {
+ EXPECT_TRUE(pre_send_initial_metadata_);
+ EXPECT_TRUE(pre_send_close_);
+ EXPECT_TRUE(post_recv_initial_metadata_);
+ EXPECT_TRUE(post_recv_status_);
+ }
+
+ static void VerifyUnaryCall() {
+ VerifyCallCommon();
+ EXPECT_EQ(pre_send_message_count_, 1);
+ EXPECT_EQ(post_recv_message_count_, 1);
+ }
+
+ static void VerifyClientStreamingCall() {
+ VerifyCallCommon();
+ EXPECT_EQ(pre_send_message_count_, kNumStreamingMessages);
+ EXPECT_EQ(post_recv_message_count_, 1);
+ }
+
+ static void VerifyServerStreamingCall() {
+ VerifyCallCommon();
+ EXPECT_EQ(pre_send_message_count_, 1);
+ EXPECT_EQ(post_recv_message_count_, kNumStreamingMessages);
+ }
+
+ static void VerifyBidiStreamingCall() {
+ VerifyCallCommon();
+ EXPECT_EQ(pre_send_message_count_, kNumStreamingMessages);
+ EXPECT_EQ(post_recv_message_count_, kNumStreamingMessages);
+ }
+
+ private:
+ static bool pre_send_initial_metadata_;
+ static int pre_send_message_count_;
+ static bool pre_send_close_;
+ static bool post_recv_initial_metadata_;
+ static int post_recv_message_count_;
+ static bool post_recv_status_;
+};
+
+bool LoggingInterceptor::pre_send_initial_metadata_;
+int LoggingInterceptor::pre_send_message_count_;
+bool LoggingInterceptor::pre_send_close_;
+bool LoggingInterceptor::post_recv_initial_metadata_;
+int LoggingInterceptor::post_recv_message_count_;
+bool LoggingInterceptor::post_recv_status_;
+
+class LoggingInterceptorFactory
+ : public experimental::ClientInterceptorFactoryInterface {
+ public:
+ virtual experimental::Interceptor* CreateClientInterceptor(
+ experimental::ClientRpcInfo* info) override {
+ return new LoggingInterceptor(info);
+ }
+};
+
class TestScenario {
public:
explicit TestScenario(const RPCType& type) : type_(type) {}
@@ -706,19 +706,19 @@ std::vector<TestScenario> CreateTestScenarios() {
class ParameterizedClientInterceptorsEnd2endTest
: public ::testing::TestWithParam<TestScenario> {
- protected:
+ protected:
ParameterizedClientInterceptorsEnd2endTest() {
- int port = grpc_pick_unused_port_or_die();
-
- ServerBuilder builder;
+ int port = grpc_pick_unused_port_or_die();
+
+ ServerBuilder builder;
server_address_ = "localhost:" + ToString(port);
- builder.AddListeningPort(server_address_, InsecureServerCredentials());
- builder.RegisterService(&service_);
- server_ = builder.BuildAndStart();
- }
-
+ builder.AddListeningPort(server_address_, InsecureServerCredentials());
+ builder.RegisterService(&service_);
+ server_ = builder.BuildAndStart();
+ }
+
~ParameterizedClientInterceptorsEnd2endTest() { server_->Shutdown(); }
-
+
void SendRPC(const std::shared_ptr<Channel>& channel) {
switch (GetParam().type()) {
case RPCType::kSyncUnary:
@@ -750,30 +750,30 @@ class ParameterizedClientInterceptorsEnd2endTest
TString server_address_;
EchoTestServiceStreamingImpl service_;
- std::unique_ptr<Server> server_;
-};
-
+ std::unique_ptr<Server> server_;
+};
+
TEST_P(ParameterizedClientInterceptorsEnd2endTest,
ClientInterceptorLoggingTest) {
- ChannelArguments args;
- DummyInterceptor::Reset();
- std::vector<std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
- creators;
- creators.push_back(std::unique_ptr<LoggingInterceptorFactory>(
- new LoggingInterceptorFactory()));
- // Add 20 dummy interceptors
- for (auto i = 0; i < 20; i++) {
- creators.push_back(std::unique_ptr<DummyInterceptorFactory>(
- new DummyInterceptorFactory()));
- }
- auto channel = experimental::CreateCustomChannelWithInterceptors(
- server_address_, InsecureChannelCredentials(), args, std::move(creators));
+ ChannelArguments args;
+ DummyInterceptor::Reset();
+ std::vector<std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
+ creators;
+ creators.push_back(std::unique_ptr<LoggingInterceptorFactory>(
+ new LoggingInterceptorFactory()));
+ // Add 20 dummy interceptors
+ for (auto i = 0; i < 20; i++) {
+ creators.push_back(std::unique_ptr<DummyInterceptorFactory>(
+ new DummyInterceptorFactory()));
+ }
+ auto channel = experimental::CreateCustomChannelWithInterceptors(
+ server_address_, InsecureChannelCredentials(), args, std::move(creators));
SendRPC(channel);
LoggingInterceptor::VerifyCall(GetParam().type());
- // Make sure all 20 dummy interceptors were run
- EXPECT_EQ(DummyInterceptor::GetNumTimesRun(), 20);
-}
-
+ // Make sure all 20 dummy interceptors were run
+ EXPECT_EQ(DummyInterceptor::GetNumTimesRun(), 20);
+}
+
INSTANTIATE_TEST_SUITE_P(ParameterizedClientInterceptorsEnd2end,
ParameterizedClientInterceptorsEnd2endTest,
::testing::ValuesIn(CreateTestScenarios()));
@@ -798,86 +798,86 @@ class ClientInterceptorsEnd2endTest
std::unique_ptr<Server> server_;
};
-TEST_F(ClientInterceptorsEnd2endTest,
- LameChannelClientInterceptorHijackingTest) {
- ChannelArguments args;
- std::vector<std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
- creators;
- creators.push_back(std::unique_ptr<HijackingInterceptorFactory>(
- new HijackingInterceptorFactory()));
- auto channel = experimental::CreateCustomChannelWithInterceptors(
- server_address_, nullptr, args, std::move(creators));
- MakeCall(channel);
-}
-
-TEST_F(ClientInterceptorsEnd2endTest, ClientInterceptorHijackingTest) {
- ChannelArguments args;
- DummyInterceptor::Reset();
- std::vector<std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
- creators;
- // Add 20 dummy interceptors before hijacking interceptor
- creators.reserve(20);
- for (auto i = 0; i < 20; i++) {
- creators.push_back(std::unique_ptr<DummyInterceptorFactory>(
- new DummyInterceptorFactory()));
- }
- creators.push_back(std::unique_ptr<HijackingInterceptorFactory>(
- new HijackingInterceptorFactory()));
- // Add 20 dummy interceptors after hijacking interceptor
- for (auto i = 0; i < 20; i++) {
- creators.push_back(std::unique_ptr<DummyInterceptorFactory>(
- new DummyInterceptorFactory()));
- }
- auto channel = experimental::CreateCustomChannelWithInterceptors(
- server_address_, InsecureChannelCredentials(), args, std::move(creators));
- MakeCall(channel);
- // Make sure only 20 dummy interceptors were run
- EXPECT_EQ(DummyInterceptor::GetNumTimesRun(), 20);
-}
-
-TEST_F(ClientInterceptorsEnd2endTest, ClientInterceptorLogThenHijackTest) {
- ChannelArguments args;
- std::vector<std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
- creators;
- creators.push_back(std::unique_ptr<LoggingInterceptorFactory>(
- new LoggingInterceptorFactory()));
- creators.push_back(std::unique_ptr<HijackingInterceptorFactory>(
- new HijackingInterceptorFactory()));
- auto channel = experimental::CreateCustomChannelWithInterceptors(
- server_address_, InsecureChannelCredentials(), args, std::move(creators));
- MakeCall(channel);
- LoggingInterceptor::VerifyUnaryCall();
-}
-
-TEST_F(ClientInterceptorsEnd2endTest,
- ClientInterceptorHijackingMakesAnotherCallTest) {
- ChannelArguments args;
- DummyInterceptor::Reset();
- std::vector<std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
- creators;
- // Add 5 dummy interceptors before hijacking interceptor
- creators.reserve(5);
- for (auto i = 0; i < 5; i++) {
- creators.push_back(std::unique_ptr<DummyInterceptorFactory>(
- new DummyInterceptorFactory()));
- }
- creators.push_back(
- std::unique_ptr<experimental::ClientInterceptorFactoryInterface>(
- new HijackingInterceptorMakesAnotherCallFactory()));
- // Add 7 dummy interceptors after hijacking interceptor
- for (auto i = 0; i < 7; i++) {
- creators.push_back(std::unique_ptr<DummyInterceptorFactory>(
- new DummyInterceptorFactory()));
- }
- auto channel = server_->experimental().InProcessChannelWithInterceptors(
- args, std::move(creators));
-
- MakeCall(channel);
- // Make sure all interceptors were run once, since the hijacking interceptor
- // makes an RPC on the intercepted channel
- EXPECT_EQ(DummyInterceptor::GetNumTimesRun(), 12);
-}
-
+TEST_F(ClientInterceptorsEnd2endTest,
+ LameChannelClientInterceptorHijackingTest) {
+ ChannelArguments args;
+ std::vector<std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
+ creators;
+ creators.push_back(std::unique_ptr<HijackingInterceptorFactory>(
+ new HijackingInterceptorFactory()));
+ auto channel = experimental::CreateCustomChannelWithInterceptors(
+ server_address_, nullptr, args, std::move(creators));
+ MakeCall(channel);
+}
+
+TEST_F(ClientInterceptorsEnd2endTest, ClientInterceptorHijackingTest) {
+ ChannelArguments args;
+ DummyInterceptor::Reset();
+ std::vector<std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
+ creators;
+ // Add 20 dummy interceptors before hijacking interceptor
+ creators.reserve(20);
+ for (auto i = 0; i < 20; i++) {
+ creators.push_back(std::unique_ptr<DummyInterceptorFactory>(
+ new DummyInterceptorFactory()));
+ }
+ creators.push_back(std::unique_ptr<HijackingInterceptorFactory>(
+ new HijackingInterceptorFactory()));
+ // Add 20 dummy interceptors after hijacking interceptor
+ for (auto i = 0; i < 20; i++) {
+ creators.push_back(std::unique_ptr<DummyInterceptorFactory>(
+ new DummyInterceptorFactory()));
+ }
+ auto channel = experimental::CreateCustomChannelWithInterceptors(
+ server_address_, InsecureChannelCredentials(), args, std::move(creators));
+ MakeCall(channel);
+ // Make sure only 20 dummy interceptors were run
+ EXPECT_EQ(DummyInterceptor::GetNumTimesRun(), 20);
+}
+
+TEST_F(ClientInterceptorsEnd2endTest, ClientInterceptorLogThenHijackTest) {
+ ChannelArguments args;
+ std::vector<std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
+ creators;
+ creators.push_back(std::unique_ptr<LoggingInterceptorFactory>(
+ new LoggingInterceptorFactory()));
+ creators.push_back(std::unique_ptr<HijackingInterceptorFactory>(
+ new HijackingInterceptorFactory()));
+ auto channel = experimental::CreateCustomChannelWithInterceptors(
+ server_address_, InsecureChannelCredentials(), args, std::move(creators));
+ MakeCall(channel);
+ LoggingInterceptor::VerifyUnaryCall();
+}
+
+TEST_F(ClientInterceptorsEnd2endTest,
+ ClientInterceptorHijackingMakesAnotherCallTest) {
+ ChannelArguments args;
+ DummyInterceptor::Reset();
+ std::vector<std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
+ creators;
+ // Add 5 dummy interceptors before hijacking interceptor
+ creators.reserve(5);
+ for (auto i = 0; i < 5; i++) {
+ creators.push_back(std::unique_ptr<DummyInterceptorFactory>(
+ new DummyInterceptorFactory()));
+ }
+ creators.push_back(
+ std::unique_ptr<experimental::ClientInterceptorFactoryInterface>(
+ new HijackingInterceptorMakesAnotherCallFactory()));
+ // Add 7 dummy interceptors after hijacking interceptor
+ for (auto i = 0; i < 7; i++) {
+ creators.push_back(std::unique_ptr<DummyInterceptorFactory>(
+ new DummyInterceptorFactory()));
+ }
+ auto channel = server_->experimental().InProcessChannelWithInterceptors(
+ args, std::move(creators));
+
+ MakeCall(channel);
+ // Make sure all interceptors were run once, since the hijacking interceptor
+ // makes an RPC on the intercepted channel
+ EXPECT_EQ(DummyInterceptor::GetNumTimesRun(), 12);
+}
+
class ClientInterceptorsCallbackEnd2endTest : public ::testing::Test {
protected:
ClientInterceptorsCallbackEnd2endTest() {
@@ -898,151 +898,151 @@ class ClientInterceptorsCallbackEnd2endTest : public ::testing::Test {
};
TEST_F(ClientInterceptorsCallbackEnd2endTest,
- ClientInterceptorLoggingTestWithCallback) {
- ChannelArguments args;
- DummyInterceptor::Reset();
- std::vector<std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
- creators;
- creators.push_back(std::unique_ptr<LoggingInterceptorFactory>(
- new LoggingInterceptorFactory()));
- // Add 20 dummy interceptors
- for (auto i = 0; i < 20; i++) {
- creators.push_back(std::unique_ptr<DummyInterceptorFactory>(
- new DummyInterceptorFactory()));
- }
- auto channel = server_->experimental().InProcessChannelWithInterceptors(
- args, std::move(creators));
- MakeCallbackCall(channel);
- LoggingInterceptor::VerifyUnaryCall();
- // Make sure all 20 dummy interceptors were run
- EXPECT_EQ(DummyInterceptor::GetNumTimesRun(), 20);
-}
-
+ ClientInterceptorLoggingTestWithCallback) {
+ ChannelArguments args;
+ DummyInterceptor::Reset();
+ std::vector<std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
+ creators;
+ creators.push_back(std::unique_ptr<LoggingInterceptorFactory>(
+ new LoggingInterceptorFactory()));
+ // Add 20 dummy interceptors
+ for (auto i = 0; i < 20; i++) {
+ creators.push_back(std::unique_ptr<DummyInterceptorFactory>(
+ new DummyInterceptorFactory()));
+ }
+ auto channel = server_->experimental().InProcessChannelWithInterceptors(
+ args, std::move(creators));
+ MakeCallbackCall(channel);
+ LoggingInterceptor::VerifyUnaryCall();
+ // Make sure all 20 dummy interceptors were run
+ EXPECT_EQ(DummyInterceptor::GetNumTimesRun(), 20);
+}
+
TEST_F(ClientInterceptorsCallbackEnd2endTest,
- ClientInterceptorFactoryAllowsNullptrReturn) {
- ChannelArguments args;
- DummyInterceptor::Reset();
- std::vector<std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
- creators;
- creators.push_back(std::unique_ptr<LoggingInterceptorFactory>(
- new LoggingInterceptorFactory()));
- // Add 20 dummy interceptors and 20 null interceptors
- for (auto i = 0; i < 20; i++) {
- creators.push_back(std::unique_ptr<DummyInterceptorFactory>(
- new DummyInterceptorFactory()));
- creators.push_back(
- std::unique_ptr<NullInterceptorFactory>(new NullInterceptorFactory()));
- }
- auto channel = server_->experimental().InProcessChannelWithInterceptors(
- args, std::move(creators));
- MakeCallbackCall(channel);
- LoggingInterceptor::VerifyUnaryCall();
- // Make sure all 20 dummy interceptors were run
- EXPECT_EQ(DummyInterceptor::GetNumTimesRun(), 20);
-}
-
-class ClientInterceptorsStreamingEnd2endTest : public ::testing::Test {
- protected:
- ClientInterceptorsStreamingEnd2endTest() {
- int port = grpc_pick_unused_port_or_die();
-
- ServerBuilder builder;
+ ClientInterceptorFactoryAllowsNullptrReturn) {
+ ChannelArguments args;
+ DummyInterceptor::Reset();
+ std::vector<std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
+ creators;
+ creators.push_back(std::unique_ptr<LoggingInterceptorFactory>(
+ new LoggingInterceptorFactory()));
+ // Add 20 dummy interceptors and 20 null interceptors
+ for (auto i = 0; i < 20; i++) {
+ creators.push_back(std::unique_ptr<DummyInterceptorFactory>(
+ new DummyInterceptorFactory()));
+ creators.push_back(
+ std::unique_ptr<NullInterceptorFactory>(new NullInterceptorFactory()));
+ }
+ auto channel = server_->experimental().InProcessChannelWithInterceptors(
+ args, std::move(creators));
+ MakeCallbackCall(channel);
+ LoggingInterceptor::VerifyUnaryCall();
+ // Make sure all 20 dummy interceptors were run
+ EXPECT_EQ(DummyInterceptor::GetNumTimesRun(), 20);
+}
+
+class ClientInterceptorsStreamingEnd2endTest : public ::testing::Test {
+ protected:
+ ClientInterceptorsStreamingEnd2endTest() {
+ int port = grpc_pick_unused_port_or_die();
+
+ ServerBuilder builder;
server_address_ = "localhost:" + ToString(port);
- builder.AddListeningPort(server_address_, InsecureServerCredentials());
- builder.RegisterService(&service_);
- server_ = builder.BuildAndStart();
- }
-
- ~ClientInterceptorsStreamingEnd2endTest() { server_->Shutdown(); }
-
+ builder.AddListeningPort(server_address_, InsecureServerCredentials());
+ builder.RegisterService(&service_);
+ server_ = builder.BuildAndStart();
+ }
+
+ ~ClientInterceptorsStreamingEnd2endTest() { server_->Shutdown(); }
+
TString server_address_;
- EchoTestServiceStreamingImpl service_;
- std::unique_ptr<Server> server_;
-};
-
-TEST_F(ClientInterceptorsStreamingEnd2endTest, ClientStreamingTest) {
- ChannelArguments args;
- DummyInterceptor::Reset();
- std::vector<std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
- creators;
- creators.push_back(std::unique_ptr<LoggingInterceptorFactory>(
- new LoggingInterceptorFactory()));
- // Add 20 dummy interceptors
- for (auto i = 0; i < 20; i++) {
- creators.push_back(std::unique_ptr<DummyInterceptorFactory>(
- new DummyInterceptorFactory()));
- }
- auto channel = experimental::CreateCustomChannelWithInterceptors(
- server_address_, InsecureChannelCredentials(), args, std::move(creators));
- MakeClientStreamingCall(channel);
- LoggingInterceptor::VerifyClientStreamingCall();
- // Make sure all 20 dummy interceptors were run
- EXPECT_EQ(DummyInterceptor::GetNumTimesRun(), 20);
-}
-
-TEST_F(ClientInterceptorsStreamingEnd2endTest, ServerStreamingTest) {
- ChannelArguments args;
- DummyInterceptor::Reset();
- std::vector<std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
- creators;
- creators.push_back(std::unique_ptr<LoggingInterceptorFactory>(
- new LoggingInterceptorFactory()));
- // Add 20 dummy interceptors
- for (auto i = 0; i < 20; i++) {
- creators.push_back(std::unique_ptr<DummyInterceptorFactory>(
- new DummyInterceptorFactory()));
- }
- auto channel = experimental::CreateCustomChannelWithInterceptors(
- server_address_, InsecureChannelCredentials(), args, std::move(creators));
- MakeServerStreamingCall(channel);
- LoggingInterceptor::VerifyServerStreamingCall();
- // Make sure all 20 dummy interceptors were run
- EXPECT_EQ(DummyInterceptor::GetNumTimesRun(), 20);
-}
-
-TEST_F(ClientInterceptorsStreamingEnd2endTest, ClientStreamingHijackingTest) {
- ChannelArguments args;
- std::vector<std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
- creators;
- creators.push_back(
- std::unique_ptr<ClientStreamingRpcHijackingInterceptorFactory>(
- new ClientStreamingRpcHijackingInterceptorFactory()));
- auto channel = experimental::CreateCustomChannelWithInterceptors(
- server_address_, InsecureChannelCredentials(), args, std::move(creators));
-
- auto stub = grpc::testing::EchoTestService::NewStub(channel);
- ClientContext ctx;
- EchoRequest req;
- EchoResponse resp;
- req.mutable_param()->set_echo_metadata(true);
- req.set_message("Hello");
- string expected_resp = "";
- auto writer = stub->RequestStream(&ctx, &resp);
- for (int i = 0; i < 10; i++) {
- EXPECT_TRUE(writer->Write(req));
- expected_resp += "Hello";
- }
- // The interceptor will reject the 11th message
- writer->Write(req);
- Status s = writer->Finish();
- EXPECT_EQ(s.ok(), false);
- EXPECT_TRUE(ClientStreamingRpcHijackingInterceptor::GotFailedSend());
-}
-
-TEST_F(ClientInterceptorsStreamingEnd2endTest, ServerStreamingHijackingTest) {
- ChannelArguments args;
- DummyInterceptor::Reset();
- std::vector<std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
- creators;
- creators.push_back(
- std::unique_ptr<ServerStreamingRpcHijackingInterceptorFactory>(
- new ServerStreamingRpcHijackingInterceptorFactory()));
- auto channel = experimental::CreateCustomChannelWithInterceptors(
- server_address_, InsecureChannelCredentials(), args, std::move(creators));
- MakeServerStreamingCall(channel);
- EXPECT_TRUE(ServerStreamingRpcHijackingInterceptor::GotFailedMessage());
-}
-
+ EchoTestServiceStreamingImpl service_;
+ std::unique_ptr<Server> server_;
+};
+
+TEST_F(ClientInterceptorsStreamingEnd2endTest, ClientStreamingTest) {
+ ChannelArguments args;
+ DummyInterceptor::Reset();
+ std::vector<std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
+ creators;
+ creators.push_back(std::unique_ptr<LoggingInterceptorFactory>(
+ new LoggingInterceptorFactory()));
+ // Add 20 dummy interceptors
+ for (auto i = 0; i < 20; i++) {
+ creators.push_back(std::unique_ptr<DummyInterceptorFactory>(
+ new DummyInterceptorFactory()));
+ }
+ auto channel = experimental::CreateCustomChannelWithInterceptors(
+ server_address_, InsecureChannelCredentials(), args, std::move(creators));
+ MakeClientStreamingCall(channel);
+ LoggingInterceptor::VerifyClientStreamingCall();
+ // Make sure all 20 dummy interceptors were run
+ EXPECT_EQ(DummyInterceptor::GetNumTimesRun(), 20);
+}
+
+TEST_F(ClientInterceptorsStreamingEnd2endTest, ServerStreamingTest) {
+ ChannelArguments args;
+ DummyInterceptor::Reset();
+ std::vector<std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
+ creators;
+ creators.push_back(std::unique_ptr<LoggingInterceptorFactory>(
+ new LoggingInterceptorFactory()));
+ // Add 20 dummy interceptors
+ for (auto i = 0; i < 20; i++) {
+ creators.push_back(std::unique_ptr<DummyInterceptorFactory>(
+ new DummyInterceptorFactory()));
+ }
+ auto channel = experimental::CreateCustomChannelWithInterceptors(
+ server_address_, InsecureChannelCredentials(), args, std::move(creators));
+ MakeServerStreamingCall(channel);
+ LoggingInterceptor::VerifyServerStreamingCall();
+ // Make sure all 20 dummy interceptors were run
+ EXPECT_EQ(DummyInterceptor::GetNumTimesRun(), 20);
+}
+
+TEST_F(ClientInterceptorsStreamingEnd2endTest, ClientStreamingHijackingTest) {
+ ChannelArguments args;
+ std::vector<std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
+ creators;
+ creators.push_back(
+ std::unique_ptr<ClientStreamingRpcHijackingInterceptorFactory>(
+ new ClientStreamingRpcHijackingInterceptorFactory()));
+ auto channel = experimental::CreateCustomChannelWithInterceptors(
+ server_address_, InsecureChannelCredentials(), args, std::move(creators));
+
+ auto stub = grpc::testing::EchoTestService::NewStub(channel);
+ ClientContext ctx;
+ EchoRequest req;
+ EchoResponse resp;
+ req.mutable_param()->set_echo_metadata(true);
+ req.set_message("Hello");
+ string expected_resp = "";
+ auto writer = stub->RequestStream(&ctx, &resp);
+ for (int i = 0; i < 10; i++) {
+ EXPECT_TRUE(writer->Write(req));
+ expected_resp += "Hello";
+ }
+ // The interceptor will reject the 11th message
+ writer->Write(req);
+ Status s = writer->Finish();
+ EXPECT_EQ(s.ok(), false);
+ EXPECT_TRUE(ClientStreamingRpcHijackingInterceptor::GotFailedSend());
+}
+
+TEST_F(ClientInterceptorsStreamingEnd2endTest, ServerStreamingHijackingTest) {
+ ChannelArguments args;
+ DummyInterceptor::Reset();
+ std::vector<std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
+ creators;
+ creators.push_back(
+ std::unique_ptr<ServerStreamingRpcHijackingInterceptorFactory>(
+ new ServerStreamingRpcHijackingInterceptorFactory()));
+ auto channel = experimental::CreateCustomChannelWithInterceptors(
+ server_address_, InsecureChannelCredentials(), args, std::move(creators));
+ MakeServerStreamingCall(channel);
+ EXPECT_TRUE(ServerStreamingRpcHijackingInterceptor::GotFailedMessage());
+}
+
TEST_F(ClientInterceptorsStreamingEnd2endTest,
AsyncCQServerStreamingHijackingTest) {
ChannelArguments args;
@@ -1058,137 +1058,137 @@ TEST_F(ClientInterceptorsStreamingEnd2endTest,
EXPECT_TRUE(ServerStreamingRpcHijackingInterceptor::GotFailedMessage());
}
-TEST_F(ClientInterceptorsStreamingEnd2endTest, BidiStreamingHijackingTest) {
- ChannelArguments args;
- DummyInterceptor::Reset();
- std::vector<std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
- creators;
- creators.push_back(
- std::unique_ptr<BidiStreamingRpcHijackingInterceptorFactory>(
- new BidiStreamingRpcHijackingInterceptorFactory()));
- auto channel = experimental::CreateCustomChannelWithInterceptors(
- server_address_, InsecureChannelCredentials(), args, std::move(creators));
- MakeBidiStreamingCall(channel);
-}
-
-TEST_F(ClientInterceptorsStreamingEnd2endTest, BidiStreamingTest) {
- ChannelArguments args;
- DummyInterceptor::Reset();
- std::vector<std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
- creators;
- creators.push_back(std::unique_ptr<LoggingInterceptorFactory>(
- new LoggingInterceptorFactory()));
- // Add 20 dummy interceptors
- for (auto i = 0; i < 20; i++) {
- creators.push_back(std::unique_ptr<DummyInterceptorFactory>(
- new DummyInterceptorFactory()));
- }
- auto channel = experimental::CreateCustomChannelWithInterceptors(
- server_address_, InsecureChannelCredentials(), args, std::move(creators));
- MakeBidiStreamingCall(channel);
- LoggingInterceptor::VerifyBidiStreamingCall();
- // Make sure all 20 dummy interceptors were run
- EXPECT_EQ(DummyInterceptor::GetNumTimesRun(), 20);
-}
-
-class ClientGlobalInterceptorEnd2endTest : public ::testing::Test {
- protected:
- ClientGlobalInterceptorEnd2endTest() {
- int port = grpc_pick_unused_port_or_die();
-
- ServerBuilder builder;
+TEST_F(ClientInterceptorsStreamingEnd2endTest, BidiStreamingHijackingTest) {
+ ChannelArguments args;
+ DummyInterceptor::Reset();
+ std::vector<std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
+ creators;
+ creators.push_back(
+ std::unique_ptr<BidiStreamingRpcHijackingInterceptorFactory>(
+ new BidiStreamingRpcHijackingInterceptorFactory()));
+ auto channel = experimental::CreateCustomChannelWithInterceptors(
+ server_address_, InsecureChannelCredentials(), args, std::move(creators));
+ MakeBidiStreamingCall(channel);
+}
+
+TEST_F(ClientInterceptorsStreamingEnd2endTest, BidiStreamingTest) {
+ ChannelArguments args;
+ DummyInterceptor::Reset();
+ std::vector<std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
+ creators;
+ creators.push_back(std::unique_ptr<LoggingInterceptorFactory>(
+ new LoggingInterceptorFactory()));
+ // Add 20 dummy interceptors
+ for (auto i = 0; i < 20; i++) {
+ creators.push_back(std::unique_ptr<DummyInterceptorFactory>(
+ new DummyInterceptorFactory()));
+ }
+ auto channel = experimental::CreateCustomChannelWithInterceptors(
+ server_address_, InsecureChannelCredentials(), args, std::move(creators));
+ MakeBidiStreamingCall(channel);
+ LoggingInterceptor::VerifyBidiStreamingCall();
+ // Make sure all 20 dummy interceptors were run
+ EXPECT_EQ(DummyInterceptor::GetNumTimesRun(), 20);
+}
+
+class ClientGlobalInterceptorEnd2endTest : public ::testing::Test {
+ protected:
+ ClientGlobalInterceptorEnd2endTest() {
+ int port = grpc_pick_unused_port_or_die();
+
+ ServerBuilder builder;
server_address_ = "localhost:" + ToString(port);
- builder.AddListeningPort(server_address_, InsecureServerCredentials());
- builder.RegisterService(&service_);
- server_ = builder.BuildAndStart();
- }
-
- ~ClientGlobalInterceptorEnd2endTest() { server_->Shutdown(); }
-
+ builder.AddListeningPort(server_address_, InsecureServerCredentials());
+ builder.RegisterService(&service_);
+ server_ = builder.BuildAndStart();
+ }
+
+ ~ClientGlobalInterceptorEnd2endTest() { server_->Shutdown(); }
+
TString server_address_;
- TestServiceImpl service_;
- std::unique_ptr<Server> server_;
-};
-
-TEST_F(ClientGlobalInterceptorEnd2endTest, DummyGlobalInterceptor) {
- // We should ideally be registering a global interceptor only once per
- // process, but for the purposes of testing, it should be fine to modify the
- // registered global interceptor when there are no ongoing gRPC operations
- DummyInterceptorFactory global_factory;
- experimental::RegisterGlobalClientInterceptorFactory(&global_factory);
- ChannelArguments args;
- DummyInterceptor::Reset();
- std::vector<std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
- creators;
- // Add 20 dummy interceptors
- creators.reserve(20);
- for (auto i = 0; i < 20; i++) {
- creators.push_back(std::unique_ptr<DummyInterceptorFactory>(
- new DummyInterceptorFactory()));
- }
- auto channel = experimental::CreateCustomChannelWithInterceptors(
- server_address_, InsecureChannelCredentials(), args, std::move(creators));
- MakeCall(channel);
- // Make sure all 20 dummy interceptors were run with the global interceptor
- EXPECT_EQ(DummyInterceptor::GetNumTimesRun(), 21);
- experimental::TestOnlyResetGlobalClientInterceptorFactory();
-}
-
-TEST_F(ClientGlobalInterceptorEnd2endTest, LoggingGlobalInterceptor) {
- // We should ideally be registering a global interceptor only once per
- // process, but for the purposes of testing, it should be fine to modify the
- // registered global interceptor when there are no ongoing gRPC operations
- LoggingInterceptorFactory global_factory;
- experimental::RegisterGlobalClientInterceptorFactory(&global_factory);
- ChannelArguments args;
- DummyInterceptor::Reset();
- std::vector<std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
- creators;
- // Add 20 dummy interceptors
- creators.reserve(20);
- for (auto i = 0; i < 20; i++) {
- creators.push_back(std::unique_ptr<DummyInterceptorFactory>(
- new DummyInterceptorFactory()));
- }
- auto channel = experimental::CreateCustomChannelWithInterceptors(
- server_address_, InsecureChannelCredentials(), args, std::move(creators));
- MakeCall(channel);
- LoggingInterceptor::VerifyUnaryCall();
- // Make sure all 20 dummy interceptors were run
- EXPECT_EQ(DummyInterceptor::GetNumTimesRun(), 20);
- experimental::TestOnlyResetGlobalClientInterceptorFactory();
-}
-
-TEST_F(ClientGlobalInterceptorEnd2endTest, HijackingGlobalInterceptor) {
- // We should ideally be registering a global interceptor only once per
- // process, but for the purposes of testing, it should be fine to modify the
- // registered global interceptor when there are no ongoing gRPC operations
- HijackingInterceptorFactory global_factory;
- experimental::RegisterGlobalClientInterceptorFactory(&global_factory);
- ChannelArguments args;
- DummyInterceptor::Reset();
- std::vector<std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
- creators;
- // Add 20 dummy interceptors
- creators.reserve(20);
- for (auto i = 0; i < 20; i++) {
- creators.push_back(std::unique_ptr<DummyInterceptorFactory>(
- new DummyInterceptorFactory()));
- }
- auto channel = experimental::CreateCustomChannelWithInterceptors(
- server_address_, InsecureChannelCredentials(), args, std::move(creators));
- MakeCall(channel);
- // Make sure all 20 dummy interceptors were run
- EXPECT_EQ(DummyInterceptor::GetNumTimesRun(), 20);
- experimental::TestOnlyResetGlobalClientInterceptorFactory();
-}
-
-} // namespace
-} // namespace testing
-} // namespace grpc
-
-int main(int argc, char** argv) {
- grpc::testing::TestEnvironment env(argc, argv);
- ::testing::InitGoogleTest(&argc, argv);
- return RUN_ALL_TESTS();
-}
+ TestServiceImpl service_;
+ std::unique_ptr<Server> server_;
+};
+
+TEST_F(ClientGlobalInterceptorEnd2endTest, DummyGlobalInterceptor) {
+ // We should ideally be registering a global interceptor only once per
+ // process, but for the purposes of testing, it should be fine to modify the
+ // registered global interceptor when there are no ongoing gRPC operations
+ DummyInterceptorFactory global_factory;
+ experimental::RegisterGlobalClientInterceptorFactory(&global_factory);
+ ChannelArguments args;
+ DummyInterceptor::Reset();
+ std::vector<std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
+ creators;
+ // Add 20 dummy interceptors
+ creators.reserve(20);
+ for (auto i = 0; i < 20; i++) {
+ creators.push_back(std::unique_ptr<DummyInterceptorFactory>(
+ new DummyInterceptorFactory()));
+ }
+ auto channel = experimental::CreateCustomChannelWithInterceptors(
+ server_address_, InsecureChannelCredentials(), args, std::move(creators));
+ MakeCall(channel);
+ // Make sure all 20 dummy interceptors were run with the global interceptor
+ EXPECT_EQ(DummyInterceptor::GetNumTimesRun(), 21);
+ experimental::TestOnlyResetGlobalClientInterceptorFactory();
+}
+
+TEST_F(ClientGlobalInterceptorEnd2endTest, LoggingGlobalInterceptor) {
+ // We should ideally be registering a global interceptor only once per
+ // process, but for the purposes of testing, it should be fine to modify the
+ // registered global interceptor when there are no ongoing gRPC operations
+ LoggingInterceptorFactory global_factory;
+ experimental::RegisterGlobalClientInterceptorFactory(&global_factory);
+ ChannelArguments args;
+ DummyInterceptor::Reset();
+ std::vector<std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
+ creators;
+ // Add 20 dummy interceptors
+ creators.reserve(20);
+ for (auto i = 0; i < 20; i++) {
+ creators.push_back(std::unique_ptr<DummyInterceptorFactory>(
+ new DummyInterceptorFactory()));
+ }
+ auto channel = experimental::CreateCustomChannelWithInterceptors(
+ server_address_, InsecureChannelCredentials(), args, std::move(creators));
+ MakeCall(channel);
+ LoggingInterceptor::VerifyUnaryCall();
+ // Make sure all 20 dummy interceptors were run
+ EXPECT_EQ(DummyInterceptor::GetNumTimesRun(), 20);
+ experimental::TestOnlyResetGlobalClientInterceptorFactory();
+}
+
+TEST_F(ClientGlobalInterceptorEnd2endTest, HijackingGlobalInterceptor) {
+ // We should ideally be registering a global interceptor only once per
+ // process, but for the purposes of testing, it should be fine to modify the
+ // registered global interceptor when there are no ongoing gRPC operations
+ HijackingInterceptorFactory global_factory;
+ experimental::RegisterGlobalClientInterceptorFactory(&global_factory);
+ ChannelArguments args;
+ DummyInterceptor::Reset();
+ std::vector<std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
+ creators;
+ // Add 20 dummy interceptors
+ creators.reserve(20);
+ for (auto i = 0; i < 20; i++) {
+ creators.push_back(std::unique_ptr<DummyInterceptorFactory>(
+ new DummyInterceptorFactory()));
+ }
+ auto channel = experimental::CreateCustomChannelWithInterceptors(
+ server_address_, InsecureChannelCredentials(), args, std::move(creators));
+ MakeCall(channel);
+ // Make sure all 20 dummy interceptors were run
+ EXPECT_EQ(DummyInterceptor::GetNumTimesRun(), 20);
+ experimental::TestOnlyResetGlobalClientInterceptorFactory();
+}
+
+} // namespace
+} // namespace testing
+} // namespace grpc
+
+int main(int argc, char** argv) {
+ grpc::testing::TestEnvironment env(argc, argv);
+ ::testing::InitGoogleTest(&argc, argv);
+ return RUN_ALL_TESTS();
+}
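
For reference, every client-interceptor test in the file above exercises the same API shape: an experimental::Interceptor subclass whose Intercept() queries the relevant InterceptionHookPoints and then resumes the batch with Proceed() (or takes it over with Hijack()), plus an experimental::ClientInterceptorFactoryInterface whose factories are collected into a vector and handed to experimental::CreateCustomChannelWithInterceptors(). A minimal sketch of that pattern follows; it is illustrative only — the class names and the include path are assumptions, not code from this commit.

#include <grpcpp/grpcpp.h>
#include <grpcpp/support/client_interceptor.h>  // include path assumed

// Minimal client interceptor: shows only the hook-point query pattern used
// throughout the tests above (class name is illustrative, not from the patch).
class MinimalClientInterceptor : public grpc::experimental::Interceptor {
 public:
  explicit MinimalClientInterceptor(grpc::experimental::ClientRpcInfo* info)
      : info_(info) {}

  void Intercept(
      grpc::experimental::InterceptorBatchMethods* methods) override {
    if (methods->QueryInterceptionHookPoint(
            grpc::experimental::InterceptionHookPoints::
                PRE_SEND_INITIAL_METADATA)) {
      // Inspect or mutate methods->GetSendInitialMetadata() here.
    }
    // Every Intercept() invocation must either Proceed() or Hijack().
    methods->Proceed();
  }

 private:
  grpc::experimental::ClientRpcInfo* info_;
};

class MinimalClientInterceptorFactory
    : public grpc::experimental::ClientInterceptorFactoryInterface {
 public:
  grpc::experimental::Interceptor* CreateClientInterceptor(
      grpc::experimental::ClientRpcInfo* info) override {
    return new MinimalClientInterceptor(info);
  }
};

Wiring it up mirrors the tests: push a factory instance into a std::vector of unique_ptr<experimental::ClientInterceptorFactoryInterface> and pass that vector to CreateCustomChannelWithInterceptors() together with the server address, channel credentials, and ChannelArguments.
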
diff --git a/contrib/libs/grpc/test/cpp/end2end/client_lb_end2end_test.cc b/contrib/libs/grpc/test/cpp/end2end/client_lb_end2end_test.cc
index 30cfb444a1..fd08dd163d 100644
--- a/contrib/libs/grpc/test/cpp/end2end/client_lb_end2end_test.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/client_lb_end2end_test.cc
@@ -1,1655 +1,1655 @@
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include <algorithm>
-#include <memory>
-#include <mutex>
-#include <random>
-#include <set>
+/*
+ *
+ * Copyright 2016 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <algorithm>
+#include <memory>
+#include <mutex>
+#include <random>
+#include <set>
#include <util/generic/string.h>
-#include <thread>
-
+#include <thread>
+
#include "y_absl/strings/str_cat.h"
-#include <grpc/grpc.h>
-#include <grpc/support/alloc.h>
-#include <grpc/support/atm.h>
-#include <grpc/support/log.h>
-#include <grpc/support/time.h>
-#include <grpcpp/channel.h>
-#include <grpcpp/client_context.h>
-#include <grpcpp/create_channel.h>
-#include <grpcpp/health_check_service_interface.h>
-#include <grpcpp/impl/codegen/sync.h>
-#include <grpcpp/server.h>
-#include <grpcpp/server_builder.h>
-
-#include "src/core/ext/filters/client_channel/backup_poller.h"
-#include "src/core/ext/filters/client_channel/global_subchannel_pool.h"
-#include "src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h"
-#include "src/core/ext/filters/client_channel/server_address.h"
-#include "src/core/ext/filters/client_channel/service_config.h"
-#include "src/core/lib/backoff/backoff.h"
-#include "src/core/lib/channel/channel_args.h"
-#include "src/core/lib/gpr/env.h"
-#include "src/core/lib/gprpp/debug_location.h"
-#include "src/core/lib/gprpp/ref_counted_ptr.h"
+#include <grpc/grpc.h>
+#include <grpc/support/alloc.h>
+#include <grpc/support/atm.h>
+#include <grpc/support/log.h>
+#include <grpc/support/time.h>
+#include <grpcpp/channel.h>
+#include <grpcpp/client_context.h>
+#include <grpcpp/create_channel.h>
+#include <grpcpp/health_check_service_interface.h>
+#include <grpcpp/impl/codegen/sync.h>
+#include <grpcpp/server.h>
+#include <grpcpp/server_builder.h>
+
+#include "src/core/ext/filters/client_channel/backup_poller.h"
+#include "src/core/ext/filters/client_channel/global_subchannel_pool.h"
+#include "src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h"
+#include "src/core/ext/filters/client_channel/server_address.h"
+#include "src/core/ext/filters/client_channel/service_config.h"
+#include "src/core/lib/backoff/backoff.h"
+#include "src/core/lib/channel/channel_args.h"
+#include "src/core/lib/gpr/env.h"
+#include "src/core/lib/gprpp/debug_location.h"
+#include "src/core/lib/gprpp/ref_counted_ptr.h"
#include "src/core/lib/iomgr/parse_address.h"
-#include "src/core/lib/iomgr/tcp_client.h"
-#include "src/core/lib/security/credentials/fake/fake_credentials.h"
-#include "src/cpp/client/secure_credentials.h"
-#include "src/cpp/server/secure_server_credentials.h"
-
-#include "src/proto/grpc/testing/echo.grpc.pb.h"
-#include "src/proto/grpc/testing/xds/orca_load_report_for_test.pb.h"
-#include "test/core/util/port.h"
-#include "test/core/util/test_config.h"
-#include "test/core/util/test_lb_policies.h"
-#include "test/cpp/end2end/test_service_impl.h"
-
-#include <gmock/gmock.h>
-#include <gtest/gtest.h>
-
-using grpc::testing::EchoRequest;
-using grpc::testing::EchoResponse;
-using std::chrono::system_clock;
-
-// defined in tcp_client.cc
-extern grpc_tcp_client_vtable* grpc_tcp_client_impl;
-
-static grpc_tcp_client_vtable* default_client_impl;
-
-namespace grpc {
-namespace testing {
-namespace {
-
-gpr_atm g_connection_delay_ms;
-
-void tcp_client_connect_with_delay(grpc_closure* closure, grpc_endpoint** ep,
- grpc_pollset_set* interested_parties,
- const grpc_channel_args* channel_args,
- const grpc_resolved_address* addr,
- grpc_millis deadline) {
- const int delay_ms = gpr_atm_acq_load(&g_connection_delay_ms);
- if (delay_ms > 0) {
- gpr_sleep_until(grpc_timeout_milliseconds_to_deadline(delay_ms));
- }
- default_client_impl->connect(closure, ep, interested_parties, channel_args,
- addr, deadline + delay_ms);
-}
-
-grpc_tcp_client_vtable delayed_connect = {tcp_client_connect_with_delay};
-
-// Subclass of TestServiceImpl that increments a request counter for
-// every call to the Echo RPC.
-class MyTestServiceImpl : public TestServiceImpl {
- public:
- Status Echo(ServerContext* context, const EchoRequest* request,
- EchoResponse* response) override {
- const udpa::data::orca::v1::OrcaLoadReport* load_report = nullptr;
- {
- grpc::internal::MutexLock lock(&mu_);
- ++request_count_;
- load_report = load_report_;
- }
- AddClient(context->peer().c_str());
- if (load_report != nullptr) {
- // TODO(roth): Once we provide a more standard server-side API for
- // populating this data, use that API here.
- context->AddTrailingMetadata("x-endpoint-load-metrics-bin",
- load_report->SerializeAsString());
- }
- return TestServiceImpl::Echo(context, request, response);
- }
-
- int request_count() {
- grpc::internal::MutexLock lock(&mu_);
- return request_count_;
- }
-
- void ResetCounters() {
- grpc::internal::MutexLock lock(&mu_);
- request_count_ = 0;
- }
-
+#include "src/core/lib/iomgr/tcp_client.h"
+#include "src/core/lib/security/credentials/fake/fake_credentials.h"
+#include "src/cpp/client/secure_credentials.h"
+#include "src/cpp/server/secure_server_credentials.h"
+
+#include "src/proto/grpc/testing/echo.grpc.pb.h"
+#include "src/proto/grpc/testing/xds/orca_load_report_for_test.pb.h"
+#include "test/core/util/port.h"
+#include "test/core/util/test_config.h"
+#include "test/core/util/test_lb_policies.h"
+#include "test/cpp/end2end/test_service_impl.h"
+
+#include <gmock/gmock.h>
+#include <gtest/gtest.h>
+
+using grpc::testing::EchoRequest;
+using grpc::testing::EchoResponse;
+using std::chrono::system_clock;
+
+// defined in tcp_client.cc
+extern grpc_tcp_client_vtable* grpc_tcp_client_impl;
+
+static grpc_tcp_client_vtable* default_client_impl;
+
+namespace grpc {
+namespace testing {
+namespace {
+
+gpr_atm g_connection_delay_ms;
+
+void tcp_client_connect_with_delay(grpc_closure* closure, grpc_endpoint** ep,
+ grpc_pollset_set* interested_parties,
+ const grpc_channel_args* channel_args,
+ const grpc_resolved_address* addr,
+ grpc_millis deadline) {
+ const int delay_ms = gpr_atm_acq_load(&g_connection_delay_ms);
+ if (delay_ms > 0) {
+ gpr_sleep_until(grpc_timeout_milliseconds_to_deadline(delay_ms));
+ }
+ default_client_impl->connect(closure, ep, interested_parties, channel_args,
+ addr, deadline + delay_ms);
+}
+
+grpc_tcp_client_vtable delayed_connect = {tcp_client_connect_with_delay};
+
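The delayed-connect hook above works by saving gRPC's default TCP client vtable (default_client_impl = grpc_tcp_client_impl), installing a wrapper via grpc_set_tcp_client_impl, and having the wrapper sleep before delegating. As a rough standalone illustration of that function-pointer-injection pattern (every name below is invented for the sketch, not a gRPC API):

#include <chrono>
#include <cstdio>
#include <thread>

// Hypothetical stand-in for grpc_tcp_client_vtable: a single connect hook.
struct connect_vtable {
  void (*connect)(const char* target);
};

void real_connect(const char* target) { std::printf("connect %s\n", target); }

connect_vtable real_impl = {real_connect};
connect_vtable* g_client_impl = &real_impl;  // what the "library" calls through
connect_vtable* saved_impl = nullptr;        // remembered default (cf. default_client_impl)
int g_delay_ms = 0;                          // test knob (cf. g_connection_delay_ms)

void connect_with_delay(const char* target) {
  if (g_delay_ms > 0) {
    std::this_thread::sleep_for(std::chrono::milliseconds(g_delay_ms));
  }
  saved_impl->connect(target);  // delegate to the saved default after the delay
}

connect_vtable delayed_impl = {connect_with_delay};

int main() {
  g_delay_ms = 50;
  saved_impl = g_client_impl;     // cf. default_client_impl = grpc_tcp_client_impl
  g_client_impl = &delayed_impl;  // cf. grpc_set_tcp_client_impl(&delayed_connect)
  g_client_impl->connect("127.0.0.1:5104");
}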
+// Subclass of TestServiceImpl that increments a request counter for
+// every call to the Echo RPC.
+class MyTestServiceImpl : public TestServiceImpl {
+ public:
+ Status Echo(ServerContext* context, const EchoRequest* request,
+ EchoResponse* response) override {
+ const udpa::data::orca::v1::OrcaLoadReport* load_report = nullptr;
+ {
+ grpc::internal::MutexLock lock(&mu_);
+ ++request_count_;
+ load_report = load_report_;
+ }
+ AddClient(context->peer().c_str());
+ if (load_report != nullptr) {
+ // TODO(roth): Once we provide a more standard server-side API for
+ // populating this data, use that API here.
+ context->AddTrailingMetadata("x-endpoint-load-metrics-bin",
+ load_report->SerializeAsString());
+ }
+ return TestServiceImpl::Echo(context, request, response);
+ }
+
+ int request_count() {
+ grpc::internal::MutexLock lock(&mu_);
+ return request_count_;
+ }
+
+ void ResetCounters() {
+ grpc::internal::MutexLock lock(&mu_);
+ request_count_ = 0;
+ }
+
std::set<TString> clients() {
- grpc::internal::MutexLock lock(&clients_mu_);
- return clients_;
- }
-
- void set_load_report(udpa::data::orca::v1::OrcaLoadReport* load_report) {
- grpc::internal::MutexLock lock(&mu_);
- load_report_ = load_report;
- }
-
- private:
+ grpc::internal::MutexLock lock(&clients_mu_);
+ return clients_;
+ }
+
+ void set_load_report(udpa::data::orca::v1::OrcaLoadReport* load_report) {
+ grpc::internal::MutexLock lock(&mu_);
+ load_report_ = load_report;
+ }
+
+ private:
void AddClient(const TString& client) {
- grpc::internal::MutexLock lock(&clients_mu_);
- clients_.insert(client);
- }
-
- grpc::internal::Mutex mu_;
- int request_count_ = 0;
- const udpa::data::orca::v1::OrcaLoadReport* load_report_ = nullptr;
- grpc::internal::Mutex clients_mu_;
+ grpc::internal::MutexLock lock(&clients_mu_);
+ clients_.insert(client);
+ }
+
+ grpc::internal::Mutex mu_;
+ int request_count_ = 0;
+ const udpa::data::orca::v1::OrcaLoadReport* load_report_ = nullptr;
+ grpc::internal::Mutex clients_mu_;
std::set<TString> clients_;
-};
-
-class FakeResolverResponseGeneratorWrapper {
- public:
- FakeResolverResponseGeneratorWrapper()
- : response_generator_(grpc_core::MakeRefCounted<
- grpc_core::FakeResolverResponseGenerator>()) {}
-
- FakeResolverResponseGeneratorWrapper(
+};
+
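MyTestServiceImpl guards its request counter and client set with a mutex and hands out copies from its accessors so the test reads a consistent snapshot. A minimal standard-library sketch of that pattern (hypothetical class name, std::mutex standing in for grpc::internal::Mutex):

#include <mutex>
#include <set>
#include <string>

class CallStats {
 public:
  void RecordCall(const std::string& peer) {
    std::lock_guard<std::mutex> lock(mu_);
    ++request_count_;
    clients_.insert(peer);
  }
  int request_count() {
    std::lock_guard<std::mutex> lock(mu_);
    return request_count_;
  }
  void Reset() {
    std::lock_guard<std::mutex> lock(mu_);
    request_count_ = 0;
  }
  std::set<std::string> clients() {
    std::lock_guard<std::mutex> lock(mu_);
    return clients_;  // return a copy so callers see a consistent snapshot
  }

 private:
  std::mutex mu_;
  int request_count_ = 0;
  std::set<std::string> clients_;
};

int main() {
  CallStats stats;
  stats.RecordCall("ipv4:127.0.0.1:12345");
  return stats.request_count() == 1 ? 0 : 1;
}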
+class FakeResolverResponseGeneratorWrapper {
+ public:
+ FakeResolverResponseGeneratorWrapper()
+ : response_generator_(grpc_core::MakeRefCounted<
+ grpc_core::FakeResolverResponseGenerator>()) {}
+
+ FakeResolverResponseGeneratorWrapper(
FakeResolverResponseGeneratorWrapper&& other) noexcept {
- response_generator_ = std::move(other.response_generator_);
- }
-
+ response_generator_ = std::move(other.response_generator_);
+ }
+
void SetNextResolution(
const std::vector<int>& ports, const char* service_config_json = nullptr,
const char* attribute_key = nullptr,
std::unique_ptr<grpc_core::ServerAddress::AttributeInterface> attribute =
nullptr) {
- grpc_core::ExecCtx exec_ctx;
+ grpc_core::ExecCtx exec_ctx;
response_generator_->SetResponse(BuildFakeResults(
ports, service_config_json, attribute_key, std::move(attribute)));
- }
-
- void SetNextResolutionUponError(const std::vector<int>& ports) {
- grpc_core::ExecCtx exec_ctx;
- response_generator_->SetReresolutionResponse(BuildFakeResults(ports));
- }
-
- void SetFailureOnReresolution() {
- grpc_core::ExecCtx exec_ctx;
- response_generator_->SetFailureOnReresolution();
- }
-
- grpc_core::FakeResolverResponseGenerator* Get() const {
- return response_generator_.get();
- }
-
- private:
- static grpc_core::Resolver::Result BuildFakeResults(
+ }
+
+ void SetNextResolutionUponError(const std::vector<int>& ports) {
+ grpc_core::ExecCtx exec_ctx;
+ response_generator_->SetReresolutionResponse(BuildFakeResults(ports));
+ }
+
+ void SetFailureOnReresolution() {
+ grpc_core::ExecCtx exec_ctx;
+ response_generator_->SetFailureOnReresolution();
+ }
+
+ grpc_core::FakeResolverResponseGenerator* Get() const {
+ return response_generator_.get();
+ }
+
+ private:
+ static grpc_core::Resolver::Result BuildFakeResults(
const std::vector<int>& ports, const char* service_config_json = nullptr,
const char* attribute_key = nullptr,
std::unique_ptr<grpc_core::ServerAddress::AttributeInterface> attribute =
nullptr) {
- grpc_core::Resolver::Result result;
- for (const int& port : ports) {
+ grpc_core::Resolver::Result result;
+ for (const int& port : ports) {
TString lb_uri_str = y_absl::StrCat("ipv4:127.0.0.1:", port);
grpc_uri* lb_uri = grpc_uri_parse(lb_uri_str.c_str(), true);
- GPR_ASSERT(lb_uri != nullptr);
- grpc_resolved_address address;
- GPR_ASSERT(grpc_parse_uri(lb_uri, &address));
+ GPR_ASSERT(lb_uri != nullptr);
+ grpc_resolved_address address;
+ GPR_ASSERT(grpc_parse_uri(lb_uri, &address));
std::map<const char*,
std::unique_ptr<grpc_core::ServerAddress::AttributeInterface>>
attributes;
if (attribute != nullptr) {
attributes[attribute_key] = attribute->Copy();
}
- result.addresses.emplace_back(address.addr, address.len,
+ result.addresses.emplace_back(address.addr, address.len,
nullptr /* args */, std::move(attributes));
- grpc_uri_destroy(lb_uri);
- }
- if (service_config_json != nullptr) {
- result.service_config = grpc_core::ServiceConfig::Create(
+ grpc_uri_destroy(lb_uri);
+ }
+ if (service_config_json != nullptr) {
+ result.service_config = grpc_core::ServiceConfig::Create(
nullptr, service_config_json, &result.service_config_error);
- GPR_ASSERT(result.service_config != nullptr);
- }
- return result;
- }
-
- grpc_core::RefCountedPtr<grpc_core::FakeResolverResponseGenerator>
- response_generator_;
-};
-
-class ClientLbEnd2endTest : public ::testing::Test {
- protected:
- ClientLbEnd2endTest()
- : server_host_("localhost"),
- kRequestMessage_("Live long and prosper."),
- creds_(new SecureChannelCredentials(
- grpc_fake_transport_security_credentials_create())) {}
-
- static void SetUpTestCase() {
- // Make the backup poller poll very frequently in order to pick up
- // updates from all the subchannels' FDs.
- GPR_GLOBAL_CONFIG_SET(grpc_client_channel_backup_poll_interval_ms, 1);
-#if TARGET_OS_IPHONE
- // Workaround Apple CFStream bug
- gpr_setenv("grpc_cfstream", "0");
-#endif
- }
-
- void SetUp() override { grpc_init(); }
-
- void TearDown() override {
- for (size_t i = 0; i < servers_.size(); ++i) {
- servers_[i]->Shutdown();
- }
- servers_.clear();
- creds_.reset();
- grpc_shutdown_blocking();
- }
-
- void CreateServers(size_t num_servers,
- std::vector<int> ports = std::vector<int>()) {
- servers_.clear();
- for (size_t i = 0; i < num_servers; ++i) {
- int port = 0;
- if (ports.size() == num_servers) port = ports[i];
- servers_.emplace_back(new ServerData(port));
- }
- }
-
- void StartServer(size_t index) { servers_[index]->Start(server_host_); }
-
- void StartServers(size_t num_servers,
- std::vector<int> ports = std::vector<int>()) {
- CreateServers(num_servers, std::move(ports));
- for (size_t i = 0; i < num_servers; ++i) {
- StartServer(i);
- }
- }
-
- std::vector<int> GetServersPorts(size_t start_index = 0) {
- std::vector<int> ports;
- for (size_t i = start_index; i < servers_.size(); ++i) {
- ports.push_back(servers_[i]->port_);
- }
- return ports;
- }
-
- FakeResolverResponseGeneratorWrapper BuildResolverResponseGenerator() {
- return FakeResolverResponseGeneratorWrapper();
- }
-
- std::unique_ptr<grpc::testing::EchoTestService::Stub> BuildStub(
- const std::shared_ptr<Channel>& channel) {
- return grpc::testing::EchoTestService::NewStub(channel);
- }
-
- std::shared_ptr<Channel> BuildChannel(
+ GPR_ASSERT(result.service_config != nullptr);
+ }
+ return result;
+ }
+
+ grpc_core::RefCountedPtr<grpc_core::FakeResolverResponseGenerator>
+ response_generator_;
+};
+
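BuildFakeResults above turns a list of ports into "ipv4:127.0.0.1:<port>" URIs, parses each into a resolved address via grpc_uri_parse/grpc_parse_uri, and optionally attaches a parsed service config. A stripped-down sketch of just the address-string step, using only the standard library:

#include <cstdio>
#include <string>
#include <vector>

// Build the fake resolver's address URIs for a set of local ports.
std::vector<std::string> BuildFakeAddressUris(const std::vector<int>& ports) {
  std::vector<std::string> uris;
  uris.reserve(ports.size());
  for (int port : ports) {
    uris.push_back("ipv4:127.0.0.1:" + std::to_string(port));
  }
  return uris;
}

int main() {
  for (const std::string& uri : BuildFakeAddressUris({5101, 5102})) {
    std::printf("%s\n", uri.c_str());
  }
}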
+class ClientLbEnd2endTest : public ::testing::Test {
+ protected:
+ ClientLbEnd2endTest()
+ : server_host_("localhost"),
+ kRequestMessage_("Live long and prosper."),
+ creds_(new SecureChannelCredentials(
+ grpc_fake_transport_security_credentials_create())) {}
+
+ static void SetUpTestCase() {
+ // Make the backup poller poll very frequently in order to pick up
+ // updates from all the subchannels' FDs.
+ GPR_GLOBAL_CONFIG_SET(grpc_client_channel_backup_poll_interval_ms, 1);
+#if TARGET_OS_IPHONE
+ // Workaround Apple CFStream bug
+ gpr_setenv("grpc_cfstream", "0");
+#endif
+ }
+
+ void SetUp() override { grpc_init(); }
+
+ void TearDown() override {
+ for (size_t i = 0; i < servers_.size(); ++i) {
+ servers_[i]->Shutdown();
+ }
+ servers_.clear();
+ creds_.reset();
+ grpc_shutdown_blocking();
+ }
+
+ void CreateServers(size_t num_servers,
+ std::vector<int> ports = std::vector<int>()) {
+ servers_.clear();
+ for (size_t i = 0; i < num_servers; ++i) {
+ int port = 0;
+ if (ports.size() == num_servers) port = ports[i];
+ servers_.emplace_back(new ServerData(port));
+ }
+ }
+
+ void StartServer(size_t index) { servers_[index]->Start(server_host_); }
+
+ void StartServers(size_t num_servers,
+ std::vector<int> ports = std::vector<int>()) {
+ CreateServers(num_servers, std::move(ports));
+ for (size_t i = 0; i < num_servers; ++i) {
+ StartServer(i);
+ }
+ }
+
+ std::vector<int> GetServersPorts(size_t start_index = 0) {
+ std::vector<int> ports;
+ for (size_t i = start_index; i < servers_.size(); ++i) {
+ ports.push_back(servers_[i]->port_);
+ }
+ return ports;
+ }
+
+ FakeResolverResponseGeneratorWrapper BuildResolverResponseGenerator() {
+ return FakeResolverResponseGeneratorWrapper();
+ }
+
+ std::unique_ptr<grpc::testing::EchoTestService::Stub> BuildStub(
+ const std::shared_ptr<Channel>& channel) {
+ return grpc::testing::EchoTestService::NewStub(channel);
+ }
+
+ std::shared_ptr<Channel> BuildChannel(
const TString& lb_policy_name,
- const FakeResolverResponseGeneratorWrapper& response_generator,
- ChannelArguments args = ChannelArguments()) {
- if (lb_policy_name.size() > 0) {
- args.SetLoadBalancingPolicyName(lb_policy_name);
- } // else, default to pick first
- args.SetPointer(GRPC_ARG_FAKE_RESOLVER_RESPONSE_GENERATOR,
- response_generator.Get());
- return ::grpc::CreateCustomChannel("fake:///", creds_, args);
- }
-
- bool SendRpc(
- const std::unique_ptr<grpc::testing::EchoTestService::Stub>& stub,
- EchoResponse* response = nullptr, int timeout_ms = 1000,
- Status* result = nullptr, bool wait_for_ready = false) {
- const bool local_response = (response == nullptr);
- if (local_response) response = new EchoResponse;
- EchoRequest request;
- request.set_message(kRequestMessage_);
+ const FakeResolverResponseGeneratorWrapper& response_generator,
+ ChannelArguments args = ChannelArguments()) {
+ if (lb_policy_name.size() > 0) {
+ args.SetLoadBalancingPolicyName(lb_policy_name);
+ } // else, default to pick first
+ args.SetPointer(GRPC_ARG_FAKE_RESOLVER_RESPONSE_GENERATOR,
+ response_generator.Get());
+ return ::grpc::CreateCustomChannel("fake:///", creds_, args);
+ }
+
+ bool SendRpc(
+ const std::unique_ptr<grpc::testing::EchoTestService::Stub>& stub,
+ EchoResponse* response = nullptr, int timeout_ms = 1000,
+ Status* result = nullptr, bool wait_for_ready = false) {
+ const bool local_response = (response == nullptr);
+ if (local_response) response = new EchoResponse;
+ EchoRequest request;
+ request.set_message(kRequestMessage_);
request.mutable_param()->set_echo_metadata(true);
- ClientContext context;
- context.set_deadline(grpc_timeout_milliseconds_to_deadline(timeout_ms));
- if (wait_for_ready) context.set_wait_for_ready(true);
+ ClientContext context;
+ context.set_deadline(grpc_timeout_milliseconds_to_deadline(timeout_ms));
+ if (wait_for_ready) context.set_wait_for_ready(true);
context.AddMetadata("foo", "1");
context.AddMetadata("bar", "2");
context.AddMetadata("baz", "3");
- Status status = stub->Echo(&context, request, response);
- if (result != nullptr) *result = status;
- if (local_response) delete response;
- return status.ok();
- }
-
- void CheckRpcSendOk(
- const std::unique_ptr<grpc::testing::EchoTestService::Stub>& stub,
- const grpc_core::DebugLocation& location, bool wait_for_ready = false) {
- EchoResponse response;
- Status status;
- const bool success =
- SendRpc(stub, &response, 2000, &status, wait_for_ready);
- ASSERT_TRUE(success) << "From " << location.file() << ":" << location.line()
- << "\n"
- << "Error: " << status.error_message() << " "
- << status.error_details();
- ASSERT_EQ(response.message(), kRequestMessage_)
- << "From " << location.file() << ":" << location.line();
- if (!success) abort();
- }
-
- void CheckRpcSendFailure(
- const std::unique_ptr<grpc::testing::EchoTestService::Stub>& stub) {
- const bool success = SendRpc(stub);
- EXPECT_FALSE(success);
- }
-
- struct ServerData {
- int port_;
- std::unique_ptr<Server> server_;
- MyTestServiceImpl service_;
- std::unique_ptr<std::thread> thread_;
- bool server_ready_ = false;
- bool started_ = false;
-
- explicit ServerData(int port = 0) {
- port_ = port > 0 ? port : 5100; // grpc_pick_unused_port_or_die();
- }
-
+ Status status = stub->Echo(&context, request, response);
+ if (result != nullptr) *result = status;
+ if (local_response) delete response;
+ return status.ok();
+ }
+
+ void CheckRpcSendOk(
+ const std::unique_ptr<grpc::testing::EchoTestService::Stub>& stub,
+ const grpc_core::DebugLocation& location, bool wait_for_ready = false) {
+ EchoResponse response;
+ Status status;
+ const bool success =
+ SendRpc(stub, &response, 2000, &status, wait_for_ready);
+ ASSERT_TRUE(success) << "From " << location.file() << ":" << location.line()
+ << "\n"
+ << "Error: " << status.error_message() << " "
+ << status.error_details();
+ ASSERT_EQ(response.message(), kRequestMessage_)
+ << "From " << location.file() << ":" << location.line();
+ if (!success) abort();
+ }
+
+ void CheckRpcSendFailure(
+ const std::unique_ptr<grpc::testing::EchoTestService::Stub>& stub) {
+ const bool success = SendRpc(stub);
+ EXPECT_FALSE(success);
+ }
+
+ struct ServerData {
+ int port_;
+ std::unique_ptr<Server> server_;
+ MyTestServiceImpl service_;
+ std::unique_ptr<std::thread> thread_;
+ bool server_ready_ = false;
+ bool started_ = false;
+
+ explicit ServerData(int port = 0) {
+ port_ = port > 0 ? port : 5100; // grpc_pick_unused_port_or_die();
+ }
+
void Start(const TString& server_host) {
- gpr_log(GPR_INFO, "starting server on port %d", port_);
- started_ = true;
- grpc::internal::Mutex mu;
- grpc::internal::MutexLock lock(&mu);
- grpc::internal::CondVar cond;
- thread_.reset(new std::thread(
- std::bind(&ServerData::Serve, this, server_host, &mu, &cond)));
- cond.WaitUntil(&mu, [this] { return server_ready_; });
- server_ready_ = false;
- gpr_log(GPR_INFO, "server startup complete");
- }
-
+ gpr_log(GPR_INFO, "starting server on port %d", port_);
+ started_ = true;
+ grpc::internal::Mutex mu;
+ grpc::internal::MutexLock lock(&mu);
+ grpc::internal::CondVar cond;
+ thread_.reset(new std::thread(
+ std::bind(&ServerData::Serve, this, server_host, &mu, &cond)));
+ cond.WaitUntil(&mu, [this] { return server_ready_; });
+ server_ready_ = false;
+ gpr_log(GPR_INFO, "server startup complete");
+ }
+
void Serve(const TString& server_host, grpc::internal::Mutex* mu,
- grpc::internal::CondVar* cond) {
- std::ostringstream server_address;
- server_address << server_host << ":" << port_;
- ServerBuilder builder;
- std::shared_ptr<ServerCredentials> creds(new SecureServerCredentials(
- grpc_fake_transport_security_server_credentials_create()));
- builder.AddListeningPort(server_address.str(), std::move(creds));
- builder.RegisterService(&service_);
- server_ = builder.BuildAndStart();
- grpc::internal::MutexLock lock(mu);
- server_ready_ = true;
- cond->Signal();
- }
-
- void Shutdown() {
- if (!started_) return;
- server_->Shutdown(grpc_timeout_milliseconds_to_deadline(0));
- thread_->join();
- started_ = false;
- }
-
+ grpc::internal::CondVar* cond) {
+ std::ostringstream server_address;
+ server_address << server_host << ":" << port_;
+ ServerBuilder builder;
+ std::shared_ptr<ServerCredentials> creds(new SecureServerCredentials(
+ grpc_fake_transport_security_server_credentials_create()));
+ builder.AddListeningPort(server_address.str(), std::move(creds));
+ builder.RegisterService(&service_);
+ server_ = builder.BuildAndStart();
+ grpc::internal::MutexLock lock(mu);
+ server_ready_ = true;
+ cond->Signal();
+ }
+
+ void Shutdown() {
+ if (!started_) return;
+ server_->Shutdown(grpc_timeout_milliseconds_to_deadline(0));
+ thread_->join();
+ started_ = false;
+ }
+
void SetServingStatus(const TString& service, bool serving) {
- server_->GetHealthCheckService()->SetServingStatus(service, serving);
- }
- };
-
- void ResetCounters() {
- for (const auto& server : servers_) server->service_.ResetCounters();
- }
-
- void WaitForServer(
- const std::unique_ptr<grpc::testing::EchoTestService::Stub>& stub,
- size_t server_idx, const grpc_core::DebugLocation& location,
- bool ignore_failure = false) {
- do {
- if (ignore_failure) {
- SendRpc(stub);
- } else {
- CheckRpcSendOk(stub, location, true);
- }
- } while (servers_[server_idx]->service_.request_count() == 0);
- ResetCounters();
- }
-
- bool WaitForChannelState(
- Channel* channel, std::function<bool(grpc_connectivity_state)> predicate,
- bool try_to_connect = false, int timeout_seconds = 5) {
- const gpr_timespec deadline =
- grpc_timeout_seconds_to_deadline(timeout_seconds);
- while (true) {
- grpc_connectivity_state state = channel->GetState(try_to_connect);
- if (predicate(state)) break;
- if (!channel->WaitForStateChange(state, deadline)) return false;
- }
- return true;
- }
-
- bool WaitForChannelNotReady(Channel* channel, int timeout_seconds = 5) {
- auto predicate = [](grpc_connectivity_state state) {
- return state != GRPC_CHANNEL_READY;
- };
- return WaitForChannelState(channel, predicate, false, timeout_seconds);
- }
-
- bool WaitForChannelReady(Channel* channel, int timeout_seconds = 5) {
- auto predicate = [](grpc_connectivity_state state) {
- return state == GRPC_CHANNEL_READY;
- };
- return WaitForChannelState(channel, predicate, true, timeout_seconds);
- }
-
- bool SeenAllServers() {
- for (const auto& server : servers_) {
- if (server->service_.request_count() == 0) return false;
- }
- return true;
- }
-
- // Updates \a connection_order by appending to it the index of the newly
- // connected server. Must be called after every single RPC.
- void UpdateConnectionOrder(
- const std::vector<std::unique_ptr<ServerData>>& servers,
- std::vector<int>* connection_order) {
- for (size_t i = 0; i < servers.size(); ++i) {
- if (servers[i]->service_.request_count() == 1) {
- // Was the server index known? If not, update connection_order.
- const auto it =
- std::find(connection_order->begin(), connection_order->end(), i);
- if (it == connection_order->end()) {
- connection_order->push_back(i);
- return;
- }
- }
- }
- }
-
+ server_->GetHealthCheckService()->SetServingStatus(service, serving);
+ }
+ };
+
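ServerData::Start launches the server on a background thread and blocks on a condition variable until Serve signals that the listener is up; that handshake keeps the test from racing server startup. A standalone sketch of the same handshake with std::thread and std::condition_variable (names invented here):

#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <thread>

std::mutex mu;
std::condition_variable cv;
bool server_ready = false;

void Serve() {
  // ... build and start the server here ...
  std::lock_guard<std::mutex> lock(mu);
  server_ready = true;
  cv.notify_one();  // cf. cond->Signal()
}

int main() {
  std::thread server_thread(Serve);
  {
    std::unique_lock<std::mutex> lock(mu);
    cv.wait(lock, [] { return server_ready; });  // cf. cond.WaitUntil(...)
  }
  std::printf("server startup complete\n");
  server_thread.join();
}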
+ void ResetCounters() {
+ for (const auto& server : servers_) server->service_.ResetCounters();
+ }
+
+ void WaitForServer(
+ const std::unique_ptr<grpc::testing::EchoTestService::Stub>& stub,
+ size_t server_idx, const grpc_core::DebugLocation& location,
+ bool ignore_failure = false) {
+ do {
+ if (ignore_failure) {
+ SendRpc(stub);
+ } else {
+ CheckRpcSendOk(stub, location, true);
+ }
+ } while (servers_[server_idx]->service_.request_count() == 0);
+ ResetCounters();
+ }
+
+ bool WaitForChannelState(
+ Channel* channel, std::function<bool(grpc_connectivity_state)> predicate,
+ bool try_to_connect = false, int timeout_seconds = 5) {
+ const gpr_timespec deadline =
+ grpc_timeout_seconds_to_deadline(timeout_seconds);
+ while (true) {
+ grpc_connectivity_state state = channel->GetState(try_to_connect);
+ if (predicate(state)) break;
+ if (!channel->WaitForStateChange(state, deadline)) return false;
+ }
+ return true;
+ }
+
+ bool WaitForChannelNotReady(Channel* channel, int timeout_seconds = 5) {
+ auto predicate = [](grpc_connectivity_state state) {
+ return state != GRPC_CHANNEL_READY;
+ };
+ return WaitForChannelState(channel, predicate, false, timeout_seconds);
+ }
+
+ bool WaitForChannelReady(Channel* channel, int timeout_seconds = 5) {
+ auto predicate = [](grpc_connectivity_state state) {
+ return state == GRPC_CHANNEL_READY;
+ };
+ return WaitForChannelState(channel, predicate, true, timeout_seconds);
+ }
+
+ bool SeenAllServers() {
+ for (const auto& server : servers_) {
+ if (server->service_.request_count() == 0) return false;
+ }
+ return true;
+ }
+
+ // Updates \a connection_order by appending to it the index of the newly
+ // connected server. Must be called after every single RPC.
+ void UpdateConnectionOrder(
+ const std::vector<std::unique_ptr<ServerData>>& servers,
+ std::vector<int>* connection_order) {
+ for (size_t i = 0; i < servers.size(); ++i) {
+ if (servers[i]->service_.request_count() == 1) {
+ // Was the server index known? If not, update connection_order.
+ const auto it =
+ std::find(connection_order->begin(), connection_order->end(), i);
+ if (it == connection_order->end()) {
+ connection_order->push_back(i);
+ return;
+ }
+ }
+ }
+ }
+
const TString server_host_;
- std::vector<std::unique_ptr<ServerData>> servers_;
+ std::vector<std::unique_ptr<ServerData>> servers_;
const TString kRequestMessage_;
- std::shared_ptr<ChannelCredentials> creds_;
-};
-
-TEST_F(ClientLbEnd2endTest, ChannelStateConnectingWhenResolving) {
- const int kNumServers = 3;
- StartServers(kNumServers);
- auto response_generator = BuildResolverResponseGenerator();
- auto channel = BuildChannel("", response_generator);
- auto stub = BuildStub(channel);
- // Initial state should be IDLE.
- EXPECT_EQ(channel->GetState(false /* try_to_connect */), GRPC_CHANNEL_IDLE);
- // Tell the channel to try to connect.
- // Note that this call also returns IDLE, since the state change has
- // not yet occurred; it just gets triggered by this call.
- EXPECT_EQ(channel->GetState(true /* try_to_connect */), GRPC_CHANNEL_IDLE);
- // Now that the channel is trying to connect, we should be in state
- // CONNECTING.
- EXPECT_EQ(channel->GetState(false /* try_to_connect */),
- GRPC_CHANNEL_CONNECTING);
- // Return a resolver result, which allows the connection attempt to proceed.
- response_generator.SetNextResolution(GetServersPorts());
- // We should eventually transition into state READY.
- EXPECT_TRUE(WaitForChannelReady(channel.get()));
-}
-
-TEST_F(ClientLbEnd2endTest, PickFirst) {
- // Start servers and send one RPC per server.
- const int kNumServers = 3;
- StartServers(kNumServers);
- auto response_generator = BuildResolverResponseGenerator();
- auto channel = BuildChannel(
- "", response_generator); // test that pick first is the default.
- auto stub = BuildStub(channel);
- response_generator.SetNextResolution(GetServersPorts());
- for (size_t i = 0; i < servers_.size(); ++i) {
- CheckRpcSendOk(stub, DEBUG_LOCATION);
- }
- // All requests should have gone to a single server.
- bool found = false;
- for (size_t i = 0; i < servers_.size(); ++i) {
- const int request_count = servers_[i]->service_.request_count();
- if (request_count == kNumServers) {
- found = true;
- } else {
- EXPECT_EQ(0, request_count);
- }
- }
- EXPECT_TRUE(found);
- // Check LB policy name for the channel.
- EXPECT_EQ("pick_first", channel->GetLoadBalancingPolicyName());
-}
-
-TEST_F(ClientLbEnd2endTest, PickFirstProcessPending) {
- StartServers(1); // Single server
- auto response_generator = BuildResolverResponseGenerator();
- auto channel = BuildChannel(
- "", response_generator); // test that pick first is the default.
- auto stub = BuildStub(channel);
- response_generator.SetNextResolution({servers_[0]->port_});
- WaitForServer(stub, 0, DEBUG_LOCATION);
- // Create a new channel and its corresponding PF LB policy, which will pick
- // the subchannels in READY state from the previous RPC against the same
- // target (even if it happened over a different channel, because subchannels
- // are globally reused). Progress should happen without any transition from
- // this READY state.
- auto second_response_generator = BuildResolverResponseGenerator();
- auto second_channel = BuildChannel("", second_response_generator);
- auto second_stub = BuildStub(second_channel);
- second_response_generator.SetNextResolution({servers_[0]->port_});
- CheckRpcSendOk(second_stub, DEBUG_LOCATION);
-}
-
-TEST_F(ClientLbEnd2endTest, PickFirstSelectsReadyAtStartup) {
- ChannelArguments args;
- constexpr int kInitialBackOffMs = 5000;
- args.SetInt(GRPC_ARG_INITIAL_RECONNECT_BACKOFF_MS, kInitialBackOffMs);
- // Create 2 servers, but start only the second one.
- std::vector<int> ports = { 5101, // grpc_pick_unused_port_or_die(),
- 5102}; // grpc_pick_unused_port_or_die()};
- CreateServers(2, ports);
- StartServer(1);
- auto response_generator1 = BuildResolverResponseGenerator();
- auto channel1 = BuildChannel("pick_first", response_generator1, args);
- auto stub1 = BuildStub(channel1);
- response_generator1.SetNextResolution(ports);
- // Wait for second server to be ready.
- WaitForServer(stub1, 1, DEBUG_LOCATION);
- // Create a second channel with the same addresses. Its PF instance
- // should immediately pick the second subchannel, since it's already
- // in READY state.
- auto response_generator2 = BuildResolverResponseGenerator();
- auto channel2 = BuildChannel("pick_first", response_generator2, args);
- response_generator2.SetNextResolution(ports);
- // Check that the channel reports READY without waiting for the
- // initial backoff.
- EXPECT_TRUE(WaitForChannelReady(channel2.get(), 1 /* timeout_seconds */));
-}
-
-TEST_F(ClientLbEnd2endTest, PickFirstBackOffInitialReconnect) {
- ChannelArguments args;
- constexpr int kInitialBackOffMs = 100;
- args.SetInt(GRPC_ARG_INITIAL_RECONNECT_BACKOFF_MS, kInitialBackOffMs);
- const std::vector<int> ports = {5103}; // {grpc_pick_unused_port_or_die()};
- const gpr_timespec t0 = gpr_now(GPR_CLOCK_MONOTONIC);
- auto response_generator = BuildResolverResponseGenerator();
- auto channel = BuildChannel("pick_first", response_generator, args);
- auto stub = BuildStub(channel);
- response_generator.SetNextResolution(ports);
- // The channel won't become connected (there's no server).
- ASSERT_FALSE(channel->WaitForConnected(
- grpc_timeout_milliseconds_to_deadline(kInitialBackOffMs * 2)));
- // Bring up a server on the chosen port.
- StartServers(1, ports);
- // Now it will.
- ASSERT_TRUE(channel->WaitForConnected(
- grpc_timeout_milliseconds_to_deadline(kInitialBackOffMs * 2)));
- const gpr_timespec t1 = gpr_now(GPR_CLOCK_MONOTONIC);
- const grpc_millis waited_ms = gpr_time_to_millis(gpr_time_sub(t1, t0));
- gpr_log(GPR_DEBUG, "Waited %" PRId64 " milliseconds", waited_ms);
- // We should have waited at least kInitialBackOffMs. We subtract one to
- // account for test timing and precision drift.
- EXPECT_GE(waited_ms, kInitialBackOffMs - 1);
- // But not much more.
- EXPECT_GT(
- gpr_time_cmp(
- grpc_timeout_milliseconds_to_deadline(kInitialBackOffMs * 1.10), t1),
- 0);
-}
-
-TEST_F(ClientLbEnd2endTest, PickFirstBackOffMinReconnect) {
- ChannelArguments args;
- constexpr int kMinReconnectBackOffMs = 1000;
- args.SetInt(GRPC_ARG_MIN_RECONNECT_BACKOFF_MS, kMinReconnectBackOffMs);
- const std::vector<int> ports = {5104}; // {grpc_pick_unused_port_or_die()};
- auto response_generator = BuildResolverResponseGenerator();
- auto channel = BuildChannel("pick_first", response_generator, args);
- auto stub = BuildStub(channel);
- response_generator.SetNextResolution(ports);
- // Make the connection delay 10% longer than the min reconnect backoff to
- // make sure we hit the codepath that waits for the min reconnect backoff.
- gpr_atm_rel_store(&g_connection_delay_ms, kMinReconnectBackOffMs * 1.10);
- default_client_impl = grpc_tcp_client_impl;
- grpc_set_tcp_client_impl(&delayed_connect);
- const gpr_timespec t0 = gpr_now(GPR_CLOCK_MONOTONIC);
- channel->WaitForConnected(
- grpc_timeout_milliseconds_to_deadline(kMinReconnectBackOffMs * 2));
- const gpr_timespec t1 = gpr_now(GPR_CLOCK_MONOTONIC);
- const grpc_millis waited_ms = gpr_time_to_millis(gpr_time_sub(t1, t0));
- gpr_log(GPR_DEBUG, "Waited %" PRId64 " ms", waited_ms);
- // We should have waited at least kMinReconnectBackOffMs. We subtract one to
- // account for test timing and precision drift.
- EXPECT_GE(waited_ms, kMinReconnectBackOffMs - 1);
- gpr_atm_rel_store(&g_connection_delay_ms, 0);
-}
-
-TEST_F(ClientLbEnd2endTest, PickFirstResetConnectionBackoff) {
- ChannelArguments args;
- constexpr int kInitialBackOffMs = 1000;
- args.SetInt(GRPC_ARG_INITIAL_RECONNECT_BACKOFF_MS, kInitialBackOffMs);
- const std::vector<int> ports = {5105}; // {grpc_pick_unused_port_or_die()};
- auto response_generator = BuildResolverResponseGenerator();
- auto channel = BuildChannel("pick_first", response_generator, args);
- auto stub = BuildStub(channel);
- response_generator.SetNextResolution(ports);
- // The channel won't become connected (there's no server).
- EXPECT_FALSE(
- channel->WaitForConnected(grpc_timeout_milliseconds_to_deadline(10)));
- // Bring up a server on the chosen port.
- StartServers(1, ports);
- const gpr_timespec t0 = gpr_now(GPR_CLOCK_MONOTONIC);
- // Wait for connect, but not long enough. This proves that we're
- // being throttled by initial backoff.
- EXPECT_FALSE(
- channel->WaitForConnected(grpc_timeout_milliseconds_to_deadline(10)));
- // Reset connection backoff.
- experimental::ChannelResetConnectionBackoff(channel.get());
+ std::shared_ptr<ChannelCredentials> creds_;
+};
+
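WaitForChannelState above polls the channel's connectivity state until a predicate holds or a deadline passes, and WaitForChannelReady/WaitForChannelNotReady are just predicates plugged into it. A generic standard-library sketch of that deadline-bounded wait (the "channel" here is a placeholder lambda, and the sketch polls with a sleep where the real helper blocks in WaitForStateChange):

#include <chrono>
#include <functional>
#include <thread>

// Poll `get_state` until `predicate(state)` holds or the deadline passes.
template <typename State>
bool WaitForState(std::function<State()> get_state,
                  std::function<bool(State)> predicate,
                  std::chrono::seconds timeout) {
  const auto deadline = std::chrono::steady_clock::now() + timeout;
  while (std::chrono::steady_clock::now() < deadline) {
    if (predicate(get_state())) return true;
    std::this_thread::sleep_for(std::chrono::milliseconds(10));
  }
  return false;
}

enum class ChannelState { kIdle, kConnecting, kReady };

int main() {
  ChannelState state = ChannelState::kIdle;
  int polls = 0;
  // Placeholder "channel": flips to kReady after a few polls.
  std::function<ChannelState()> get_state = [&]() {
    if (++polls > 3) state = ChannelState::kReady;
    return state;
  };
  std::function<bool(ChannelState)> ready = [](ChannelState s) {
    return s == ChannelState::kReady;
  };
  return WaitForState<ChannelState>(get_state, ready, std::chrono::seconds(5)) ? 0 : 1;
}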
+TEST_F(ClientLbEnd2endTest, ChannelStateConnectingWhenResolving) {
+ const int kNumServers = 3;
+ StartServers(kNumServers);
+ auto response_generator = BuildResolverResponseGenerator();
+ auto channel = BuildChannel("", response_generator);
+ auto stub = BuildStub(channel);
+ // Initial state should be IDLE.
+ EXPECT_EQ(channel->GetState(false /* try_to_connect */), GRPC_CHANNEL_IDLE);
+ // Tell the channel to try to connect.
+ // Note that this call also returns IDLE, since the state change has
+ // not yet occurred; it just gets triggered by this call.
+ EXPECT_EQ(channel->GetState(true /* try_to_connect */), GRPC_CHANNEL_IDLE);
+ // Now that the channel is trying to connect, we should be in state
+ // CONNECTING.
+ EXPECT_EQ(channel->GetState(false /* try_to_connect */),
+ GRPC_CHANNEL_CONNECTING);
+ // Return a resolver result, which allows the connection attempt to proceed.
+ response_generator.SetNextResolution(GetServersPorts());
+ // We should eventually transition into state READY.
+ EXPECT_TRUE(WaitForChannelReady(channel.get()));
+}
+
+TEST_F(ClientLbEnd2endTest, PickFirst) {
+ // Start servers and send one RPC per server.
+ const int kNumServers = 3;
+ StartServers(kNumServers);
+ auto response_generator = BuildResolverResponseGenerator();
+ auto channel = BuildChannel(
+ "", response_generator); // test that pick first is the default.
+ auto stub = BuildStub(channel);
+ response_generator.SetNextResolution(GetServersPorts());
+ for (size_t i = 0; i < servers_.size(); ++i) {
+ CheckRpcSendOk(stub, DEBUG_LOCATION);
+ }
+ // All requests should have gone to a single server.
+ bool found = false;
+ for (size_t i = 0; i < servers_.size(); ++i) {
+ const int request_count = servers_[i]->service_.request_count();
+ if (request_count == kNumServers) {
+ found = true;
+ } else {
+ EXPECT_EQ(0, request_count);
+ }
+ }
+ EXPECT_TRUE(found);
+ // Check LB policy name for the channel.
+ EXPECT_EQ("pick_first", channel->GetLoadBalancingPolicyName());
+}
+
+TEST_F(ClientLbEnd2endTest, PickFirstProcessPending) {
+ StartServers(1); // Single server
+ auto response_generator = BuildResolverResponseGenerator();
+ auto channel = BuildChannel(
+ "", response_generator); // test that pick first is the default.
+ auto stub = BuildStub(channel);
+ response_generator.SetNextResolution({servers_[0]->port_});
+ WaitForServer(stub, 0, DEBUG_LOCATION);
+ // Create a new channel and its corresponding PF LB policy, which will pick
+ // the subchannels in READY state from the previous RPC against the same
+ // target (even if it happened over a different channel, because subchannels
+ // are globally reused). Progress should happen without any transition from
+ // this READY state.
+ auto second_response_generator = BuildResolverResponseGenerator();
+ auto second_channel = BuildChannel("", second_response_generator);
+ auto second_stub = BuildStub(second_channel);
+ second_response_generator.SetNextResolution({servers_[0]->port_});
+ CheckRpcSendOk(second_stub, DEBUG_LOCATION);
+}
+
+TEST_F(ClientLbEnd2endTest, PickFirstSelectsReadyAtStartup) {
+ ChannelArguments args;
+ constexpr int kInitialBackOffMs = 5000;
+ args.SetInt(GRPC_ARG_INITIAL_RECONNECT_BACKOFF_MS, kInitialBackOffMs);
+ // Create 2 servers, but start only the second one.
+ std::vector<int> ports = { 5101, // grpc_pick_unused_port_or_die(),
+ 5102}; // grpc_pick_unused_port_or_die()};
+ CreateServers(2, ports);
+ StartServer(1);
+ auto response_generator1 = BuildResolverResponseGenerator();
+ auto channel1 = BuildChannel("pick_first", response_generator1, args);
+ auto stub1 = BuildStub(channel1);
+ response_generator1.SetNextResolution(ports);
+ // Wait for second server to be ready.
+ WaitForServer(stub1, 1, DEBUG_LOCATION);
+ // Create a second channel with the same addresses. Its PF instance
+ // should immediately pick the second subchannel, since it's already
+ // in READY state.
+ auto response_generator2 = BuildResolverResponseGenerator();
+ auto channel2 = BuildChannel("pick_first", response_generator2, args);
+ response_generator2.SetNextResolution(ports);
+ // Check that the channel reports READY without waiting for the
+ // initial backoff.
+ EXPECT_TRUE(WaitForChannelReady(channel2.get(), 1 /* timeout_seconds */));
+}
+
+TEST_F(ClientLbEnd2endTest, PickFirstBackOffInitialReconnect) {
+ ChannelArguments args;
+ constexpr int kInitialBackOffMs = 100;
+ args.SetInt(GRPC_ARG_INITIAL_RECONNECT_BACKOFF_MS, kInitialBackOffMs);
+ const std::vector<int> ports = {5103}; // {grpc_pick_unused_port_or_die()};
+ const gpr_timespec t0 = gpr_now(GPR_CLOCK_MONOTONIC);
+ auto response_generator = BuildResolverResponseGenerator();
+ auto channel = BuildChannel("pick_first", response_generator, args);
+ auto stub = BuildStub(channel);
+ response_generator.SetNextResolution(ports);
+ // The channel won't become connected (there's no server).
+ ASSERT_FALSE(channel->WaitForConnected(
+ grpc_timeout_milliseconds_to_deadline(kInitialBackOffMs * 2)));
+ // Bring up a server on the chosen port.
+ StartServers(1, ports);
+ // Now it will.
+ ASSERT_TRUE(channel->WaitForConnected(
+ grpc_timeout_milliseconds_to_deadline(kInitialBackOffMs * 2)));
+ const gpr_timespec t1 = gpr_now(GPR_CLOCK_MONOTONIC);
+ const grpc_millis waited_ms = gpr_time_to_millis(gpr_time_sub(t1, t0));
+ gpr_log(GPR_DEBUG, "Waited %" PRId64 " milliseconds", waited_ms);
+ // We should have waited at least kInitialBackOffMs. We subtract one to
+ // account for test timing and precision drift.
+ EXPECT_GE(waited_ms, kInitialBackOffMs - 1);
+ // But not much more.
+ EXPECT_GT(
+ gpr_time_cmp(
+ grpc_timeout_milliseconds_to_deadline(kInitialBackOffMs * 1.10), t1),
+ 0);
+}
+
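The backoff tests measure wall time around the connection attempt with gpr_now/gpr_time_sub and assert a lower bound of roughly kInitialBackOffMs (minus one for timer granularity) plus a loose upper bound. A standard-library sketch of that timing pattern, with std::chrono::steady_clock standing in for GPR_CLOCK_MONOTONIC and a sleep standing in for the actual connection wait:

#include <cassert>
#include <chrono>
#include <cstdio>
#include <thread>

int main() {
  constexpr int kInitialBackOffMs = 100;
  const auto t0 = std::chrono::steady_clock::now();
  // Stand-in for "wait for the channel to connect once the backoff elapses".
  std::this_thread::sleep_for(std::chrono::milliseconds(kInitialBackOffMs));
  const auto t1 = std::chrono::steady_clock::now();
  const auto waited_ms =
      std::chrono::duration_cast<std::chrono::milliseconds>(t1 - t0).count();
  std::printf("Waited %lld ms\n", static_cast<long long>(waited_ms));
  // Lower bound minus one, as in the test, to absorb timer granularity.
  assert(waited_ms >= kInitialBackOffMs - 1);
  // An upper bound (e.g. 1.10 * kInitialBackOffMs) would catch runaway waits.
  return 0;
}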
+TEST_F(ClientLbEnd2endTest, PickFirstBackOffMinReconnect) {
+ ChannelArguments args;
+ constexpr int kMinReconnectBackOffMs = 1000;
+ args.SetInt(GRPC_ARG_MIN_RECONNECT_BACKOFF_MS, kMinReconnectBackOffMs);
+ const std::vector<int> ports = {5104}; // {grpc_pick_unused_port_or_die()};
+ auto response_generator = BuildResolverResponseGenerator();
+ auto channel = BuildChannel("pick_first", response_generator, args);
+ auto stub = BuildStub(channel);
+ response_generator.SetNextResolution(ports);
+ // Make the connection delay 10% longer than the min reconnect backoff to
+ // make sure we hit the codepath that waits for the min reconnect backoff.
+ gpr_atm_rel_store(&g_connection_delay_ms, kMinReconnectBackOffMs * 1.10);
+ default_client_impl = grpc_tcp_client_impl;
+ grpc_set_tcp_client_impl(&delayed_connect);
+ const gpr_timespec t0 = gpr_now(GPR_CLOCK_MONOTONIC);
+ channel->WaitForConnected(
+ grpc_timeout_milliseconds_to_deadline(kMinReconnectBackOffMs * 2));
+ const gpr_timespec t1 = gpr_now(GPR_CLOCK_MONOTONIC);
+ const grpc_millis waited_ms = gpr_time_to_millis(gpr_time_sub(t1, t0));
+ gpr_log(GPR_DEBUG, "Waited %" PRId64 " ms", waited_ms);
+ // We should have waited at least kMinReconnectBackOffMs. We subtract one to
+ // account for test timing and precision drift.
+ EXPECT_GE(waited_ms, kMinReconnectBackOffMs - 1);
+ gpr_atm_rel_store(&g_connection_delay_ms, 0);
+}
+
+TEST_F(ClientLbEnd2endTest, PickFirstResetConnectionBackoff) {
+ ChannelArguments args;
+ constexpr int kInitialBackOffMs = 1000;
+ args.SetInt(GRPC_ARG_INITIAL_RECONNECT_BACKOFF_MS, kInitialBackOffMs);
+ const std::vector<int> ports = {5105}; // {grpc_pick_unused_port_or_die()};
+ auto response_generator = BuildResolverResponseGenerator();
+ auto channel = BuildChannel("pick_first", response_generator, args);
+ auto stub = BuildStub(channel);
+ response_generator.SetNextResolution(ports);
+ // The channel won't become connected (there's no server).
+ EXPECT_FALSE(
+ channel->WaitForConnected(grpc_timeout_milliseconds_to_deadline(10)));
+ // Bring up a server on the chosen port.
+ StartServers(1, ports);
+ const gpr_timespec t0 = gpr_now(GPR_CLOCK_MONOTONIC);
+ // Wait for connect, but not long enough. This proves that we're
+ // being throttled by initial backoff.
+ EXPECT_FALSE(
+ channel->WaitForConnected(grpc_timeout_milliseconds_to_deadline(10)));
+ // Reset connection backoff.
+ experimental::ChannelResetConnectionBackoff(channel.get());
// Wait for connect. Should happen as soon as the client connects to
// the newly started server, which should be before the initial
// backoff timeout elapses.
- EXPECT_TRUE(
+ EXPECT_TRUE(
channel->WaitForConnected(grpc_timeout_milliseconds_to_deadline(20)));
- const gpr_timespec t1 = gpr_now(GPR_CLOCK_MONOTONIC);
- const grpc_millis waited_ms = gpr_time_to_millis(gpr_time_sub(t1, t0));
- gpr_log(GPR_DEBUG, "Waited %" PRId64 " milliseconds", waited_ms);
- // We should have waited less than kInitialBackOffMs.
- EXPECT_LT(waited_ms, kInitialBackOffMs);
-}
-
-TEST_F(ClientLbEnd2endTest,
- PickFirstResetConnectionBackoffNextAttemptStartsImmediately) {
- ChannelArguments args;
- constexpr int kInitialBackOffMs = 1000;
- args.SetInt(GRPC_ARG_INITIAL_RECONNECT_BACKOFF_MS, kInitialBackOffMs);
- const std::vector<int> ports = {5106}; // {grpc_pick_unused_port_or_die()};
- auto response_generator = BuildResolverResponseGenerator();
- auto channel = BuildChannel("pick_first", response_generator, args);
- auto stub = BuildStub(channel);
- response_generator.SetNextResolution(ports);
- // Wait for connect, which should fail ~immediately, because the server
- // is not up.
- gpr_log(GPR_INFO, "=== INITIAL CONNECTION ATTEMPT");
- EXPECT_FALSE(
- channel->WaitForConnected(grpc_timeout_milliseconds_to_deadline(10)));
- // Reset connection backoff.
- // Note that the time at which the third attempt will be started is
- // actually computed at this point, so we record the start time here.
- gpr_log(GPR_INFO, "=== RESETTING BACKOFF");
- const gpr_timespec t0 = gpr_now(GPR_CLOCK_MONOTONIC);
- experimental::ChannelResetConnectionBackoff(channel.get());
- // Trigger a second connection attempt. This should also fail
- // ~immediately, but the retry should be scheduled for
- // kInitialBackOffMs instead of applying the multiplier.
- gpr_log(GPR_INFO, "=== POLLING FOR SECOND CONNECTION ATTEMPT");
- EXPECT_FALSE(
- channel->WaitForConnected(grpc_timeout_milliseconds_to_deadline(10)));
- // Bring up a server on the chosen port.
- gpr_log(GPR_INFO, "=== STARTING BACKEND");
- StartServers(1, ports);
- // Wait for connect. Should happen within kInitialBackOffMs.
- // Give an extra 100ms to account for the time spent in the second and
- // third connection attempts themselves (since what we really want to
- // measure is the time between the two). As long as this is less than
- // the 1.6x increase we would see if the backoff state was not reset
- // properly, the test is still proving that the backoff was reset.
- constexpr int kWaitMs = kInitialBackOffMs + 100;
- gpr_log(GPR_INFO, "=== POLLING FOR THIRD CONNECTION ATTEMPT");
- EXPECT_TRUE(channel->WaitForConnected(
- grpc_timeout_milliseconds_to_deadline(kWaitMs)));
- const gpr_timespec t1 = gpr_now(GPR_CLOCK_MONOTONIC);
- const grpc_millis waited_ms = gpr_time_to_millis(gpr_time_sub(t1, t0));
- gpr_log(GPR_DEBUG, "Waited %" PRId64 " milliseconds", waited_ms);
- EXPECT_LT(waited_ms, kWaitMs);
-}
-
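The two reset tests rely on the reconnect-backoff state machine: each failed attempt multiplies the next delay (the comment above mentions the 1.6x increase), and experimental::ChannelResetConnectionBackoff makes the next attempt start from the initial delay again. A small illustrative sketch of that state machine; the multiplier and cap are hard-coded assumptions for the sketch, not values read from gRPC:

#include <algorithm>
#include <cstdio>

// Assumed constants for the sketch; gRPC's actual defaults live in the
// channel args / backoff implementation, not here.
constexpr double kBackoffMultiplier = 1.6;
constexpr int kMaxBackoffMs = 120000;

// Illustrative reconnect-backoff state: grows on failure, snaps back on Reset.
class Backoff {
 public:
  explicit Backoff(int initial_ms) : initial_ms_(initial_ms), next_ms_(initial_ms) {}

  // Delay to use for the next connection attempt; grows after each failure.
  int NextAttemptDelayMs() {
    const int delay = next_ms_;
    next_ms_ = std::min(static_cast<int>(next_ms_ * kBackoffMultiplier), kMaxBackoffMs);
    return delay;
  }

  // Cf. experimental::ChannelResetConnectionBackoff(): the next attempt is
  // scheduled with the initial delay again instead of the multiplied one.
  void Reset() { next_ms_ = initial_ms_; }

 private:
  int initial_ms_;
  int next_ms_;
};

int main() {
  Backoff backoff(1000);
  std::printf("attempt 1 delay: %d ms\n", backoff.NextAttemptDelayMs());  // 1000
  std::printf("attempt 2 delay: %d ms\n", backoff.NextAttemptDelayMs());  // 1600
  backoff.Reset();
  std::printf("after reset:     %d ms\n", backoff.NextAttemptDelayMs());  // 1000
}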
-TEST_F(ClientLbEnd2endTest, PickFirstUpdates) {
- // Start servers and send one RPC per server.
- const int kNumServers = 3;
- StartServers(kNumServers);
- auto response_generator = BuildResolverResponseGenerator();
- auto channel = BuildChannel("pick_first", response_generator);
- auto stub = BuildStub(channel);
-
- std::vector<int> ports;
-
- // Perform one RPC against the first server.
- ports.emplace_back(servers_[0]->port_);
- response_generator.SetNextResolution(ports);
- gpr_log(GPR_INFO, "****** SET [0] *******");
- CheckRpcSendOk(stub, DEBUG_LOCATION);
- EXPECT_EQ(servers_[0]->service_.request_count(), 1);
-
- // An empty update will result in the channel going into TRANSIENT_FAILURE.
- ports.clear();
- response_generator.SetNextResolution(ports);
- gpr_log(GPR_INFO, "****** SET none *******");
- grpc_connectivity_state channel_state;
- do {
- channel_state = channel->GetState(true /* try to connect */);
- } while (channel_state == GRPC_CHANNEL_READY);
- ASSERT_NE(channel_state, GRPC_CHANNEL_READY);
- servers_[0]->service_.ResetCounters();
-
- // Next update introduces servers_[1], making the channel recover.
- ports.clear();
- ports.emplace_back(servers_[1]->port_);
- response_generator.SetNextResolution(ports);
- gpr_log(GPR_INFO, "****** SET [1] *******");
- WaitForServer(stub, 1, DEBUG_LOCATION);
- EXPECT_EQ(servers_[0]->service_.request_count(), 0);
-
- // And again for servers_[2]
- ports.clear();
- ports.emplace_back(servers_[2]->port_);
- response_generator.SetNextResolution(ports);
- gpr_log(GPR_INFO, "****** SET [2] *******");
- WaitForServer(stub, 2, DEBUG_LOCATION);
- EXPECT_EQ(servers_[0]->service_.request_count(), 0);
- EXPECT_EQ(servers_[1]->service_.request_count(), 0);
-
- // Check LB policy name for the channel.
- EXPECT_EQ("pick_first", channel->GetLoadBalancingPolicyName());
-}
-
-TEST_F(ClientLbEnd2endTest, PickFirstUpdateSuperset) {
- // Start servers and send one RPC per server.
- const int kNumServers = 3;
- StartServers(kNumServers);
- auto response_generator = BuildResolverResponseGenerator();
- auto channel = BuildChannel("pick_first", response_generator);
- auto stub = BuildStub(channel);
-
- std::vector<int> ports;
-
- // Perform one RPC against the first server.
- ports.emplace_back(servers_[0]->port_);
- response_generator.SetNextResolution(ports);
- gpr_log(GPR_INFO, "****** SET [0] *******");
- CheckRpcSendOk(stub, DEBUG_LOCATION);
- EXPECT_EQ(servers_[0]->service_.request_count(), 1);
- servers_[0]->service_.ResetCounters();
-
- // Send a superset update.
- ports.clear();
- ports.emplace_back(servers_[1]->port_);
- ports.emplace_back(servers_[0]->port_);
- response_generator.SetNextResolution(ports);
- gpr_log(GPR_INFO, "****** SET superset *******");
- CheckRpcSendOk(stub, DEBUG_LOCATION);
- // We stick to the previously connected server.
- WaitForServer(stub, 0, DEBUG_LOCATION);
- EXPECT_EQ(0, servers_[1]->service_.request_count());
-
- // Check LB policy name for the channel.
- EXPECT_EQ("pick_first", channel->GetLoadBalancingPolicyName());
-}
-
-TEST_F(ClientLbEnd2endTest, PickFirstGlobalSubchannelPool) {
- // Start one server.
- const int kNumServers = 1;
- StartServers(kNumServers);
- std::vector<int> ports = GetServersPorts();
- // Create two channels that (by default) use the global subchannel pool.
- auto response_generator1 = BuildResolverResponseGenerator();
- auto channel1 = BuildChannel("pick_first", response_generator1);
- auto stub1 = BuildStub(channel1);
- response_generator1.SetNextResolution(ports);
- auto response_generator2 = BuildResolverResponseGenerator();
- auto channel2 = BuildChannel("pick_first", response_generator2);
- auto stub2 = BuildStub(channel2);
- response_generator2.SetNextResolution(ports);
- WaitForServer(stub1, 0, DEBUG_LOCATION);
- // Send one RPC on each channel.
- CheckRpcSendOk(stub1, DEBUG_LOCATION);
- CheckRpcSendOk(stub2, DEBUG_LOCATION);
- // The server receives two requests.
- EXPECT_EQ(2, servers_[0]->service_.request_count());
- // The two requests are from the same client port, because the two channels
- // share subchannels via the global subchannel pool.
- EXPECT_EQ(1UL, servers_[0]->service_.clients().size());
-}
-
-TEST_F(ClientLbEnd2endTest, PickFirstLocalSubchannelPool) {
- // Start one server.
- const int kNumServers = 1;
- StartServers(kNumServers);
- std::vector<int> ports = GetServersPorts();
- // Create two channels that use local subchannel pool.
- ChannelArguments args;
- args.SetInt(GRPC_ARG_USE_LOCAL_SUBCHANNEL_POOL, 1);
- auto response_generator1 = BuildResolverResponseGenerator();
- auto channel1 = BuildChannel("pick_first", response_generator1, args);
- auto stub1 = BuildStub(channel1);
- response_generator1.SetNextResolution(ports);
- auto response_generator2 = BuildResolverResponseGenerator();
- auto channel2 = BuildChannel("pick_first", response_generator2, args);
- auto stub2 = BuildStub(channel2);
- response_generator2.SetNextResolution(ports);
- WaitForServer(stub1, 0, DEBUG_LOCATION);
- // Send one RPC on each channel.
- CheckRpcSendOk(stub1, DEBUG_LOCATION);
- CheckRpcSendOk(stub2, DEBUG_LOCATION);
- // The server receives two requests.
- EXPECT_EQ(2, servers_[0]->service_.request_count());
- // The two requests are from two client ports, because the two channels didn't
- // share subchannels with each other.
- EXPECT_EQ(2UL, servers_[0]->service_.clients().size());
-}
-
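The two pool tests check whether channels reuse a connection to the same address: with the default global subchannel pool the server sees a single client port, while GRPC_ARG_USE_LOCAL_SUBCHANNEL_POOL makes each channel dial its own connection. A toy sketch of that keyed-pool idea, using plain maps and strings instead of real subchannels:

#include <cstdio>
#include <map>
#include <memory>
#include <string>

// Toy "subchannel": in reality this would own a TCP connection.
struct Subchannel {
  explicit Subchannel(std::string addr) : address(std::move(addr)) {}
  std::string address;
};

using SubchannelPool = std::map<std::string, std::shared_ptr<Subchannel>>;

// Return a pooled subchannel for `address`, creating it on first use.
std::shared_ptr<Subchannel> GetOrCreate(SubchannelPool& pool,
                                        const std::string& address) {
  auto it = pool.find(address);
  if (it != pool.end()) return it->second;
  auto sc = std::make_shared<Subchannel>(address);
  pool.emplace(address, sc);
  return sc;
}

int main() {
  const std::string addr = "127.0.0.1:5100";
  // Global pool: two channels resolve to the same Subchannel object.
  SubchannelPool global_pool;
  auto ch1 = GetOrCreate(global_pool, addr);
  auto ch2 = GetOrCreate(global_pool, addr);
  std::printf("global pool shares connection: %s\n", ch1 == ch2 ? "yes" : "no");
  // Local pools: each channel gets its own Subchannel (its own client port).
  SubchannelPool local_pool_1, local_pool_2;
  auto lc1 = GetOrCreate(local_pool_1, addr);
  auto lc2 = GetOrCreate(local_pool_2, addr);
  std::printf("local pools share connection:  %s\n", lc1 == lc2 ? "yes" : "no");
}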
-TEST_F(ClientLbEnd2endTest, PickFirstManyUpdates) {
- const int kNumUpdates = 1000;
- const int kNumServers = 3;
- StartServers(kNumServers);
- auto response_generator = BuildResolverResponseGenerator();
- auto channel = BuildChannel("pick_first", response_generator);
- auto stub = BuildStub(channel);
- std::vector<int> ports = GetServersPorts();
- for (size_t i = 0; i < kNumUpdates; ++i) {
- std::shuffle(ports.begin(), ports.end(),
- std::mt19937(std::random_device()()));
- response_generator.SetNextResolution(ports);
- // We should re-enter core at the end of the loop to give the resolution
- // setting closure a chance to run.
- if ((i + 1) % 10 == 0) CheckRpcSendOk(stub, DEBUG_LOCATION);
- }
- // Check LB policy name for the channel.
- EXPECT_EQ("pick_first", channel->GetLoadBalancingPolicyName());
-}
-
-TEST_F(ClientLbEnd2endTest, PickFirstReresolutionNoSelected) {
- // Prepare the ports for up servers and down servers.
- const int kNumServers = 3;
- const int kNumAliveServers = 1;
- StartServers(kNumAliveServers);
- std::vector<int> alive_ports, dead_ports;
- for (size_t i = 0; i < kNumServers; ++i) {
- if (i < kNumAliveServers) {
- alive_ports.emplace_back(servers_[i]->port_);
- } else {
- dead_ports.emplace_back(5107 + i);
- // dead_ports.emplace_back(grpc_pick_unused_port_or_die());
- }
- }
- auto response_generator = BuildResolverResponseGenerator();
- auto channel = BuildChannel("pick_first", response_generator);
- auto stub = BuildStub(channel);
- // The initial resolution only contains dead ports. There won't be any
- // selected subchannel. Re-resolution will return the same result.
- response_generator.SetNextResolution(dead_ports);
- gpr_log(GPR_INFO, "****** INITIAL RESOLUTION SET *******");
- for (size_t i = 0; i < 10; ++i) CheckRpcSendFailure(stub);
- // Set a re-resolution result that contains reachable ports, so that the
- // pick_first LB policy can recover soon.
- response_generator.SetNextResolutionUponError(alive_ports);
- gpr_log(GPR_INFO, "****** RE-RESOLUTION SET *******");
- WaitForServer(stub, 0, DEBUG_LOCATION, true /* ignore_failure */);
- CheckRpcSendOk(stub, DEBUG_LOCATION);
- EXPECT_EQ(servers_[0]->service_.request_count(), 1);
- // Check LB policy name for the channel.
- EXPECT_EQ("pick_first", channel->GetLoadBalancingPolicyName());
-}
-
-TEST_F(ClientLbEnd2endTest, PickFirstReconnectWithoutNewResolverResult) {
- std::vector<int> ports = {5110}; // {grpc_pick_unused_port_or_die()};
- StartServers(1, ports);
- auto response_generator = BuildResolverResponseGenerator();
- auto channel = BuildChannel("pick_first", response_generator);
- auto stub = BuildStub(channel);
- response_generator.SetNextResolution(ports);
- gpr_log(GPR_INFO, "****** INITIAL CONNECTION *******");
- WaitForServer(stub, 0, DEBUG_LOCATION);
- gpr_log(GPR_INFO, "****** STOPPING SERVER ******");
- servers_[0]->Shutdown();
- EXPECT_TRUE(WaitForChannelNotReady(channel.get()));
- gpr_log(GPR_INFO, "****** RESTARTING SERVER ******");
- StartServers(1, ports);
- WaitForServer(stub, 0, DEBUG_LOCATION);
-}
-
-TEST_F(ClientLbEnd2endTest,
- PickFirstReconnectWithoutNewResolverResultStartsFromTopOfList) {
- std::vector<int> ports = {5111, // grpc_pick_unused_port_or_die(),
- 5112}; // grpc_pick_unused_port_or_die()};
- CreateServers(2, ports);
- StartServer(1);
- auto response_generator = BuildResolverResponseGenerator();
- auto channel = BuildChannel("pick_first", response_generator);
- auto stub = BuildStub(channel);
- response_generator.SetNextResolution(ports);
- gpr_log(GPR_INFO, "****** INITIAL CONNECTION *******");
- WaitForServer(stub, 1, DEBUG_LOCATION);
- gpr_log(GPR_INFO, "****** STOPPING SERVER ******");
- servers_[1]->Shutdown();
- EXPECT_TRUE(WaitForChannelNotReady(channel.get()));
- gpr_log(GPR_INFO, "****** STARTING BOTH SERVERS ******");
- StartServers(2, ports);
- WaitForServer(stub, 0, DEBUG_LOCATION);
-}
-
-TEST_F(ClientLbEnd2endTest, PickFirstCheckStateBeforeStartWatch) {
- std::vector<int> ports = {5113}; // {grpc_pick_unused_port_or_die()};
- StartServers(1, ports);
- auto response_generator = BuildResolverResponseGenerator();
- auto channel_1 = BuildChannel("pick_first", response_generator);
- auto stub_1 = BuildStub(channel_1);
- response_generator.SetNextResolution(ports);
- gpr_log(GPR_INFO, "****** RESOLUTION SET FOR CHANNEL 1 *******");
- WaitForServer(stub_1, 0, DEBUG_LOCATION);
- gpr_log(GPR_INFO, "****** CHANNEL 1 CONNECTED *******");
- servers_[0]->Shutdown();
- // Channel 1 will receive a re-resolution containing the same server. It will
- // create a new subchannel and hold a ref to it.
- StartServers(1, ports);
- gpr_log(GPR_INFO, "****** SERVER RESTARTED *******");
- auto response_generator_2 = BuildResolverResponseGenerator();
- auto channel_2 = BuildChannel("pick_first", response_generator_2);
- auto stub_2 = BuildStub(channel_2);
- response_generator_2.SetNextResolution(ports);
- gpr_log(GPR_INFO, "****** RESOLUTION SET FOR CHANNEL 2 *******");
- WaitForServer(stub_2, 0, DEBUG_LOCATION, true);
- gpr_log(GPR_INFO, "****** CHANNEL 2 CONNECTED *******");
- servers_[0]->Shutdown();
- // Wait until the disconnection has triggered the connectivity notification.
- // Otherwise, the subchannel may be picked for the next call but will fail soon.
- EXPECT_TRUE(WaitForChannelNotReady(channel_2.get()));
- // Channel 2 will also receive a re-resolution containing the same server.
- // Both channels will ref the same subchannel that failed.
- StartServers(1, ports);
- gpr_log(GPR_INFO, "****** SERVER RESTARTED AGAIN *******");
- gpr_log(GPR_INFO, "****** CHANNEL 2 STARTING A CALL *******");
- // The first call after the server restart will succeed.
- CheckRpcSendOk(stub_2, DEBUG_LOCATION);
- gpr_log(GPR_INFO, "****** CHANNEL 2 FINISHED A CALL *******");
- // Check LB policy name for the channel.
- EXPECT_EQ("pick_first", channel_1->GetLoadBalancingPolicyName());
- // Check LB policy name for the channel.
- EXPECT_EQ("pick_first", channel_2->GetLoadBalancingPolicyName());
-}
-
-TEST_F(ClientLbEnd2endTest, PickFirstIdleOnDisconnect) {
- // Start server, send RPC, and make sure channel is READY.
- const int kNumServers = 1;
- StartServers(kNumServers);
- auto response_generator = BuildResolverResponseGenerator();
- auto channel =
- BuildChannel("", response_generator); // pick_first is the default.
- auto stub = BuildStub(channel);
- response_generator.SetNextResolution(GetServersPorts());
- CheckRpcSendOk(stub, DEBUG_LOCATION);
- EXPECT_EQ(channel->GetState(false), GRPC_CHANNEL_READY);
- // Stop server. Channel should go into state IDLE.
- response_generator.SetFailureOnReresolution();
- servers_[0]->Shutdown();
- EXPECT_TRUE(WaitForChannelNotReady(channel.get()));
- EXPECT_EQ(channel->GetState(false), GRPC_CHANNEL_IDLE);
- servers_.clear();
-}
-
-TEST_F(ClientLbEnd2endTest, PickFirstPendingUpdateAndSelectedSubchannelFails) {
- auto response_generator = BuildResolverResponseGenerator();
- auto channel =
- BuildChannel("", response_generator); // pick_first is the default.
- auto stub = BuildStub(channel);
- // Create a number of servers, but only start 1 of them.
- CreateServers(10);
- StartServer(0);
- // Initially resolve to first server and make sure it connects.
- gpr_log(GPR_INFO, "Phase 1: Connect to first server.");
- response_generator.SetNextResolution({servers_[0]->port_});
- CheckRpcSendOk(stub, DEBUG_LOCATION, true /* wait_for_ready */);
- EXPECT_EQ(channel->GetState(false), GRPC_CHANNEL_READY);
- // Send a resolution update with the remaining servers, none of which are
- // running yet, so the update will stay pending. Note that it's important
- // to have multiple servers here, or else the test will be flaky; with only
- // one server, the pending subchannel list has already gone into
- // TRANSIENT_FAILURE due to hitting the end of the list by the time we
- // check the state.
- gpr_log(GPR_INFO,
- "Phase 2: Resolver update pointing to remaining "
- "(not started) servers.");
- response_generator.SetNextResolution(GetServersPorts(1 /* start_index */));
- // RPCs will continue to be sent to the first server.
- CheckRpcSendOk(stub, DEBUG_LOCATION);
- // Now stop the first server, so that the current subchannel list
- // fails. This should cause us to immediately swap over to the
- // pending list, even though it's not yet connected. The state should
- // be set to CONNECTING, since that's what the pending subchannel list
- // was doing when we swapped over.
- gpr_log(GPR_INFO, "Phase 3: Stopping first server.");
- servers_[0]->Shutdown();
- WaitForChannelNotReady(channel.get());
- // TODO(roth): This should always return CONNECTING, but it's flaky
- // between that and TRANSIENT_FAILURE. I suspect that this problem
- // will go away once we move the backoff code out of the subchannel
- // and into the LB policies.
- EXPECT_THAT(channel->GetState(false),
- ::testing::AnyOf(GRPC_CHANNEL_CONNECTING,
- GRPC_CHANNEL_TRANSIENT_FAILURE));
- // Now start the second server.
- gpr_log(GPR_INFO, "Phase 4: Starting second server.");
- StartServer(1);
- // The channel should go to READY state and RPCs should go to the
- // second server.
- WaitForChannelReady(channel.get());
- WaitForServer(stub, 1, DEBUG_LOCATION, true /* ignore_failure */);
-}
-
-TEST_F(ClientLbEnd2endTest, PickFirstStaysIdleUponEmptyUpdate) {
- // Start server, send RPC, and make sure channel is READY.
- const int kNumServers = 1;
- StartServers(kNumServers);
- auto response_generator = BuildResolverResponseGenerator();
- auto channel =
- BuildChannel("", response_generator); // pick_first is the default.
- auto stub = BuildStub(channel);
- response_generator.SetNextResolution(GetServersPorts());
- CheckRpcSendOk(stub, DEBUG_LOCATION);
- EXPECT_EQ(channel->GetState(false), GRPC_CHANNEL_READY);
- // Stop server. Channel should go into state IDLE.
- servers_[0]->Shutdown();
- EXPECT_TRUE(WaitForChannelNotReady(channel.get()));
- EXPECT_EQ(channel->GetState(false), GRPC_CHANNEL_IDLE);
- // Now send resolver update that includes no addresses. Channel
- // should stay in state IDLE.
- response_generator.SetNextResolution({});
- EXPECT_FALSE(channel->WaitForStateChange(
- GRPC_CHANNEL_IDLE, grpc_timeout_seconds_to_deadline(3)));
- // Now bring the backend back up and send a non-empty resolver update,
- // and then try to send an RPC. Channel should go back into state READY.
- StartServer(0);
- response_generator.SetNextResolution(GetServersPorts());
- CheckRpcSendOk(stub, DEBUG_LOCATION);
- EXPECT_EQ(channel->GetState(false), GRPC_CHANNEL_READY);
-}
-
-TEST_F(ClientLbEnd2endTest, RoundRobin) {
- // Start servers and send one RPC per server.
- const int kNumServers = 3;
- StartServers(kNumServers);
- auto response_generator = BuildResolverResponseGenerator();
- auto channel = BuildChannel("round_robin", response_generator);
- auto stub = BuildStub(channel);
- response_generator.SetNextResolution(GetServersPorts());
- // Wait until all backends are ready.
- do {
- CheckRpcSendOk(stub, DEBUG_LOCATION);
- } while (!SeenAllServers());
- ResetCounters();
- // "Sync" to the end of the list. Next sequence of picks will start at the
- // first server (index 0).
- WaitForServer(stub, servers_.size() - 1, DEBUG_LOCATION);
- std::vector<int> connection_order;
- for (size_t i = 0; i < servers_.size(); ++i) {
- CheckRpcSendOk(stub, DEBUG_LOCATION);
- UpdateConnectionOrder(servers_, &connection_order);
- }
- // Backends should be iterated over in the order in which the addresses were
- // given.
- const auto expected = std::vector<int>{0, 1, 2};
- EXPECT_EQ(expected, connection_order);
- // Check LB policy name for the channel.
- EXPECT_EQ("round_robin", channel->GetLoadBalancingPolicyName());
-}
-
-TEST_F(ClientLbEnd2endTest, RoundRobinProcessPending) {
- StartServers(1); // Single server
- auto response_generator = BuildResolverResponseGenerator();
- auto channel = BuildChannel("round_robin", response_generator);
- auto stub = BuildStub(channel);
- response_generator.SetNextResolution({servers_[0]->port_});
- WaitForServer(stub, 0, DEBUG_LOCATION);
- // Create a new channel and its corresponding RR LB policy, which will pick
- // the subchannels in READY state from the previous RPC against the same
- // target (even if it happened over a different channel, because subchannels
- // are globally reused). Progress should happen without any transition from
- // this READY state.
- auto second_response_generator = BuildResolverResponseGenerator();
- auto second_channel = BuildChannel("round_robin", second_response_generator);
- auto second_stub = BuildStub(second_channel);
- second_response_generator.SetNextResolution({servers_[0]->port_});
- CheckRpcSendOk(second_stub, DEBUG_LOCATION);
-}
-
-TEST_F(ClientLbEnd2endTest, RoundRobinUpdates) {
- // Start servers and send one RPC per server.
- const int kNumServers = 3;
- StartServers(kNumServers);
- auto response_generator = BuildResolverResponseGenerator();
- auto channel = BuildChannel("round_robin", response_generator);
- auto stub = BuildStub(channel);
- std::vector<int> ports;
- // Start with a single server.
- gpr_log(GPR_INFO, "*** FIRST BACKEND ***");
- ports.emplace_back(servers_[0]->port_);
- response_generator.SetNextResolution(ports);
- WaitForServer(stub, 0, DEBUG_LOCATION);
- // Send RPCs. They should all go to servers_[0]
- for (size_t i = 0; i < 10; ++i) CheckRpcSendOk(stub, DEBUG_LOCATION);
- EXPECT_EQ(10, servers_[0]->service_.request_count());
- EXPECT_EQ(0, servers_[1]->service_.request_count());
- EXPECT_EQ(0, servers_[2]->service_.request_count());
- servers_[0]->service_.ResetCounters();
- // And now for the second server.
- gpr_log(GPR_INFO, "*** SECOND BACKEND ***");
- ports.clear();
- ports.emplace_back(servers_[1]->port_);
- response_generator.SetNextResolution(ports);
- // Wait until the update has been processed, as signaled by the second backend
- // receiving a request.
- EXPECT_EQ(0, servers_[1]->service_.request_count());
- WaitForServer(stub, 1, DEBUG_LOCATION);
- for (size_t i = 0; i < 10; ++i) CheckRpcSendOk(stub, DEBUG_LOCATION);
- EXPECT_EQ(0, servers_[0]->service_.request_count());
- EXPECT_EQ(10, servers_[1]->service_.request_count());
- EXPECT_EQ(0, servers_[2]->service_.request_count());
- servers_[1]->service_.ResetCounters();
- // ... and for the last server.
- gpr_log(GPR_INFO, "*** THIRD BACKEND ***");
- ports.clear();
- ports.emplace_back(servers_[2]->port_);
- response_generator.SetNextResolution(ports);
- WaitForServer(stub, 2, DEBUG_LOCATION);
- for (size_t i = 0; i < 10; ++i) CheckRpcSendOk(stub, DEBUG_LOCATION);
- EXPECT_EQ(0, servers_[0]->service_.request_count());
- EXPECT_EQ(0, servers_[1]->service_.request_count());
- EXPECT_EQ(10, servers_[2]->service_.request_count());
- servers_[2]->service_.ResetCounters();
- // Back to all servers.
- gpr_log(GPR_INFO, "*** ALL BACKENDS ***");
- ports.clear();
- ports.emplace_back(servers_[0]->port_);
- ports.emplace_back(servers_[1]->port_);
- ports.emplace_back(servers_[2]->port_);
- response_generator.SetNextResolution(ports);
- WaitForServer(stub, 0, DEBUG_LOCATION);
- WaitForServer(stub, 1, DEBUG_LOCATION);
- WaitForServer(stub, 2, DEBUG_LOCATION);
- // Send three RPCs, one per server.
- for (size_t i = 0; i < 3; ++i) CheckRpcSendOk(stub, DEBUG_LOCATION);
- EXPECT_EQ(1, servers_[0]->service_.request_count());
- EXPECT_EQ(1, servers_[1]->service_.request_count());
- EXPECT_EQ(1, servers_[2]->service_.request_count());
- // An empty update will result in the channel going into TRANSIENT_FAILURE.
- gpr_log(GPR_INFO, "*** NO BACKENDS ***");
- ports.clear();
- response_generator.SetNextResolution(ports);
- grpc_connectivity_state channel_state;
- do {
- channel_state = channel->GetState(true /* try to connect */);
- } while (channel_state == GRPC_CHANNEL_READY);
- ASSERT_NE(channel_state, GRPC_CHANNEL_READY);
- servers_[0]->service_.ResetCounters();
- // Next update introduces servers_[1], making the channel recover.
- gpr_log(GPR_INFO, "*** BACK TO SECOND BACKEND ***");
- ports.clear();
- ports.emplace_back(servers_[1]->port_);
- response_generator.SetNextResolution(ports);
- WaitForServer(stub, 1, DEBUG_LOCATION);
- channel_state = channel->GetState(false /* try to connect */);
- ASSERT_EQ(channel_state, GRPC_CHANNEL_READY);
- // Check LB policy name for the channel.
- EXPECT_EQ("round_robin", channel->GetLoadBalancingPolicyName());
-}
-
-TEST_F(ClientLbEnd2endTest, RoundRobinUpdateInError) {
- const int kNumServers = 3;
- StartServers(kNumServers);
- auto response_generator = BuildResolverResponseGenerator();
- auto channel = BuildChannel("round_robin", response_generator);
- auto stub = BuildStub(channel);
- std::vector<int> ports;
- // Start with a single server.
- ports.emplace_back(servers_[0]->port_);
- response_generator.SetNextResolution(ports);
- WaitForServer(stub, 0, DEBUG_LOCATION);
- // Send RPCs. They should all go to servers_[0]
- for (size_t i = 0; i < 10; ++i) SendRpc(stub);
- EXPECT_EQ(10, servers_[0]->service_.request_count());
- EXPECT_EQ(0, servers_[1]->service_.request_count());
- EXPECT_EQ(0, servers_[2]->service_.request_count());
- servers_[0]->service_.ResetCounters();
- // Shutdown one of the servers to be sent in the update.
- servers_[1]->Shutdown();
- ports.emplace_back(servers_[1]->port_);
- ports.emplace_back(servers_[2]->port_);
- response_generator.SetNextResolution(ports);
- WaitForServer(stub, 0, DEBUG_LOCATION);
- WaitForServer(stub, 2, DEBUG_LOCATION);
- // Send three RPCs, one per server.
- for (size_t i = 0; i < kNumServers; ++i) SendRpc(stub);
- // The shut-down server shouldn't receive any requests.
- EXPECT_EQ(0, servers_[1]->service_.request_count());
-}
-
-TEST_F(ClientLbEnd2endTest, RoundRobinManyUpdates) {
- // Start servers and send one RPC per server.
- const int kNumServers = 3;
- StartServers(kNumServers);
- auto response_generator = BuildResolverResponseGenerator();
- auto channel = BuildChannel("round_robin", response_generator);
- auto stub = BuildStub(channel);
- std::vector<int> ports = GetServersPorts();
- for (size_t i = 0; i < 1000; ++i) {
- std::shuffle(ports.begin(), ports.end(),
- std::mt19937(std::random_device()()));
- response_generator.SetNextResolution(ports);
- if (i % 10 == 0) CheckRpcSendOk(stub, DEBUG_LOCATION);
- }
- // Check LB policy name for the channel.
- EXPECT_EQ("round_robin", channel->GetLoadBalancingPolicyName());
-}
-
-TEST_F(ClientLbEnd2endTest, RoundRobinConcurrentUpdates) {
- // TODO(dgq): replicate the way internal testing exercises the concurrent
- // update provisions of RR.
-}
-
-TEST_F(ClientLbEnd2endTest, RoundRobinReresolve) {
- // Start servers and send one RPC per server.
- const int kNumServers = 3;
- std::vector<int> first_ports;
- std::vector<int> second_ports;
- first_ports.reserve(kNumServers);
- for (int i = 0; i < kNumServers; ++i) {
- // first_ports.push_back(grpc_pick_unused_port_or_die());
- first_ports.push_back(5114 + i);
- }
- second_ports.reserve(kNumServers);
- for (int i = 0; i < kNumServers; ++i) {
- // second_ports.push_back(grpc_pick_unused_port_or_die());
- second_ports.push_back(5117 + i);
- }
- StartServers(kNumServers, first_ports);
- auto response_generator = BuildResolverResponseGenerator();
- auto channel = BuildChannel("round_robin", response_generator);
- auto stub = BuildStub(channel);
- response_generator.SetNextResolution(first_ports);
- // Send a number of RPCs, which succeed.
- for (size_t i = 0; i < 100; ++i) {
- CheckRpcSendOk(stub, DEBUG_LOCATION);
- }
- // Kill all servers
- gpr_log(GPR_INFO, "****** ABOUT TO KILL SERVERS *******");
- for (size_t i = 0; i < servers_.size(); ++i) {
- servers_[i]->Shutdown();
- }
- gpr_log(GPR_INFO, "****** SERVERS KILLED *******");
- gpr_log(GPR_INFO, "****** SENDING DOOMED REQUESTS *******");
- // Client requests should fail. Send enough to tickle all subchannels.
- for (size_t i = 0; i < servers_.size(); ++i) CheckRpcSendFailure(stub);
- gpr_log(GPR_INFO, "****** DOOMED REQUESTS SENT *******");
- // Bring servers back up on a different set of ports. We need to do this to be
- // sure that the eventual success is *not* due to subchannel reconnection
- // attempts and that an actual re-resolution has happened as a result of the
- // RR policy going into transient failure when all its subchannels become
- // unavailable (in transient failure as well).
- gpr_log(GPR_INFO, "****** RESTARTING SERVERS *******");
- StartServers(kNumServers, second_ports);
- // Don't notify of the update. Wait for the LB policy's re-resolution to
- // "pull" the new ports.
- response_generator.SetNextResolutionUponError(second_ports);
- gpr_log(GPR_INFO, "****** SERVERS RESTARTED *******");
- gpr_log(GPR_INFO, "****** SENDING REQUEST TO SUCCEED *******");
- // Client request should eventually (but still fairly soon) succeed.
- const gpr_timespec deadline = grpc_timeout_seconds_to_deadline(5);
- gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
- while (gpr_time_cmp(deadline, now) > 0) {
- if (SendRpc(stub)) break;
- now = gpr_now(GPR_CLOCK_MONOTONIC);
- }
- ASSERT_GT(gpr_time_cmp(deadline, now), 0);
-}
-
-TEST_F(ClientLbEnd2endTest, RoundRobinTransientFailure) {
- // Start servers and create channel. Channel should go to READY state.
- const int kNumServers = 3;
- StartServers(kNumServers);
- auto response_generator = BuildResolverResponseGenerator();
- auto channel = BuildChannel("round_robin", response_generator);
- auto stub = BuildStub(channel);
- response_generator.SetNextResolution(GetServersPorts());
- EXPECT_TRUE(WaitForChannelReady(channel.get()));
- // Now kill the servers. The channel should transition to TRANSIENT_FAILURE.
- // TODO(roth): This test should ideally check that even when the
- // subchannels are in state CONNECTING for an extended period of time,
- // we will still report TRANSIENT_FAILURE. Unfortunately, we don't
- // currently have a good way to get a subchannel to report CONNECTING
- // for a long period of time, since the servers in this test framework
- // are on the loopback interface, which will immediately return a
- // "Connection refused" error, so the subchannels will only be in
- // CONNECTING state very briefly. When we have time, see if we can
- // find a way to fix this.
- for (size_t i = 0; i < servers_.size(); ++i) {
- servers_[i]->Shutdown();
- }
- auto predicate = [](grpc_connectivity_state state) {
- return state == GRPC_CHANNEL_TRANSIENT_FAILURE;
- };
- EXPECT_TRUE(WaitForChannelState(channel.get(), predicate));
-}
-
-TEST_F(ClientLbEnd2endTest, RoundRobinTransientFailureAtStartup) {
- // Create channel and return servers that don't exist. Channel should
- // quickly transition into TRANSIENT_FAILURE.
- // TODO(roth): This test should ideally check that even when the
- // subchannels are in state CONNECTING for an extended period of time,
- // we will still report TRANSIENT_FAILURE. Unfortunately, we don't
- // currently have a good way to get a subchannel to report CONNECTING
- // for a long period of time, since the servers in this test framework
- // are on the loopback interface, which will immediately return a
- // "Connection refused" error, so the subchannels will only be in
- // CONNECTING state very briefly. When we have time, see if we can
- // find a way to fix this.
- auto response_generator = BuildResolverResponseGenerator();
- auto channel = BuildChannel("round_robin", response_generator);
- auto stub = BuildStub(channel);
- response_generator.SetNextResolution({
- grpc_pick_unused_port_or_die(),
- grpc_pick_unused_port_or_die(),
- grpc_pick_unused_port_or_die(),
- });
- for (size_t i = 0; i < servers_.size(); ++i) {
- servers_[i]->Shutdown();
- }
- auto predicate = [](grpc_connectivity_state state) {
- return state == GRPC_CHANNEL_TRANSIENT_FAILURE;
- };
- EXPECT_TRUE(WaitForChannelState(channel.get(), predicate, true));
-}
-
-TEST_F(ClientLbEnd2endTest, RoundRobinSingleReconnect) {
- const int kNumServers = 3;
- StartServers(kNumServers);
- const auto ports = GetServersPorts();
- auto response_generator = BuildResolverResponseGenerator();
- auto channel = BuildChannel("round_robin", response_generator);
- auto stub = BuildStub(channel);
- response_generator.SetNextResolution(ports);
- for (size_t i = 0; i < kNumServers; ++i) {
- WaitForServer(stub, i, DEBUG_LOCATION);
- }
- for (size_t i = 0; i < servers_.size(); ++i) {
- CheckRpcSendOk(stub, DEBUG_LOCATION);
- EXPECT_EQ(1, servers_[i]->service_.request_count()) << "for backend #" << i;
- }
- // One request should have gone to each server.
- for (size_t i = 0; i < servers_.size(); ++i) {
- EXPECT_EQ(1, servers_[i]->service_.request_count());
- }
- const auto pre_death = servers_[0]->service_.request_count();
- // Kill the first server.
- servers_[0]->Shutdown();
- // Client requests should still succeed. Retries may be needed if RR had
- // returned a pick before noticing the change in the server's connectivity.
- while (!SendRpc(stub)) {
- } // Retry until success.
- // Send a bunch of RPCs that should succeed.
- for (int i = 0; i < 10 * kNumServers; ++i) {
- CheckRpcSendOk(stub, DEBUG_LOCATION);
- }
- const auto post_death = servers_[0]->service_.request_count();
- // No requests have gone to the deceased server.
- EXPECT_EQ(pre_death, post_death);
- // Bring the first server back up.
- StartServer(0);
- // Requests should start arriving at the first server either right away (if
- // the server managed to start before the RR policy retried the subchannel) or
- // after the subchannel retry delay otherwise (if RR's subchannel retried before
- // the server was fully back up).
- WaitForServer(stub, 0, DEBUG_LOCATION);
-}
-
-// If health checking is required by client but health checking service
-// is not running on the server, the channel should be treated as healthy.
-TEST_F(ClientLbEnd2endTest,
- RoundRobinServersHealthCheckingUnimplementedTreatedAsHealthy) {
- StartServers(1); // Single server
- ChannelArguments args;
- args.SetServiceConfigJSON(
- "{\"healthCheckConfig\": "
- "{\"serviceName\": \"health_check_service_name\"}}");
- auto response_generator = BuildResolverResponseGenerator();
- auto channel = BuildChannel("round_robin", response_generator, args);
- auto stub = BuildStub(channel);
- response_generator.SetNextResolution({servers_[0]->port_});
- EXPECT_TRUE(WaitForChannelReady(channel.get()));
- CheckRpcSendOk(stub, DEBUG_LOCATION);
-}
-
-TEST_F(ClientLbEnd2endTest, RoundRobinWithHealthChecking) {
- EnableDefaultHealthCheckService(true);
- // Start servers.
- const int kNumServers = 3;
- StartServers(kNumServers);
- ChannelArguments args;
- args.SetServiceConfigJSON(
- "{\"healthCheckConfig\": "
- "{\"serviceName\": \"health_check_service_name\"}}");
- auto response_generator = BuildResolverResponseGenerator();
- auto channel = BuildChannel("round_robin", response_generator, args);
- auto stub = BuildStub(channel);
- response_generator.SetNextResolution(GetServersPorts());
- // Channel should not become READY, because health checks should be failing.
- gpr_log(GPR_INFO,
- "*** initial state: unknown health check service name for "
- "all servers");
- EXPECT_FALSE(WaitForChannelReady(channel.get(), 1));
- // Now set one of the servers to be healthy.
- // The channel should become healthy and all requests should go to
- // the healthy server.
- gpr_log(GPR_INFO, "*** server 0 healthy");
- servers_[0]->SetServingStatus("health_check_service_name", true);
- EXPECT_TRUE(WaitForChannelReady(channel.get()));
- for (int i = 0; i < 10; ++i) {
- CheckRpcSendOk(stub, DEBUG_LOCATION);
- }
- EXPECT_EQ(10, servers_[0]->service_.request_count());
- EXPECT_EQ(0, servers_[1]->service_.request_count());
- EXPECT_EQ(0, servers_[2]->service_.request_count());
- // Now set a second server to be healthy.
- gpr_log(GPR_INFO, "*** server 2 healthy");
- servers_[2]->SetServingStatus("health_check_service_name", true);
- WaitForServer(stub, 2, DEBUG_LOCATION);
- for (int i = 0; i < 10; ++i) {
- CheckRpcSendOk(stub, DEBUG_LOCATION);
- }
- EXPECT_EQ(5, servers_[0]->service_.request_count());
- EXPECT_EQ(0, servers_[1]->service_.request_count());
- EXPECT_EQ(5, servers_[2]->service_.request_count());
- // Now set the remaining server to be healthy.
- gpr_log(GPR_INFO, "*** server 1 healthy");
- servers_[1]->SetServingStatus("health_check_service_name", true);
- WaitForServer(stub, 1, DEBUG_LOCATION);
- for (int i = 0; i < 9; ++i) {
- CheckRpcSendOk(stub, DEBUG_LOCATION);
- }
- EXPECT_EQ(3, servers_[0]->service_.request_count());
- EXPECT_EQ(3, servers_[1]->service_.request_count());
- EXPECT_EQ(3, servers_[2]->service_.request_count());
- // Now set one server to be unhealthy again. Then wait until the
- // unhealthiness has hit the client. We know that the client will see
- // this when we send kNumServers requests and one of the remaining servers
- // sees two of the requests.
- gpr_log(GPR_INFO, "*** server 0 unhealthy");
- servers_[0]->SetServingStatus("health_check_service_name", false);
- do {
- ResetCounters();
- for (int i = 0; i < kNumServers; ++i) {
- CheckRpcSendOk(stub, DEBUG_LOCATION);
- }
- } while (servers_[1]->service_.request_count() != 2 &&
- servers_[2]->service_.request_count() != 2);
- // Now set the remaining two servers to be unhealthy. Make sure the
- // channel leaves READY state and that RPCs fail.
- gpr_log(GPR_INFO, "*** all servers unhealthy");
- servers_[1]->SetServingStatus("health_check_service_name", false);
- servers_[2]->SetServingStatus("health_check_service_name", false);
- EXPECT_TRUE(WaitForChannelNotReady(channel.get()));
- CheckRpcSendFailure(stub);
- // Clean up.
- EnableDefaultHealthCheckService(false);
-}
-
-TEST_F(ClientLbEnd2endTest,
- RoundRobinWithHealthCheckingHandlesSubchannelFailure) {
- EnableDefaultHealthCheckService(true);
- // Start servers.
- const int kNumServers = 3;
- StartServers(kNumServers);
- servers_[0]->SetServingStatus("health_check_service_name", true);
- servers_[1]->SetServingStatus("health_check_service_name", true);
- servers_[2]->SetServingStatus("health_check_service_name", true);
- ChannelArguments args;
- args.SetServiceConfigJSON(
- "{\"healthCheckConfig\": "
- "{\"serviceName\": \"health_check_service_name\"}}");
- auto response_generator = BuildResolverResponseGenerator();
- auto channel = BuildChannel("round_robin", response_generator, args);
- auto stub = BuildStub(channel);
- response_generator.SetNextResolution(GetServersPorts());
- WaitForServer(stub, 0, DEBUG_LOCATION);
- // Stop server 0 and send a new resolver result to ensure that RR
- // checks each subchannel's state.
- servers_[0]->Shutdown();
- response_generator.SetNextResolution(GetServersPorts());
- // Send a bunch more RPCs.
- for (size_t i = 0; i < 100; i++) {
- SendRpc(stub);
- }
-}
-
-TEST_F(ClientLbEnd2endTest, RoundRobinWithHealthCheckingInhibitPerChannel) {
- EnableDefaultHealthCheckService(true);
- // Start server.
- const int kNumServers = 1;
- StartServers(kNumServers);
- // Create a channel with health-checking enabled.
- ChannelArguments args;
- args.SetServiceConfigJSON(
- "{\"healthCheckConfig\": "
- "{\"serviceName\": \"health_check_service_name\"}}");
- auto response_generator1 = BuildResolverResponseGenerator();
- auto channel1 = BuildChannel("round_robin", response_generator1, args);
- auto stub1 = BuildStub(channel1);
- std::vector<int> ports = GetServersPorts();
- response_generator1.SetNextResolution(ports);
- // Create a channel with health checking enabled but inhibited.
- args.SetInt(GRPC_ARG_INHIBIT_HEALTH_CHECKING, 1);
- auto response_generator2 = BuildResolverResponseGenerator();
- auto channel2 = BuildChannel("round_robin", response_generator2, args);
- auto stub2 = BuildStub(channel2);
- response_generator2.SetNextResolution(ports);
- // First channel should not become READY, because health checks should be
- // failing.
- EXPECT_FALSE(WaitForChannelReady(channel1.get(), 1));
- CheckRpcSendFailure(stub1);
- // Second channel should be READY.
- EXPECT_TRUE(WaitForChannelReady(channel2.get(), 1));
- CheckRpcSendOk(stub2, DEBUG_LOCATION);
- // Enable health checks on the backend and wait for channel 1 to succeed.
- servers_[0]->SetServingStatus("health_check_service_name", true);
- CheckRpcSendOk(stub1, DEBUG_LOCATION, true /* wait_for_ready */);
- // Check that we created only one subchannel to the backend.
- EXPECT_EQ(1UL, servers_[0]->service_.clients().size());
- // Clean up.
- EnableDefaultHealthCheckService(false);
-}
-
-TEST_F(ClientLbEnd2endTest, RoundRobinWithHealthCheckingServiceNamePerChannel) {
- EnableDefaultHealthCheckService(true);
- // Start server.
- const int kNumServers = 1;
- StartServers(kNumServers);
- // Create a channel with health-checking enabled.
- ChannelArguments args;
- args.SetServiceConfigJSON(
- "{\"healthCheckConfig\": "
- "{\"serviceName\": \"health_check_service_name\"}}");
- auto response_generator1 = BuildResolverResponseGenerator();
- auto channel1 = BuildChannel("round_robin", response_generator1, args);
- auto stub1 = BuildStub(channel1);
- std::vector<int> ports = GetServersPorts();
- response_generator1.SetNextResolution(ports);
- // Create a channel with health-checking enabled with a different
- // service name.
- ChannelArguments args2;
- args2.SetServiceConfigJSON(
- "{\"healthCheckConfig\": "
- "{\"serviceName\": \"health_check_service_name2\"}}");
- auto response_generator2 = BuildResolverResponseGenerator();
- auto channel2 = BuildChannel("round_robin", response_generator2, args2);
- auto stub2 = BuildStub(channel2);
- response_generator2.SetNextResolution(ports);
- // Allow health checks from channel 2 to succeed.
- servers_[0]->SetServingStatus("health_check_service_name2", true);
- // First channel should not become READY, because health checks should be
- // failing.
- EXPECT_FALSE(WaitForChannelReady(channel1.get(), 1));
- CheckRpcSendFailure(stub1);
- // Second channel should be READY.
- EXPECT_TRUE(WaitForChannelReady(channel2.get(), 1));
- CheckRpcSendOk(stub2, DEBUG_LOCATION);
- // Enable health checks for channel 1 and wait for it to succeed.
- servers_[0]->SetServingStatus("health_check_service_name", true);
- CheckRpcSendOk(stub1, DEBUG_LOCATION, true /* wait_for_ready */);
- // Check that we created only one subchannel to the backend.
- EXPECT_EQ(1UL, servers_[0]->service_.clients().size());
- // Clean up.
- EnableDefaultHealthCheckService(false);
-}
-
-TEST_F(ClientLbEnd2endTest,
- RoundRobinWithHealthCheckingServiceNameChangesAfterSubchannelsCreated) {
- EnableDefaultHealthCheckService(true);
- // Start server.
- const int kNumServers = 1;
- StartServers(kNumServers);
- // Create a channel with health-checking enabled.
- const char* kServiceConfigJson =
- "{\"healthCheckConfig\": "
- "{\"serviceName\": \"health_check_service_name\"}}";
- auto response_generator = BuildResolverResponseGenerator();
- auto channel = BuildChannel("round_robin", response_generator);
- auto stub = BuildStub(channel);
- std::vector<int> ports = GetServersPorts();
- response_generator.SetNextResolution(ports, kServiceConfigJson);
- servers_[0]->SetServingStatus("health_check_service_name", true);
- EXPECT_TRUE(WaitForChannelReady(channel.get(), 1 /* timeout_seconds */));
- // Send an update on the channel to change it to use a health checking
- // service name that is not being reported as healthy.
- const char* kServiceConfigJson2 =
- "{\"healthCheckConfig\": "
- "{\"serviceName\": \"health_check_service_name2\"}}";
- response_generator.SetNextResolution(ports, kServiceConfigJson2);
- EXPECT_TRUE(WaitForChannelNotReady(channel.get()));
- // Clean up.
- EnableDefaultHealthCheckService(false);
-}
-
-TEST_F(ClientLbEnd2endTest, ChannelIdleness) {
- // Start server.
- const int kNumServers = 1;
- StartServers(kNumServers);
- // Set max idle time and build the channel.
- ChannelArguments args;
- args.SetInt(GRPC_ARG_CLIENT_IDLE_TIMEOUT_MS, 1000);
- auto response_generator = BuildResolverResponseGenerator();
- auto channel = BuildChannel("", response_generator, args);
- auto stub = BuildStub(channel);
- // The initial channel state should be IDLE.
- EXPECT_EQ(channel->GetState(false), GRPC_CHANNEL_IDLE);
- // After sending RPC, channel state should be READY.
- response_generator.SetNextResolution(GetServersPorts());
- CheckRpcSendOk(stub, DEBUG_LOCATION);
- EXPECT_EQ(channel->GetState(false), GRPC_CHANNEL_READY);
- // After a period of time without using the channel, the channel state should
- // switch to IDLE.
- gpr_sleep_until(grpc_timeout_milliseconds_to_deadline(1200));
- EXPECT_EQ(channel->GetState(false), GRPC_CHANNEL_IDLE);
- // Sending a new RPC should wake up the IDLE channel.
- response_generator.SetNextResolution(GetServersPorts());
- CheckRpcSendOk(stub, DEBUG_LOCATION);
- EXPECT_EQ(channel->GetState(false), GRPC_CHANNEL_READY);
-}
-
+ const gpr_timespec t1 = gpr_now(GPR_CLOCK_MONOTONIC);
+ const grpc_millis waited_ms = gpr_time_to_millis(gpr_time_sub(t1, t0));
+ gpr_log(GPR_DEBUG, "Waited %" PRId64 " milliseconds", waited_ms);
+ // We should have waited less than kInitialBackOffMs.
+ EXPECT_LT(waited_ms, kInitialBackOffMs);
+}
+
+TEST_F(ClientLbEnd2endTest,
+ PickFirstResetConnectionBackoffNextAttemptStartsImmediately) {
+ ChannelArguments args;
+ constexpr int kInitialBackOffMs = 1000;
+ args.SetInt(GRPC_ARG_INITIAL_RECONNECT_BACKOFF_MS, kInitialBackOffMs);
+ const std::vector<int> ports = {5106}; // {grpc_pick_unused_port_or_die()};
+ auto response_generator = BuildResolverResponseGenerator();
+ auto channel = BuildChannel("pick_first", response_generator, args);
+ auto stub = BuildStub(channel);
+ response_generator.SetNextResolution(ports);
+ // Wait for connect, which should fail ~immediately, because the server
+ // is not up.
+ gpr_log(GPR_INFO, "=== INITIAL CONNECTION ATTEMPT");
+ EXPECT_FALSE(
+ channel->WaitForConnected(grpc_timeout_milliseconds_to_deadline(10)));
+ // Reset connection backoff.
+ // Note that the time at which the third attempt will be started is
+ // actually computed at this point, so we record the start time here.
+ gpr_log(GPR_INFO, "=== RESETTING BACKOFF");
+ const gpr_timespec t0 = gpr_now(GPR_CLOCK_MONOTONIC);
+ experimental::ChannelResetConnectionBackoff(channel.get());
+ // Trigger a second connection attempt. This should also fail
+ // ~immediately, but the retry should be scheduled for
+ // kInitialBackOffMs instead of applying the multiplier.
+ gpr_log(GPR_INFO, "=== POLLING FOR SECOND CONNECTION ATTEMPT");
+ EXPECT_FALSE(
+ channel->WaitForConnected(grpc_timeout_milliseconds_to_deadline(10)));
+ // Bring up a server on the chosen port.
+ gpr_log(GPR_INFO, "=== STARTING BACKEND");
+ StartServers(1, ports);
+ // Wait for connect. Should happen within kInitialBackOffMs.
+ // Give an extra 100ms to account for the time spent in the second and
+ // third connection attempts themselves (since what we really want to
+ // measure is the time between the two). As long as this is less than
+ // the 1.6x increase we would see if the backoff state was not reset
+ // properly, the test is still proving that the backoff was reset.
+ constexpr int kWaitMs = kInitialBackOffMs + 100;
+ gpr_log(GPR_INFO, "=== POLLING FOR THIRD CONNECTION ATTEMPT");
+ EXPECT_TRUE(channel->WaitForConnected(
+ grpc_timeout_milliseconds_to_deadline(kWaitMs)));
+ const gpr_timespec t1 = gpr_now(GPR_CLOCK_MONOTONIC);
+ const grpc_millis waited_ms = gpr_time_to_millis(gpr_time_sub(t1, t0));
+ gpr_log(GPR_DEBUG, "Waited %" PRId64 " milliseconds", waited_ms);
+ EXPECT_LT(waited_ms, kWaitMs);
+}
+
+TEST_F(ClientLbEnd2endTest, PickFirstUpdates) {
+ // Start servers and send one RPC per server.
+ const int kNumServers = 3;
+ StartServers(kNumServers);
+ auto response_generator = BuildResolverResponseGenerator();
+ auto channel = BuildChannel("pick_first", response_generator);
+ auto stub = BuildStub(channel);
+
+ std::vector<int> ports;
+
+ // Perform one RPC against the first server.
+ ports.emplace_back(servers_[0]->port_);
+ response_generator.SetNextResolution(ports);
+ gpr_log(GPR_INFO, "****** SET [0] *******");
+ CheckRpcSendOk(stub, DEBUG_LOCATION);
+ EXPECT_EQ(servers_[0]->service_.request_count(), 1);
+
+ // An empty update will result in the channel going into TRANSIENT_FAILURE.
+ ports.clear();
+ response_generator.SetNextResolution(ports);
+ gpr_log(GPR_INFO, "****** SET none *******");
+ grpc_connectivity_state channel_state;
+ do {
+ channel_state = channel->GetState(true /* try to connect */);
+ } while (channel_state == GRPC_CHANNEL_READY);
+ ASSERT_NE(channel_state, GRPC_CHANNEL_READY);
+ servers_[0]->service_.ResetCounters();
+
+ // Next update introduces servers_[1], making the channel recover.
+ ports.clear();
+ ports.emplace_back(servers_[1]->port_);
+ response_generator.SetNextResolution(ports);
+ gpr_log(GPR_INFO, "****** SET [1] *******");
+ WaitForServer(stub, 1, DEBUG_LOCATION);
+ EXPECT_EQ(servers_[0]->service_.request_count(), 0);
+
+ // And again for servers_[2]
+ ports.clear();
+ ports.emplace_back(servers_[2]->port_);
+ response_generator.SetNextResolution(ports);
+ gpr_log(GPR_INFO, "****** SET [2] *******");
+ WaitForServer(stub, 2, DEBUG_LOCATION);
+ EXPECT_EQ(servers_[0]->service_.request_count(), 0);
+ EXPECT_EQ(servers_[1]->service_.request_count(), 0);
+
+ // Check LB policy name for the channel.
+ EXPECT_EQ("pick_first", channel->GetLoadBalancingPolicyName());
+}
+
+TEST_F(ClientLbEnd2endTest, PickFirstUpdateSuperset) {
+ // Start servers and send one RPC per server.
+ const int kNumServers = 3;
+ StartServers(kNumServers);
+ auto response_generator = BuildResolverResponseGenerator();
+ auto channel = BuildChannel("pick_first", response_generator);
+ auto stub = BuildStub(channel);
+
+ std::vector<int> ports;
+
+ // Perform one RPC against the first server.
+ ports.emplace_back(servers_[0]->port_);
+ response_generator.SetNextResolution(ports);
+ gpr_log(GPR_INFO, "****** SET [0] *******");
+ CheckRpcSendOk(stub, DEBUG_LOCATION);
+ EXPECT_EQ(servers_[0]->service_.request_count(), 1);
+ servers_[0]->service_.ResetCounters();
+
+ // Send a superset update.
+ ports.clear();
+ ports.emplace_back(servers_[1]->port_);
+ ports.emplace_back(servers_[0]->port_);
+ response_generator.SetNextResolution(ports);
+ gpr_log(GPR_INFO, "****** SET superset *******");
+ CheckRpcSendOk(stub, DEBUG_LOCATION);
+ // We stick to the previously connected server.
+ WaitForServer(stub, 0, DEBUG_LOCATION);
+ EXPECT_EQ(0, servers_[1]->service_.request_count());
+
+ // Check LB policy name for the channel.
+ EXPECT_EQ("pick_first", channel->GetLoadBalancingPolicyName());
+}
+
+TEST_F(ClientLbEnd2endTest, PickFirstGlobalSubchannelPool) {
+ // Start one server.
+ const int kNumServers = 1;
+ StartServers(kNumServers);
+ std::vector<int> ports = GetServersPorts();
+ // Create two channels that (by default) use the global subchannel pool.
+ auto response_generator1 = BuildResolverResponseGenerator();
+ auto channel1 = BuildChannel("pick_first", response_generator1);
+ auto stub1 = BuildStub(channel1);
+ response_generator1.SetNextResolution(ports);
+ auto response_generator2 = BuildResolverResponseGenerator();
+ auto channel2 = BuildChannel("pick_first", response_generator2);
+ auto stub2 = BuildStub(channel2);
+ response_generator2.SetNextResolution(ports);
+ WaitForServer(stub1, 0, DEBUG_LOCATION);
+ // Send one RPC on each channel.
+ CheckRpcSendOk(stub1, DEBUG_LOCATION);
+ CheckRpcSendOk(stub2, DEBUG_LOCATION);
+ // The server receives two requests.
+ EXPECT_EQ(2, servers_[0]->service_.request_count());
+ // The two requests are from the same client port, because the two channels
+ // share subchannels via the global subchannel pool.
+ EXPECT_EQ(1UL, servers_[0]->service_.clients().size());
+}
+
+TEST_F(ClientLbEnd2endTest, PickFirstLocalSubchannelPool) {
+ // Start one server.
+ const int kNumServers = 1;
+ StartServers(kNumServers);
+ std::vector<int> ports = GetServersPorts();
+ // Create two channels that use local subchannel pool.
+ ChannelArguments args;
+ args.SetInt(GRPC_ARG_USE_LOCAL_SUBCHANNEL_POOL, 1);
+ auto response_generator1 = BuildResolverResponseGenerator();
+ auto channel1 = BuildChannel("pick_first", response_generator1, args);
+ auto stub1 = BuildStub(channel1);
+ response_generator1.SetNextResolution(ports);
+ auto response_generator2 = BuildResolverResponseGenerator();
+ auto channel2 = BuildChannel("pick_first", response_generator2, args);
+ auto stub2 = BuildStub(channel2);
+ response_generator2.SetNextResolution(ports);
+ WaitForServer(stub1, 0, DEBUG_LOCATION);
+ // Send one RPC on each channel.
+ CheckRpcSendOk(stub1, DEBUG_LOCATION);
+ CheckRpcSendOk(stub2, DEBUG_LOCATION);
+ // The server receives two requests.
+ EXPECT_EQ(2, servers_[0]->service_.request_count());
+ // The two requests are from two client ports, because the two channels didn't
+ // share subchannels with each other.
+ EXPECT_EQ(2UL, servers_[0]->service_.clients().size());
+}
+
+TEST_F(ClientLbEnd2endTest, PickFirstManyUpdates) {
+ const int kNumUpdates = 1000;
+ const int kNumServers = 3;
+ StartServers(kNumServers);
+ auto response_generator = BuildResolverResponseGenerator();
+ auto channel = BuildChannel("pick_first", response_generator);
+ auto stub = BuildStub(channel);
+ std::vector<int> ports = GetServersPorts();
+ for (size_t i = 0; i < kNumUpdates; ++i) {
+ std::shuffle(ports.begin(), ports.end(),
+ std::mt19937(std::random_device()()));
+ response_generator.SetNextResolution(ports);
+ // We should re-enter core at the end of the loop to give the resolution
+ // setting closure a chance to run.
+ if ((i + 1) % 10 == 0) CheckRpcSendOk(stub, DEBUG_LOCATION);
+ }
+ // Check LB policy name for the channel.
+ EXPECT_EQ("pick_first", channel->GetLoadBalancingPolicyName());
+}
+
+TEST_F(ClientLbEnd2endTest, PickFirstReresolutionNoSelected) {
+ // Prepare the ports for the servers that will be up and those that will be down.
+ const int kNumServers = 3;
+ const int kNumAliveServers = 1;
+ StartServers(kNumAliveServers);
+ std::vector<int> alive_ports, dead_ports;
+ for (size_t i = 0; i < kNumServers; ++i) {
+ if (i < kNumAliveServers) {
+ alive_ports.emplace_back(servers_[i]->port_);
+ } else {
+ dead_ports.emplace_back(5107 + i);
+ // dead_ports.emplace_back(grpc_pick_unused_port_or_die());
+ }
+ }
+ auto response_generator = BuildResolverResponseGenerator();
+ auto channel = BuildChannel("pick_first", response_generator);
+ auto stub = BuildStub(channel);
+ // The initial resolution only contains dead ports. There won't be any
+ // selected subchannel. Re-resolution will return the same result.
+ response_generator.SetNextResolution(dead_ports);
+ gpr_log(GPR_INFO, "****** INITIAL RESOLUTION SET *******");
+ for (size_t i = 0; i < 10; ++i) CheckRpcSendFailure(stub);
+ // Set a re-resolution result that contains reachable ports, so that the
+ // pick_first LB policy can recover soon.
+ response_generator.SetNextResolutionUponError(alive_ports);
+ gpr_log(GPR_INFO, "****** RE-RESOLUTION SET *******");
+ WaitForServer(stub, 0, DEBUG_LOCATION, true /* ignore_failure */);
+ CheckRpcSendOk(stub, DEBUG_LOCATION);
+ EXPECT_EQ(servers_[0]->service_.request_count(), 1);
+ // Check LB policy name for the channel.
+ EXPECT_EQ("pick_first", channel->GetLoadBalancingPolicyName());
+}
+
+TEST_F(ClientLbEnd2endTest, PickFirstReconnectWithoutNewResolverResult) {
+ std::vector<int> ports = {5110}; // {grpc_pick_unused_port_or_die()};
+ StartServers(1, ports);
+ auto response_generator = BuildResolverResponseGenerator();
+ auto channel = BuildChannel("pick_first", response_generator);
+ auto stub = BuildStub(channel);
+ response_generator.SetNextResolution(ports);
+ gpr_log(GPR_INFO, "****** INITIAL CONNECTION *******");
+ WaitForServer(stub, 0, DEBUG_LOCATION);
+ gpr_log(GPR_INFO, "****** STOPPING SERVER ******");
+ servers_[0]->Shutdown();
+ EXPECT_TRUE(WaitForChannelNotReady(channel.get()));
+ gpr_log(GPR_INFO, "****** RESTARTING SERVER ******");
+ StartServers(1, ports);
+ WaitForServer(stub, 0, DEBUG_LOCATION);
+}
+
+TEST_F(ClientLbEnd2endTest,
+ PickFirstReconnectWithoutNewResolverResultStartsFromTopOfList) {
+ std::vector<int> ports = {5111, // grpc_pick_unused_port_or_die(),
+ 5112}; // grpc_pick_unused_port_or_die()};
+ CreateServers(2, ports);
+ StartServer(1);
+ auto response_generator = BuildResolverResponseGenerator();
+ auto channel = BuildChannel("pick_first", response_generator);
+ auto stub = BuildStub(channel);
+ response_generator.SetNextResolution(ports);
+ gpr_log(GPR_INFO, "****** INITIAL CONNECTION *******");
+ WaitForServer(stub, 1, DEBUG_LOCATION);
+ gpr_log(GPR_INFO, "****** STOPPING SERVER ******");
+ servers_[1]->Shutdown();
+ EXPECT_TRUE(WaitForChannelNotReady(channel.get()));
+ gpr_log(GPR_INFO, "****** STARTING BOTH SERVERS ******");
+ StartServers(2, ports);
+ WaitForServer(stub, 0, DEBUG_LOCATION);
+}
+
+TEST_F(ClientLbEnd2endTest, PickFirstCheckStateBeforeStartWatch) {
+ std::vector<int> ports = {5113}; // {grpc_pick_unused_port_or_die()};
+ StartServers(1, ports);
+ auto response_generator = BuildResolverResponseGenerator();
+ auto channel_1 = BuildChannel("pick_first", response_generator);
+ auto stub_1 = BuildStub(channel_1);
+ response_generator.SetNextResolution(ports);
+ gpr_log(GPR_INFO, "****** RESOLUTION SET FOR CHANNEL 1 *******");
+ WaitForServer(stub_1, 0, DEBUG_LOCATION);
+ gpr_log(GPR_INFO, "****** CHANNEL 1 CONNECTED *******");
+ servers_[0]->Shutdown();
+ // Channel 1 will receive a re-resolution containing the same server. It will
+ // create a new subchannel and hold a ref to it.
+ StartServers(1, ports);
+ gpr_log(GPR_INFO, "****** SERVER RESTARTED *******");
+ auto response_generator_2 = BuildResolverResponseGenerator();
+ auto channel_2 = BuildChannel("pick_first", response_generator_2);
+ auto stub_2 = BuildStub(channel_2);
+ response_generator_2.SetNextResolution(ports);
+ gpr_log(GPR_INFO, "****** RESOLUTION SET FOR CHANNEL 2 *******");
+ WaitForServer(stub_2, 0, DEBUG_LOCATION, true);
+ gpr_log(GPR_INFO, "****** CHANNEL 2 CONNECTED *******");
+ servers_[0]->Shutdown();
+ // Wait until the disconnection has triggered the connectivity notification.
+ // Otherwise, the subchannel may be picked for the next call but will fail soon.
+ EXPECT_TRUE(WaitForChannelNotReady(channel_2.get()));
+ // Channel 2 will also receive a re-resolution containing the same server.
+ // Both channels will ref the same subchannel that failed.
+ StartServers(1, ports);
+ gpr_log(GPR_INFO, "****** SERVER RESTARTED AGAIN *******");
+ gpr_log(GPR_INFO, "****** CHANNEL 2 STARTING A CALL *******");
+ // The first call after the server restart will succeed.
+ CheckRpcSendOk(stub_2, DEBUG_LOCATION);
+ gpr_log(GPR_INFO, "****** CHANNEL 2 FINISHED A CALL *******");
+ // Check LB policy name for the channel.
+ EXPECT_EQ("pick_first", channel_1->GetLoadBalancingPolicyName());
+ // Check LB policy name for the channel.
+ EXPECT_EQ("pick_first", channel_2->GetLoadBalancingPolicyName());
+}
+
+TEST_F(ClientLbEnd2endTest, PickFirstIdleOnDisconnect) {
+ // Start server, send RPC, and make sure channel is READY.
+ const int kNumServers = 1;
+ StartServers(kNumServers);
+ auto response_generator = BuildResolverResponseGenerator();
+ auto channel =
+ BuildChannel("", response_generator); // pick_first is the default.
+ auto stub = BuildStub(channel);
+ response_generator.SetNextResolution(GetServersPorts());
+ CheckRpcSendOk(stub, DEBUG_LOCATION);
+ EXPECT_EQ(channel->GetState(false), GRPC_CHANNEL_READY);
+ // Stop server. Channel should go into state IDLE.
+ response_generator.SetFailureOnReresolution();
+ servers_[0]->Shutdown();
+ EXPECT_TRUE(WaitForChannelNotReady(channel.get()));
+ EXPECT_EQ(channel->GetState(false), GRPC_CHANNEL_IDLE);
+ servers_.clear();
+}
+
+TEST_F(ClientLbEnd2endTest, PickFirstPendingUpdateAndSelectedSubchannelFails) {
+ auto response_generator = BuildResolverResponseGenerator();
+ auto channel =
+ BuildChannel("", response_generator); // pick_first is the default.
+ auto stub = BuildStub(channel);
+ // Create a number of servers, but only start 1 of them.
+ CreateServers(10);
+ StartServer(0);
+ // Initially resolve to first server and make sure it connects.
+ gpr_log(GPR_INFO, "Phase 1: Connect to first server.");
+ response_generator.SetNextResolution({servers_[0]->port_});
+ CheckRpcSendOk(stub, DEBUG_LOCATION, true /* wait_for_ready */);
+ EXPECT_EQ(channel->GetState(false), GRPC_CHANNEL_READY);
+ // Send a resolution update with the remaining servers, none of which are
+ // running yet, so the update will stay pending. Note that it's important
+ // to have multiple servers here, or else the test will be flaky; with only
+ // one server, the pending subchannel list has already gone into
+ // TRANSIENT_FAILURE due to hitting the end of the list by the time we
+ // check the state.
+ gpr_log(GPR_INFO,
+ "Phase 2: Resolver update pointing to remaining "
+ "(not started) servers.");
+ response_generator.SetNextResolution(GetServersPorts(1 /* start_index */));
+ // RPCs will continue to be sent to the first server.
+ CheckRpcSendOk(stub, DEBUG_LOCATION);
+ // Now stop the first server, so that the current subchannel list
+ // fails. This should cause us to immediately swap over to the
+ // pending list, even though it's not yet connected. The state should
+ // be set to CONNECTING, since that's what the pending subchannel list
+ // was doing when we swapped over.
+ gpr_log(GPR_INFO, "Phase 3: Stopping first server.");
+ servers_[0]->Shutdown();
+ WaitForChannelNotReady(channel.get());
+ // TODO(roth): This should always return CONNECTING, but it's flaky
+ // between that and TRANSIENT_FAILURE. I suspect that this problem
+ // will go away once we move the backoff code out of the subchannel
+ // and into the LB policies.
+ EXPECT_THAT(channel->GetState(false),
+ ::testing::AnyOf(GRPC_CHANNEL_CONNECTING,
+ GRPC_CHANNEL_TRANSIENT_FAILURE));
+ // Now start the second server.
+ gpr_log(GPR_INFO, "Phase 4: Starting second server.");
+ StartServer(1);
+ // The channel should go to READY state and RPCs should go to the
+ // second server.
+ WaitForChannelReady(channel.get());
+ WaitForServer(stub, 1, DEBUG_LOCATION, true /* ignore_failure */);
+}
+
+TEST_F(ClientLbEnd2endTest, PickFirstStaysIdleUponEmptyUpdate) {
+ // Start server, send RPC, and make sure channel is READY.
+ const int kNumServers = 1;
+ StartServers(kNumServers);
+ auto response_generator = BuildResolverResponseGenerator();
+ auto channel =
+ BuildChannel("", response_generator); // pick_first is the default.
+ auto stub = BuildStub(channel);
+ response_generator.SetNextResolution(GetServersPorts());
+ CheckRpcSendOk(stub, DEBUG_LOCATION);
+ EXPECT_EQ(channel->GetState(false), GRPC_CHANNEL_READY);
+ // Stop server. Channel should go into state IDLE.
+ servers_[0]->Shutdown();
+ EXPECT_TRUE(WaitForChannelNotReady(channel.get()));
+ EXPECT_EQ(channel->GetState(false), GRPC_CHANNEL_IDLE);
+ // Now send resolver update that includes no addresses. Channel
+ // should stay in state IDLE.
+ response_generator.SetNextResolution({});
+ EXPECT_FALSE(channel->WaitForStateChange(
+ GRPC_CHANNEL_IDLE, grpc_timeout_seconds_to_deadline(3)));
+ // Now bring the backend back up and send a non-empty resolver update,
+ // and then try to send an RPC. Channel should go back into state READY.
+ StartServer(0);
+ response_generator.SetNextResolution(GetServersPorts());
+ CheckRpcSendOk(stub, DEBUG_LOCATION);
+ EXPECT_EQ(channel->GetState(false), GRPC_CHANNEL_READY);
+}
+
+TEST_F(ClientLbEnd2endTest, RoundRobin) {
+ // Start servers and send one RPC per server.
+ const int kNumServers = 3;
+ StartServers(kNumServers);
+ auto response_generator = BuildResolverResponseGenerator();
+ auto channel = BuildChannel("round_robin", response_generator);
+ auto stub = BuildStub(channel);
+ response_generator.SetNextResolution(GetServersPorts());
+ // Wait until all backends are ready.
+ do {
+ CheckRpcSendOk(stub, DEBUG_LOCATION);
+ } while (!SeenAllServers());
+ ResetCounters();
+ // "Sync" to the end of the list. Next sequence of picks will start at the
+ // first server (index 0).
+ WaitForServer(stub, servers_.size() - 1, DEBUG_LOCATION);
+ std::vector<int> connection_order;
+ for (size_t i = 0; i < servers_.size(); ++i) {
+ CheckRpcSendOk(stub, DEBUG_LOCATION);
+ UpdateConnectionOrder(servers_, &connection_order);
+ }
+ // Backends should be iterated over in the order in which the addresses were
+ // given.
+ const auto expected = std::vector<int>{0, 1, 2};
+ EXPECT_EQ(expected, connection_order);
+ // Check LB policy name for the channel.
+ EXPECT_EQ("round_robin", channel->GetLoadBalancingPolicyName());
+}
+
+TEST_F(ClientLbEnd2endTest, RoundRobinProcessPending) {
+ StartServers(1); // Single server
+ auto response_generator = BuildResolverResponseGenerator();
+ auto channel = BuildChannel("round_robin", response_generator);
+ auto stub = BuildStub(channel);
+ response_generator.SetNextResolution({servers_[0]->port_});
+ WaitForServer(stub, 0, DEBUG_LOCATION);
+ // Create a new channel and its corresponding RR LB policy, which will pick
+ // the subchannels in READY state from the previous RPC against the same
+ // target (even if it happened over a different channel, because subchannels
+ // are globally reused). Progress should happen without any transition from
+ // this READY state.
+ auto second_response_generator = BuildResolverResponseGenerator();
+ auto second_channel = BuildChannel("round_robin", second_response_generator);
+ auto second_stub = BuildStub(second_channel);
+ second_response_generator.SetNextResolution({servers_[0]->port_});
+ CheckRpcSendOk(second_stub, DEBUG_LOCATION);
+}
+
+TEST_F(ClientLbEnd2endTest, RoundRobinUpdates) {
+ // Start servers and send one RPC per server.
+ const int kNumServers = 3;
+ StartServers(kNumServers);
+ auto response_generator = BuildResolverResponseGenerator();
+ auto channel = BuildChannel("round_robin", response_generator);
+ auto stub = BuildStub(channel);
+ std::vector<int> ports;
+ // Start with a single server.
+ gpr_log(GPR_INFO, "*** FIRST BACKEND ***");
+ ports.emplace_back(servers_[0]->port_);
+ response_generator.SetNextResolution(ports);
+ WaitForServer(stub, 0, DEBUG_LOCATION);
+ // Send RPCs. They should all go to servers_[0]
+ for (size_t i = 0; i < 10; ++i) CheckRpcSendOk(stub, DEBUG_LOCATION);
+ EXPECT_EQ(10, servers_[0]->service_.request_count());
+ EXPECT_EQ(0, servers_[1]->service_.request_count());
+ EXPECT_EQ(0, servers_[2]->service_.request_count());
+ servers_[0]->service_.ResetCounters();
+ // And now for the second server.
+ gpr_log(GPR_INFO, "*** SECOND BACKEND ***");
+ ports.clear();
+ ports.emplace_back(servers_[1]->port_);
+ response_generator.SetNextResolution(ports);
+ // Wait until the update has been processed, as signaled by the second backend
+ // receiving a request.
+ EXPECT_EQ(0, servers_[1]->service_.request_count());
+ WaitForServer(stub, 1, DEBUG_LOCATION);
+ for (size_t i = 0; i < 10; ++i) CheckRpcSendOk(stub, DEBUG_LOCATION);
+ EXPECT_EQ(0, servers_[0]->service_.request_count());
+ EXPECT_EQ(10, servers_[1]->service_.request_count());
+ EXPECT_EQ(0, servers_[2]->service_.request_count());
+ servers_[1]->service_.ResetCounters();
+ // ... and for the last server.
+ gpr_log(GPR_INFO, "*** THIRD BACKEND ***");
+ ports.clear();
+ ports.emplace_back(servers_[2]->port_);
+ response_generator.SetNextResolution(ports);
+ WaitForServer(stub, 2, DEBUG_LOCATION);
+ for (size_t i = 0; i < 10; ++i) CheckRpcSendOk(stub, DEBUG_LOCATION);
+ EXPECT_EQ(0, servers_[0]->service_.request_count());
+ EXPECT_EQ(0, servers_[1]->service_.request_count());
+ EXPECT_EQ(10, servers_[2]->service_.request_count());
+ servers_[2]->service_.ResetCounters();
+ // Back to all servers.
+ gpr_log(GPR_INFO, "*** ALL BACKENDS ***");
+ ports.clear();
+ ports.emplace_back(servers_[0]->port_);
+ ports.emplace_back(servers_[1]->port_);
+ ports.emplace_back(servers_[2]->port_);
+ response_generator.SetNextResolution(ports);
+ WaitForServer(stub, 0, DEBUG_LOCATION);
+ WaitForServer(stub, 1, DEBUG_LOCATION);
+ WaitForServer(stub, 2, DEBUG_LOCATION);
+ // Send three RPCs, one per server.
+ for (size_t i = 0; i < 3; ++i) CheckRpcSendOk(stub, DEBUG_LOCATION);
+ EXPECT_EQ(1, servers_[0]->service_.request_count());
+ EXPECT_EQ(1, servers_[1]->service_.request_count());
+ EXPECT_EQ(1, servers_[2]->service_.request_count());
+ // An empty update will result in the channel going into TRANSIENT_FAILURE.
+ gpr_log(GPR_INFO, "*** NO BACKENDS ***");
+ ports.clear();
+ response_generator.SetNextResolution(ports);
+ grpc_connectivity_state channel_state;
+ do {
+ channel_state = channel->GetState(true /* try to connect */);
+ } while (channel_state == GRPC_CHANNEL_READY);
+ ASSERT_NE(channel_state, GRPC_CHANNEL_READY);
+ servers_[0]->service_.ResetCounters();
+ // Next update introduces servers_[1], making the channel recover.
+ gpr_log(GPR_INFO, "*** BACK TO SECOND BACKEND ***");
+ ports.clear();
+ ports.emplace_back(servers_[1]->port_);
+ response_generator.SetNextResolution(ports);
+ WaitForServer(stub, 1, DEBUG_LOCATION);
+ channel_state = channel->GetState(false /* try to connect */);
+ ASSERT_EQ(channel_state, GRPC_CHANNEL_READY);
+ // Check LB policy name for the channel.
+ EXPECT_EQ("round_robin", channel->GetLoadBalancingPolicyName());
+}
+
+TEST_F(ClientLbEnd2endTest, RoundRobinUpdateInError) {
+ const int kNumServers = 3;
+ StartServers(kNumServers);
+ auto response_generator = BuildResolverResponseGenerator();
+ auto channel = BuildChannel("round_robin", response_generator);
+ auto stub = BuildStub(channel);
+ std::vector<int> ports;
+ // Start with a single server.
+ ports.emplace_back(servers_[0]->port_);
+ response_generator.SetNextResolution(ports);
+ WaitForServer(stub, 0, DEBUG_LOCATION);
+ // Send RPCs. They should all go to servers_[0]
+ for (size_t i = 0; i < 10; ++i) SendRpc(stub);
+ EXPECT_EQ(10, servers_[0]->service_.request_count());
+ EXPECT_EQ(0, servers_[1]->service_.request_count());
+ EXPECT_EQ(0, servers_[2]->service_.request_count());
+ servers_[0]->service_.ResetCounters();
+ // Shutdown one of the servers to be sent in the update.
+ servers_[1]->Shutdown();
+ ports.emplace_back(servers_[1]->port_);
+ ports.emplace_back(servers_[2]->port_);
+ response_generator.SetNextResolution(ports);
+ WaitForServer(stub, 0, DEBUG_LOCATION);
+ WaitForServer(stub, 2, DEBUG_LOCATION);
+ // Send three RPCs, one per server.
+ for (size_t i = 0; i < kNumServers; ++i) SendRpc(stub);
+ // The shut-down server shouldn't receive any requests.
+ EXPECT_EQ(0, servers_[1]->service_.request_count());
+}
+
+TEST_F(ClientLbEnd2endTest, RoundRobinManyUpdates) {
+ // Start servers and send one RPC per server.
+ const int kNumServers = 3;
+ StartServers(kNumServers);
+ auto response_generator = BuildResolverResponseGenerator();
+ auto channel = BuildChannel("round_robin", response_generator);
+ auto stub = BuildStub(channel);
+ std::vector<int> ports = GetServersPorts();
+ for (size_t i = 0; i < 1000; ++i) {
+ std::shuffle(ports.begin(), ports.end(),
+ std::mt19937(std::random_device()()));
+ response_generator.SetNextResolution(ports);
+ if (i % 10 == 0) CheckRpcSendOk(stub, DEBUG_LOCATION);
+ }
+ // Check LB policy name for the channel.
+ EXPECT_EQ("round_robin", channel->GetLoadBalancingPolicyName());
+}
+
+TEST_F(ClientLbEnd2endTest, RoundRobinConcurrentUpdates) {
+ // TODO(dgq): replicate the way internal testing exercises the concurrent
+ // update provisions of RR.
+}
+
+TEST_F(ClientLbEnd2endTest, RoundRobinReresolve) {
+ // Start servers and send one RPC per server.
+ const int kNumServers = 3;
+ std::vector<int> first_ports;
+ std::vector<int> second_ports;
+ first_ports.reserve(kNumServers);
+ for (int i = 0; i < kNumServers; ++i) {
+ // first_ports.push_back(grpc_pick_unused_port_or_die());
+ first_ports.push_back(5114 + i);
+ }
+ second_ports.reserve(kNumServers);
+ for (int i = 0; i < kNumServers; ++i) {
+ // second_ports.push_back(grpc_pick_unused_port_or_die());
+ second_ports.push_back(5117 + i);
+ }
+ StartServers(kNumServers, first_ports);
+ auto response_generator = BuildResolverResponseGenerator();
+ auto channel = BuildChannel("round_robin", response_generator);
+ auto stub = BuildStub(channel);
+ response_generator.SetNextResolution(first_ports);
+ // Send a number of RPCs, which succeed.
+ for (size_t i = 0; i < 100; ++i) {
+ CheckRpcSendOk(stub, DEBUG_LOCATION);
+ }
+ // Kill all servers
+ gpr_log(GPR_INFO, "****** ABOUT TO KILL SERVERS *******");
+ for (size_t i = 0; i < servers_.size(); ++i) {
+ servers_[i]->Shutdown();
+ }
+ gpr_log(GPR_INFO, "****** SERVERS KILLED *******");
+ gpr_log(GPR_INFO, "****** SENDING DOOMED REQUESTS *******");
+ // Client requests should fail. Send enough to tickle all subchannels.
+ for (size_t i = 0; i < servers_.size(); ++i) CheckRpcSendFailure(stub);
+ gpr_log(GPR_INFO, "****** DOOMED REQUESTS SENT *******");
+ // Bring servers back up on a different set of ports. We need to do this to be
+ // sure that the eventual success is *not* due to subchannel reconnection
+ // attempts and that an actual re-resolution has happened as a result of the
+ // RR policy going into transient failure when all its subchannels become
+ // unavailable (in transient failure as well).
+ gpr_log(GPR_INFO, "****** RESTARTING SERVERS *******");
+ StartServers(kNumServers, second_ports);
+ // Don't notify of the update. Wait for the LB policy's re-resolution to
+ // "pull" the new ports.
+ response_generator.SetNextResolutionUponError(second_ports);
+ gpr_log(GPR_INFO, "****** SERVERS RESTARTED *******");
+ gpr_log(GPR_INFO, "****** SENDING REQUEST TO SUCCEED *******");
+ // Client request should eventually (but still fairly soon) succeed.
+ const gpr_timespec deadline = grpc_timeout_seconds_to_deadline(5);
+ gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
+ while (gpr_time_cmp(deadline, now) > 0) {
+ if (SendRpc(stub)) break;
+ now = gpr_now(GPR_CLOCK_MONOTONIC);
+ }
+ ASSERT_GT(gpr_time_cmp(deadline, now), 0);
+}
+
+TEST_F(ClientLbEnd2endTest, RoundRobinTransientFailure) {
+ // Start servers and create channel. Channel should go to READY state.
+ const int kNumServers = 3;
+ StartServers(kNumServers);
+ auto response_generator = BuildResolverResponseGenerator();
+ auto channel = BuildChannel("round_robin", response_generator);
+ auto stub = BuildStub(channel);
+ response_generator.SetNextResolution(GetServersPorts());
+ EXPECT_TRUE(WaitForChannelReady(channel.get()));
+ // Now kill the servers. The channel should transition to TRANSIENT_FAILURE.
+ // TODO(roth): This test should ideally check that even when the
+ // subchannels are in state CONNECTING for an extended period of time,
+ // we will still report TRANSIENT_FAILURE. Unfortunately, we don't
+ // currently have a good way to get a subchannel to report CONNECTING
+ // for a long period of time, since the servers in this test framework
+ // are on the loopback interface, which will immediately return a
+ // "Connection refused" error, so the subchannels will only be in
+ // CONNECTING state very briefly. When we have time, see if we can
+ // find a way to fix this.
+ for (size_t i = 0; i < servers_.size(); ++i) {
+ servers_[i]->Shutdown();
+ }
+ auto predicate = [](grpc_connectivity_state state) {
+ return state == GRPC_CHANNEL_TRANSIENT_FAILURE;
+ };
+ EXPECT_TRUE(WaitForChannelState(channel.get(), predicate));
+}
+
+TEST_F(ClientLbEnd2endTest, RoundRobinTransientFailureAtStartup) {
+ // Create channel and return servers that don't exist. Channel should
+ // quickly transition into TRANSIENT_FAILURE.
+ // TODO(roth): This test should ideally check that even when the
+ // subchannels are in state CONNECTING for an extended period of time,
+ // we will still report TRANSIENT_FAILURE. Unfortunately, we don't
+ // currently have a good way to get a subchannel to report CONNECTING
+ // for a long period of time, since the servers in this test framework
+ // are on the loopback interface, which will immediately return a
+ // "Connection refused" error, so the subchannels will only be in
+ // CONNECTING state very briefly. When we have time, see if we can
+ // find a way to fix this.
+ auto response_generator = BuildResolverResponseGenerator();
+ auto channel = BuildChannel("round_robin", response_generator);
+ auto stub = BuildStub(channel);
+ response_generator.SetNextResolution({
+ grpc_pick_unused_port_or_die(),
+ grpc_pick_unused_port_or_die(),
+ grpc_pick_unused_port_or_die(),
+ });
+ for (size_t i = 0; i < servers_.size(); ++i) {
+ servers_[i]->Shutdown();
+ }
+ auto predicate = [](grpc_connectivity_state state) {
+ return state == GRPC_CHANNEL_TRANSIENT_FAILURE;
+ };
+ EXPECT_TRUE(WaitForChannelState(channel.get(), predicate, true));
+}
+
+TEST_F(ClientLbEnd2endTest, RoundRobinSingleReconnect) {
+ const int kNumServers = 3;
+ StartServers(kNumServers);
+ const auto ports = GetServersPorts();
+ auto response_generator = BuildResolverResponseGenerator();
+ auto channel = BuildChannel("round_robin", response_generator);
+ auto stub = BuildStub(channel);
+ response_generator.SetNextResolution(ports);
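+ // Wait until every backend has been reached so that RR knows about all of
+ // them before the per-server request counts are checked.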
+ for (size_t i = 0; i < kNumServers; ++i) {
+ WaitForServer(stub, i, DEBUG_LOCATION);
+ }
+ for (size_t i = 0; i < servers_.size(); ++i) {
+ CheckRpcSendOk(stub, DEBUG_LOCATION);
+ EXPECT_EQ(1, servers_[i]->service_.request_count()) << "for backend #" << i;
+ }
+ // One request should have gone to each server.
+ for (size_t i = 0; i < servers_.size(); ++i) {
+ EXPECT_EQ(1, servers_[i]->service_.request_count());
+ }
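+ // Snapshot server 0's request count before shutting it down.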
+ const auto pre_death = servers_[0]->service_.request_count();
+ // Kill the first server.
+ servers_[0]->Shutdown();
+ // Client requests should still succeed. They may need retrying if RR returned
+ // a pick before noticing the change in the server's connectivity.
+ while (!SendRpc(stub)) {
+ } // Retry until success.
+ // Send a bunch of RPCs that should succeed.
+ for (int i = 0; i < 10 * kNumServers; ++i) {
+ CheckRpcSendOk(stub, DEBUG_LOCATION);
+ }
+ const auto post_death = servers_[0]->service_.request_count();
+ // No requests have gone to the deceased server.
+ EXPECT_EQ(pre_death, post_death);
+ // Bring the first server back up.
+ StartServer(0);
+ // Requests should start arriving at the first server either right away (if
+ // the server managed to start before the RR policy retried the subchannel) or
+ // after the subchannel retry delay (if RR's subchannel retried before the
+ // server was fully back up).
+ WaitForServer(stub, 0, DEBUG_LOCATION);
+}
+
+// If health checking is required by client but health checking service
+// is not running on the server, the channel should be treated as healthy.
+TEST_F(ClientLbEnd2endTest,
+ RoundRobinServersHealthCheckingUnimplementedTreatedAsHealthy) {
+ StartServers(1); // Single server
+ ChannelArguments args;
+ args.SetServiceConfigJSON(
+ "{\"healthCheckConfig\": "
+ "{\"serviceName\": \"health_check_service_name\"}}");
+ auto response_generator = BuildResolverResponseGenerator();
+ auto channel = BuildChannel("round_robin", response_generator, args);
+ auto stub = BuildStub(channel);
+ response_generator.SetNextResolution({servers_[0]->port_});
+ EXPECT_TRUE(WaitForChannelReady(channel.get()));
+ CheckRpcSendOk(stub, DEBUG_LOCATION);
+}
+
+TEST_F(ClientLbEnd2endTest, RoundRobinWithHealthChecking) {
+ EnableDefaultHealthCheckService(true);
+ // Start servers.
+ const int kNumServers = 3;
+ StartServers(kNumServers);
+ ChannelArguments args;
+ args.SetServiceConfigJSON(
+ "{\"healthCheckConfig\": "
+ "{\"serviceName\": \"health_check_service_name\"}}");
+ auto response_generator = BuildResolverResponseGenerator();
+ auto channel = BuildChannel("round_robin", response_generator, args);
+ auto stub = BuildStub(channel);
+ response_generator.SetNextResolution(GetServersPorts());
+ // Channel should not become READY, because health checks should be failing.
+ gpr_log(GPR_INFO,
+ "*** initial state: unknown health check service name for "
+ "all servers");
+ EXPECT_FALSE(WaitForChannelReady(channel.get(), 1));
+ // Now set one of the servers to be healthy.
+ // The channel should become healthy and all requests should go to
+ // the healthy server.
+ gpr_log(GPR_INFO, "*** server 0 healthy");
+ servers_[0]->SetServingStatus("health_check_service_name", true);
+ EXPECT_TRUE(WaitForChannelReady(channel.get()));
+ for (int i = 0; i < 10; ++i) {
+ CheckRpcSendOk(stub, DEBUG_LOCATION);
+ }
+ EXPECT_EQ(10, servers_[0]->service_.request_count());
+ EXPECT_EQ(0, servers_[1]->service_.request_count());
+ EXPECT_EQ(0, servers_[2]->service_.request_count());
+ // Now set a second server to be healthy.
+ gpr_log(GPR_INFO, "*** server 2 healthy");
+ servers_[2]->SetServingStatus("health_check_service_name", true);
+ WaitForServer(stub, 2, DEBUG_LOCATION);
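+ // With two healthy backends, RR should split the next 10 RPCs evenly between
+ // servers 0 and 2.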
+ for (int i = 0; i < 10; ++i) {
+ CheckRpcSendOk(stub, DEBUG_LOCATION);
+ }
+ EXPECT_EQ(5, servers_[0]->service_.request_count());
+ EXPECT_EQ(0, servers_[1]->service_.request_count());
+ EXPECT_EQ(5, servers_[2]->service_.request_count());
+ // Now set the remaining server to be healthy.
+ gpr_log(GPR_INFO, "*** server 1 healthy");
+ servers_[1]->SetServingStatus("health_check_service_name", true);
+ WaitForServer(stub, 1, DEBUG_LOCATION);
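+ // With all three backends healthy, the next 9 RPCs should be spread 3/3/3.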
+ for (int i = 0; i < 9; ++i) {
+ CheckRpcSendOk(stub, DEBUG_LOCATION);
+ }
+ EXPECT_EQ(3, servers_[0]->service_.request_count());
+ EXPECT_EQ(3, servers_[1]->service_.request_count());
+ EXPECT_EQ(3, servers_[2]->service_.request_count());
+ // Now set one server to be unhealthy again. Then wait until the
+ // unhealthiness has hit the client. We know that the client will see
+ // this when we send kNumServers requests and one of the remaining servers
+ // sees two of the requests.
+ gpr_log(GPR_INFO, "*** server 0 unhealthy");
+ servers_[0]->SetServingStatus("health_check_service_name", false);
+ do {
+ ResetCounters();
+ for (int i = 0; i < kNumServers; ++i) {
+ CheckRpcSendOk(stub, DEBUG_LOCATION);
+ }
+ } while (servers_[1]->service_.request_count() != 2 &&
+ servers_[2]->service_.request_count() != 2);
+ // Now set the remaining two servers to be unhealthy. Make sure the
+ // channel leaves READY state and that RPCs fail.
+ gpr_log(GPR_INFO, "*** all servers unhealthy");
+ servers_[1]->SetServingStatus("health_check_service_name", false);
+ servers_[2]->SetServingStatus("health_check_service_name", false);
+ EXPECT_TRUE(WaitForChannelNotReady(channel.get()));
+ CheckRpcSendFailure(stub);
+ // Clean up.
+ EnableDefaultHealthCheckService(false);
+}
+
+TEST_F(ClientLbEnd2endTest,
+ RoundRobinWithHealthCheckingHandlesSubchannelFailure) {
+ EnableDefaultHealthCheckService(true);
+ // Start servers.
+ const int kNumServers = 3;
+ StartServers(kNumServers);
+ servers_[0]->SetServingStatus("health_check_service_name", true);
+ servers_[1]->SetServingStatus("health_check_service_name", true);
+ servers_[2]->SetServingStatus("health_check_service_name", true);
+ ChannelArguments args;
+ args.SetServiceConfigJSON(
+ "{\"healthCheckConfig\": "
+ "{\"serviceName\": \"health_check_service_name\"}}");
+ auto response_generator = BuildResolverResponseGenerator();
+ auto channel = BuildChannel("round_robin", response_generator, args);
+ auto stub = BuildStub(channel);
+ response_generator.SetNextResolution(GetServersPorts());
+ WaitForServer(stub, 0, DEBUG_LOCATION);
+ // Stop server 0 and send a new resolver result to ensure that RR
+ // checks each subchannel's state.
+ servers_[0]->Shutdown();
+ response_generator.SetNextResolution(GetServersPorts());
+ // Send a bunch more RPCs.
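+ // Plain SendRpc() is used (rather than CheckRpcSendOk()) because some of
+ // these calls may fail while RR reacts to server 0 being down; the test only
+ // requires that they complete without crashing.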
+ for (size_t i = 0; i < 100; i++) {
+ SendRpc(stub);
+ }
+}
+
+TEST_F(ClientLbEnd2endTest, RoundRobinWithHealthCheckingInhibitPerChannel) {
+ EnableDefaultHealthCheckService(true);
+ // Start server.
+ const int kNumServers = 1;
+ StartServers(kNumServers);
+ // Create a channel with health-checking enabled.
+ ChannelArguments args;
+ args.SetServiceConfigJSON(
+ "{\"healthCheckConfig\": "
+ "{\"serviceName\": \"health_check_service_name\"}}");
+ auto response_generator1 = BuildResolverResponseGenerator();
+ auto channel1 = BuildChannel("round_robin", response_generator1, args);
+ auto stub1 = BuildStub(channel1);
+ std::vector<int> ports = GetServersPorts();
+ response_generator1.SetNextResolution(ports);
+ // Create a channel with health checking enabled but inhibited.
+ args.SetInt(GRPC_ARG_INHIBIT_HEALTH_CHECKING, 1);
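+ // With health checking inhibited, this channel treats its subchannel as
+ // healthy without consulting the health-check service.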
+ auto response_generator2 = BuildResolverResponseGenerator();
+ auto channel2 = BuildChannel("round_robin", response_generator2, args);
+ auto stub2 = BuildStub(channel2);
+ response_generator2.SetNextResolution(ports);
+ // First channel should not become READY, because health checks should be
+ // failing.
+ EXPECT_FALSE(WaitForChannelReady(channel1.get(), 1));
+ CheckRpcSendFailure(stub1);
+ // Second channel should be READY.
+ EXPECT_TRUE(WaitForChannelReady(channel2.get(), 1));
+ CheckRpcSendOk(stub2, DEBUG_LOCATION);
+ // Enable health checks on the backend and wait for channel 1 to succeed.
+ servers_[0]->SetServingStatus("health_check_service_name", true);
+ CheckRpcSendOk(stub1, DEBUG_LOCATION, true /* wait_for_ready */);
+ // Check that we created only one subchannel to the backend.
+ EXPECT_EQ(1UL, servers_[0]->service_.clients().size());
+ // Clean up.
+ EnableDefaultHealthCheckService(false);
+}
+
+TEST_F(ClientLbEnd2endTest, RoundRobinWithHealthCheckingServiceNamePerChannel) {
+ EnableDefaultHealthCheckService(true);
+ // Start server.
+ const int kNumServers = 1;
+ StartServers(kNumServers);
+ // Create a channel with health-checking enabled.
+ ChannelArguments args;
+ args.SetServiceConfigJSON(
+ "{\"healthCheckConfig\": "
+ "{\"serviceName\": \"health_check_service_name\"}}");
+ auto response_generator1 = BuildResolverResponseGenerator();
+ auto channel1 = BuildChannel("round_robin", response_generator1, args);
+ auto stub1 = BuildStub(channel1);
+ std::vector<int> ports = GetServersPorts();
+ response_generator1.SetNextResolution(ports);
+ // Create a channel with health-checking enabled with a different
+ // service name.
+ ChannelArguments args2;
+ args2.SetServiceConfigJSON(
+ "{\"healthCheckConfig\": "
+ "{\"serviceName\": \"health_check_service_name2\"}}");
+ auto response_generator2 = BuildResolverResponseGenerator();
+ auto channel2 = BuildChannel("round_robin", response_generator2, args2);
+ auto stub2 = BuildStub(channel2);
+ response_generator2.SetNextResolution(ports);
+ // Allow health checks from channel 2 to succeed.
+ servers_[0]->SetServingStatus("health_check_service_name2", true);
+ // First channel should not become READY, because health checks should be
+ // failing.
+ EXPECT_FALSE(WaitForChannelReady(channel1.get(), 1));
+ CheckRpcSendFailure(stub1);
+ // Second channel should be READY.
+ EXPECT_TRUE(WaitForChannelReady(channel2.get(), 1));
+ CheckRpcSendOk(stub2, DEBUG_LOCATION);
+ // Enable health checks for channel 1 and wait for it to succeed.
+ servers_[0]->SetServingStatus("health_check_service_name", true);
+ CheckRpcSendOk(stub1, DEBUG_LOCATION, true /* wait_for_ready */);
+ // Check that we created only one subchannel to the backend.
+ EXPECT_EQ(1UL, servers_[0]->service_.clients().size());
+ // Clean up.
+ EnableDefaultHealthCheckService(false);
+}
+
+TEST_F(ClientLbEnd2endTest,
+ RoundRobinWithHealthCheckingServiceNameChangesAfterSubchannelsCreated) {
+ EnableDefaultHealthCheckService(true);
+ // Start server.
+ const int kNumServers = 1;
+ StartServers(kNumServers);
+ // Create a channel with health-checking enabled.
+ const char* kServiceConfigJson =
+ "{\"healthCheckConfig\": "
+ "{\"serviceName\": \"health_check_service_name\"}}";
+ auto response_generator = BuildResolverResponseGenerator();
+ auto channel = BuildChannel("round_robin", response_generator);
+ auto stub = BuildStub(channel);
+ std::vector<int> ports = GetServersPorts();
+ response_generator.SetNextResolution(ports, kServiceConfigJson);
+ servers_[0]->SetServingStatus("health_check_service_name", true);
+ EXPECT_TRUE(WaitForChannelReady(channel.get(), 1 /* timeout_seconds */));
+ // Send an update on the channel to change it to use a health checking
+ // service name that is not being reported as healthy.
+ const char* kServiceConfigJson2 =
+ "{\"healthCheckConfig\": "
+ "{\"serviceName\": \"health_check_service_name2\"}}";
+ response_generator.SetNextResolution(ports, kServiceConfigJson2);
+ EXPECT_TRUE(WaitForChannelNotReady(channel.get()));
+ // Clean up.
+ EnableDefaultHealthCheckService(false);
+}
+
+TEST_F(ClientLbEnd2endTest, ChannelIdleness) {
+ // Start server.
+ const int kNumServers = 1;
+ StartServers(kNumServers);
+ // Set max idle time and build the channel.
+ ChannelArguments args;
+ args.SetInt(GRPC_ARG_CLIENT_IDLE_TIMEOUT_MS, 1000);
+ auto response_generator = BuildResolverResponseGenerator();
+ auto channel = BuildChannel("", response_generator, args);
+ auto stub = BuildStub(channel);
+ // The initial channel state should be IDLE.
+ EXPECT_EQ(channel->GetState(false), GRPC_CHANNEL_IDLE);
+ // After sending RPC, channel state should be READY.
+ response_generator.SetNextResolution(GetServersPorts());
+ CheckRpcSendOk(stub, DEBUG_LOCATION);
+ EXPECT_EQ(channel->GetState(false), GRPC_CHANNEL_READY);
+ // After a period of time without using the channel, the channel state should
+ // switch back to IDLE.
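+ // (The 1200 ms sleep comfortably exceeds the 1000 ms idle timeout set above.)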
+ gpr_sleep_until(grpc_timeout_milliseconds_to_deadline(1200));
+ EXPECT_EQ(channel->GetState(false), GRPC_CHANNEL_IDLE);
+ // Sending a new RPC should wake up the IDLE channel.
+ response_generator.SetNextResolution(GetServersPorts());
+ CheckRpcSendOk(stub, DEBUG_LOCATION);
+ EXPECT_EQ(channel->GetState(false), GRPC_CHANNEL_READY);
+}
+
class ClientLbPickArgsTest : public ClientLbEnd2endTest {
protected:
void SetUp() override {
@@ -1715,13 +1715,13 @@ TEST_F(ClientLbPickArgsTest, Basic) {
::testing::Pair("baz", "3"))))));
}
-class ClientLbInterceptTrailingMetadataTest : public ClientLbEnd2endTest {
- protected:
- void SetUp() override {
- ClientLbEnd2endTest::SetUp();
+class ClientLbInterceptTrailingMetadataTest : public ClientLbEnd2endTest {
+ protected:
+ void SetUp() override {
+ ClientLbEnd2endTest::SetUp();
current_test_instance_ = this;
- }
-
+ }
+
static void SetUpTestCase() {
grpc_init();
grpc_core::RegisterInterceptRecvTrailingMetadataLoadBalancingPolicy(
@@ -1730,75 +1730,75 @@ class ClientLbInterceptTrailingMetadataTest : public ClientLbEnd2endTest {
static void TearDownTestCase() { grpc_shutdown_blocking(); }
- int trailers_intercepted() {
- grpc::internal::MutexLock lock(&mu_);
- return trailers_intercepted_;
- }
-
+ int trailers_intercepted() {
+ grpc::internal::MutexLock lock(&mu_);
+ return trailers_intercepted_;
+ }
+
const grpc_core::MetadataVector& trailing_metadata() {
grpc::internal::MutexLock lock(&mu_);
return trailing_metadata_;
}
- const udpa::data::orca::v1::OrcaLoadReport* backend_load_report() {
- grpc::internal::MutexLock lock(&mu_);
- return load_report_.get();
- }
-
- private:
- static void ReportTrailerIntercepted(
+ const udpa::data::orca::v1::OrcaLoadReport* backend_load_report() {
+ grpc::internal::MutexLock lock(&mu_);
+ return load_report_.get();
+ }
+
+ private:
+ static void ReportTrailerIntercepted(
const grpc_core::TrailingMetadataArgsSeen& args_seen) {
const auto* backend_metric_data = args_seen.backend_metric_data;
ClientLbInterceptTrailingMetadataTest* self = current_test_instance_;
- grpc::internal::MutexLock lock(&self->mu_);
- self->trailers_intercepted_++;
+ grpc::internal::MutexLock lock(&self->mu_);
+ self->trailers_intercepted_++;
self->trailing_metadata_ = args_seen.metadata;
- if (backend_metric_data != nullptr) {
- self->load_report_.reset(new udpa::data::orca::v1::OrcaLoadReport);
- self->load_report_->set_cpu_utilization(
- backend_metric_data->cpu_utilization);
- self->load_report_->set_mem_utilization(
- backend_metric_data->mem_utilization);
- self->load_report_->set_rps(backend_metric_data->requests_per_second);
- for (const auto& p : backend_metric_data->request_cost) {
+ if (backend_metric_data != nullptr) {
+ self->load_report_.reset(new udpa::data::orca::v1::OrcaLoadReport);
+ self->load_report_->set_cpu_utilization(
+ backend_metric_data->cpu_utilization);
+ self->load_report_->set_mem_utilization(
+ backend_metric_data->mem_utilization);
+ self->load_report_->set_rps(backend_metric_data->requests_per_second);
+ for (const auto& p : backend_metric_data->request_cost) {
TString name = TString(p.first);
(*self->load_report_->mutable_request_cost())[std::move(name)] =
p.second;
- }
- for (const auto& p : backend_metric_data->utilization) {
+ }
+ for (const auto& p : backend_metric_data->utilization) {
TString name = TString(p.first);
(*self->load_report_->mutable_utilization())[std::move(name)] =
p.second;
- }
- }
- }
-
+ }
+ }
+ }
+
static ClientLbInterceptTrailingMetadataTest* current_test_instance_;
- grpc::internal::Mutex mu_;
- int trailers_intercepted_ = 0;
+ grpc::internal::Mutex mu_;
+ int trailers_intercepted_ = 0;
grpc_core::MetadataVector trailing_metadata_;
- std::unique_ptr<udpa::data::orca::v1::OrcaLoadReport> load_report_;
-};
-
+ std::unique_ptr<udpa::data::orca::v1::OrcaLoadReport> load_report_;
+};
+
ClientLbInterceptTrailingMetadataTest*
ClientLbInterceptTrailingMetadataTest::current_test_instance_ = nullptr;
-TEST_F(ClientLbInterceptTrailingMetadataTest, InterceptsRetriesDisabled) {
- const int kNumServers = 1;
- const int kNumRpcs = 10;
- StartServers(kNumServers);
- auto response_generator = BuildResolverResponseGenerator();
- auto channel =
- BuildChannel("intercept_trailing_metadata_lb", response_generator);
- auto stub = BuildStub(channel);
- response_generator.SetNextResolution(GetServersPorts());
- for (size_t i = 0; i < kNumRpcs; ++i) {
- CheckRpcSendOk(stub, DEBUG_LOCATION);
- }
- // Check LB policy name for the channel.
- EXPECT_EQ("intercept_trailing_metadata_lb",
- channel->GetLoadBalancingPolicyName());
- EXPECT_EQ(kNumRpcs, trailers_intercepted());
+TEST_F(ClientLbInterceptTrailingMetadataTest, InterceptsRetriesDisabled) {
+ const int kNumServers = 1;
+ const int kNumRpcs = 10;
+ StartServers(kNumServers);
+ auto response_generator = BuildResolverResponseGenerator();
+ auto channel =
+ BuildChannel("intercept_trailing_metadata_lb", response_generator);
+ auto stub = BuildStub(channel);
+ response_generator.SetNextResolution(GetServersPorts());
+ for (size_t i = 0; i < kNumRpcs; ++i) {
+ CheckRpcSendOk(stub, DEBUG_LOCATION);
+ }
+ // Check LB policy name for the channel.
+ EXPECT_EQ("intercept_trailing_metadata_lb",
+ channel->GetLoadBalancingPolicyName());
+ EXPECT_EQ(kNumRpcs, trailers_intercepted());
EXPECT_THAT(trailing_metadata(),
::testing::UnorderedElementsAre(
// TODO(roth): Should grpc-status be visible here?
@@ -1806,41 +1806,41 @@ TEST_F(ClientLbInterceptTrailingMetadataTest, InterceptsRetriesDisabled) {
::testing::Pair("user-agent", ::testing::_),
::testing::Pair("foo", "1"), ::testing::Pair("bar", "2"),
::testing::Pair("baz", "3")));
- EXPECT_EQ(nullptr, backend_load_report());
-}
-
-TEST_F(ClientLbInterceptTrailingMetadataTest, InterceptsRetriesEnabled) {
- const int kNumServers = 1;
- const int kNumRpcs = 10;
- StartServers(kNumServers);
- ChannelArguments args;
- args.SetServiceConfigJSON(
- "{\n"
- " \"methodConfig\": [ {\n"
- " \"name\": [\n"
- " { \"service\": \"grpc.testing.EchoTestService\" }\n"
- " ],\n"
- " \"retryPolicy\": {\n"
- " \"maxAttempts\": 3,\n"
- " \"initialBackoff\": \"1s\",\n"
- " \"maxBackoff\": \"120s\",\n"
- " \"backoffMultiplier\": 1.6,\n"
- " \"retryableStatusCodes\": [ \"ABORTED\" ]\n"
- " }\n"
- " } ]\n"
- "}");
- auto response_generator = BuildResolverResponseGenerator();
- auto channel =
- BuildChannel("intercept_trailing_metadata_lb", response_generator, args);
- auto stub = BuildStub(channel);
- response_generator.SetNextResolution(GetServersPorts());
- for (size_t i = 0; i < kNumRpcs; ++i) {
- CheckRpcSendOk(stub, DEBUG_LOCATION);
- }
- // Check LB policy name for the channel.
- EXPECT_EQ("intercept_trailing_metadata_lb",
- channel->GetLoadBalancingPolicyName());
- EXPECT_EQ(kNumRpcs, trailers_intercepted());
+ EXPECT_EQ(nullptr, backend_load_report());
+}
+
+TEST_F(ClientLbInterceptTrailingMetadataTest, InterceptsRetriesEnabled) {
+ const int kNumServers = 1;
+ const int kNumRpcs = 10;
+ StartServers(kNumServers);
+ ChannelArguments args;
+ args.SetServiceConfigJSON(
+ "{\n"
+ " \"methodConfig\": [ {\n"
+ " \"name\": [\n"
+ " { \"service\": \"grpc.testing.EchoTestService\" }\n"
+ " ],\n"
+ " \"retryPolicy\": {\n"
+ " \"maxAttempts\": 3,\n"
+ " \"initialBackoff\": \"1s\",\n"
+ " \"maxBackoff\": \"120s\",\n"
+ " \"backoffMultiplier\": 1.6,\n"
+ " \"retryableStatusCodes\": [ \"ABORTED\" ]\n"
+ " }\n"
+ " } ]\n"
+ "}");
+ auto response_generator = BuildResolverResponseGenerator();
+ auto channel =
+ BuildChannel("intercept_trailing_metadata_lb", response_generator, args);
+ auto stub = BuildStub(channel);
+ response_generator.SetNextResolution(GetServersPorts());
+ for (size_t i = 0; i < kNumRpcs; ++i) {
+ CheckRpcSendOk(stub, DEBUG_LOCATION);
+ }
+ // Check LB policy name for the channel.
+ EXPECT_EQ("intercept_trailing_metadata_lb",
+ channel->GetLoadBalancingPolicyName());
+ EXPECT_EQ(kNumRpcs, trailers_intercepted());
EXPECT_THAT(trailing_metadata(),
::testing::UnorderedElementsAre(
// TODO(roth): Should grpc-status be visible here?
@@ -1848,59 +1848,59 @@ TEST_F(ClientLbInterceptTrailingMetadataTest, InterceptsRetriesEnabled) {
::testing::Pair("user-agent", ::testing::_),
::testing::Pair("foo", "1"), ::testing::Pair("bar", "2"),
::testing::Pair("baz", "3")));
- EXPECT_EQ(nullptr, backend_load_report());
-}
-
-TEST_F(ClientLbInterceptTrailingMetadataTest, BackendMetricData) {
- const int kNumServers = 1;
- const int kNumRpcs = 10;
- StartServers(kNumServers);
- udpa::data::orca::v1::OrcaLoadReport load_report;
- load_report.set_cpu_utilization(0.5);
- load_report.set_mem_utilization(0.75);
- load_report.set_rps(25);
- auto* request_cost = load_report.mutable_request_cost();
- (*request_cost)["foo"] = 0.8;
- (*request_cost)["bar"] = 1.4;
- auto* utilization = load_report.mutable_utilization();
- (*utilization)["baz"] = 1.1;
- (*utilization)["quux"] = 0.9;
- for (const auto& server : servers_) {
- server->service_.set_load_report(&load_report);
- }
- auto response_generator = BuildResolverResponseGenerator();
- auto channel =
- BuildChannel("intercept_trailing_metadata_lb", response_generator);
- auto stub = BuildStub(channel);
- response_generator.SetNextResolution(GetServersPorts());
- for (size_t i = 0; i < kNumRpcs; ++i) {
- CheckRpcSendOk(stub, DEBUG_LOCATION);
- auto* actual = backend_load_report();
- ASSERT_NE(actual, nullptr);
- // TODO(roth): Change this to use EqualsProto() once that becomes
- // available in OSS.
- EXPECT_EQ(actual->cpu_utilization(), load_report.cpu_utilization());
- EXPECT_EQ(actual->mem_utilization(), load_report.mem_utilization());
- EXPECT_EQ(actual->rps(), load_report.rps());
- EXPECT_EQ(actual->request_cost().size(), load_report.request_cost().size());
- for (const auto& p : actual->request_cost()) {
- auto it = load_report.request_cost().find(p.first);
- ASSERT_NE(it, load_report.request_cost().end());
- EXPECT_EQ(it->second, p.second);
- }
- EXPECT_EQ(actual->utilization().size(), load_report.utilization().size());
- for (const auto& p : actual->utilization()) {
- auto it = load_report.utilization().find(p.first);
- ASSERT_NE(it, load_report.utilization().end());
- EXPECT_EQ(it->second, p.second);
- }
- }
- // Check LB policy name for the channel.
- EXPECT_EQ("intercept_trailing_metadata_lb",
- channel->GetLoadBalancingPolicyName());
- EXPECT_EQ(kNumRpcs, trailers_intercepted());
-}
-
+ EXPECT_EQ(nullptr, backend_load_report());
+}
+
+TEST_F(ClientLbInterceptTrailingMetadataTest, BackendMetricData) {
+ const int kNumServers = 1;
+ const int kNumRpcs = 10;
+ StartServers(kNumServers);
+ udpa::data::orca::v1::OrcaLoadReport load_report;
+ load_report.set_cpu_utilization(0.5);
+ load_report.set_mem_utilization(0.75);
+ load_report.set_rps(25);
+ auto* request_cost = load_report.mutable_request_cost();
+ (*request_cost)["foo"] = 0.8;
+ (*request_cost)["bar"] = 1.4;
+ auto* utilization = load_report.mutable_utilization();
+ (*utilization)["baz"] = 1.1;
+ (*utilization)["quux"] = 0.9;
+ for (const auto& server : servers_) {
+ server->service_.set_load_report(&load_report);
+ }
+ auto response_generator = BuildResolverResponseGenerator();
+ auto channel =
+ BuildChannel("intercept_trailing_metadata_lb", response_generator);
+ auto stub = BuildStub(channel);
+ response_generator.SetNextResolution(GetServersPorts());
+ for (size_t i = 0; i < kNumRpcs; ++i) {
+ CheckRpcSendOk(stub, DEBUG_LOCATION);
+ auto* actual = backend_load_report();
+ ASSERT_NE(actual, nullptr);
+ // TODO(roth): Change this to use EqualsProto() once that becomes
+ // available in OSS.
+ EXPECT_EQ(actual->cpu_utilization(), load_report.cpu_utilization());
+ EXPECT_EQ(actual->mem_utilization(), load_report.mem_utilization());
+ EXPECT_EQ(actual->rps(), load_report.rps());
+ EXPECT_EQ(actual->request_cost().size(), load_report.request_cost().size());
+ for (const auto& p : actual->request_cost()) {
+ auto it = load_report.request_cost().find(p.first);
+ ASSERT_NE(it, load_report.request_cost().end());
+ EXPECT_EQ(it->second, p.second);
+ }
+ EXPECT_EQ(actual->utilization().size(), load_report.utilization().size());
+ for (const auto& p : actual->utilization()) {
+ auto it = load_report.utilization().find(p.first);
+ ASSERT_NE(it, load_report.utilization().end());
+ EXPECT_EQ(it->second, p.second);
+ }
+ }
+ // Check LB policy name for the channel.
+ EXPECT_EQ("intercept_trailing_metadata_lb",
+ channel->GetLoadBalancingPolicyName());
+ EXPECT_EQ(kNumRpcs, trailers_intercepted());
+}
+
class ClientLbAddressTest : public ClientLbEnd2endTest {
protected:
static const char* kAttributeKey;
@@ -1978,13 +1978,13 @@ TEST_F(ClientLbAddressTest, Basic) {
EXPECT_EQ(addresses_seen(), expected);
}
-} // namespace
-} // namespace testing
-} // namespace grpc
-
-int main(int argc, char** argv) {
- ::testing::InitGoogleTest(&argc, argv);
- grpc::testing::TestEnvironment env(argc, argv);
- const auto result = RUN_ALL_TESTS();
- return result;
-}
+} // namespace
+} // namespace testing
+} // namespace grpc
+
+int main(int argc, char** argv) {
+ ::testing::InitGoogleTest(&argc, argv);
+ grpc::testing::TestEnvironment env(argc, argv);
+ const auto result = RUN_ALL_TESTS();
+ return result;
+}
diff --git a/contrib/libs/grpc/test/cpp/end2end/end2end_test.cc b/contrib/libs/grpc/test/cpp/end2end/end2end_test.cc
index 9e1ae49b8d..ad2ddb7e84 100644
--- a/contrib/libs/grpc/test/cpp/end2end/end2end_test.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/end2end_test.cc
@@ -1,95 +1,95 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include <grpc/grpc.h>
-#include <grpc/support/alloc.h>
-#include <grpc/support/log.h>
-#include <grpc/support/time.h>
-#include <grpcpp/channel.h>
-#include <grpcpp/client_context.h>
-#include <grpcpp/create_channel.h>
-#include <grpcpp/impl/codegen/status_code_enum.h>
-#include <grpcpp/resource_quota.h>
-#include <grpcpp/security/auth_metadata_processor.h>
-#include <grpcpp/security/credentials.h>
-#include <grpcpp/security/server_credentials.h>
-#include <grpcpp/server.h>
-#include <grpcpp/server_builder.h>
-#include <grpcpp/server_context.h>
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpc/grpc.h>
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/time.h>
+#include <grpcpp/channel.h>
+#include <grpcpp/client_context.h>
+#include <grpcpp/create_channel.h>
+#include <grpcpp/impl/codegen/status_code_enum.h>
+#include <grpcpp/resource_quota.h>
+#include <grpcpp/security/auth_metadata_processor.h>
+#include <grpcpp/security/credentials.h>
+#include <grpcpp/security/server_credentials.h>
+#include <grpcpp/server.h>
+#include <grpcpp/server_builder.h>
+#include <grpcpp/server_context.h>
#include <grpcpp/support/string_ref.h>
#include <grpcpp/test/channel_test_peer.h>
-
-#include <mutex>
-#include <thread>
-
+
+#include <mutex>
+#include <thread>
+
#include "y_absl/strings/str_format.h"
-#include "src/core/ext/filters/client_channel/backup_poller.h"
-#include "src/core/lib/gpr/env.h"
-#include "src/core/lib/iomgr/iomgr.h"
-#include "src/core/lib/security/credentials/credentials.h"
-#include "src/proto/grpc/testing/duplicate/echo_duplicate.grpc.pb.h"
-#include "src/proto/grpc/testing/echo.grpc.pb.h"
-#include "test/core/util/port.h"
-#include "test/core/util/test_config.h"
-#include "test/cpp/end2end/interceptors_util.h"
-#include "test/cpp/end2end/test_service_impl.h"
-#include "test/cpp/util/string_ref_helper.h"
-#include "test/cpp/util/test_credentials_provider.h"
-
-#ifdef GRPC_POSIX_SOCKET_EV
-#include "src/core/lib/iomgr/ev_posix.h"
-#endif // GRPC_POSIX_SOCKET_EV
-
-#include <gtest/gtest.h>
-
-using grpc::testing::EchoRequest;
-using grpc::testing::EchoResponse;
-using grpc::testing::kTlsCredentialsType;
-using std::chrono::system_clock;
-
-// MAYBE_SKIP_TEST is a macro to determine if this particular test configuration
-// should be skipped based on a decision made at SetUp time. In particular,
-// tests that use the callback server can only be run if the iomgr can run in
-// the background or if the transport is in-process.
-#define MAYBE_SKIP_TEST \
- do { \
- if (do_not_test_) { \
- return; \
- } \
- } while (0)
-
-namespace grpc {
-namespace testing {
-namespace {
-
+#include "src/core/ext/filters/client_channel/backup_poller.h"
+#include "src/core/lib/gpr/env.h"
+#include "src/core/lib/iomgr/iomgr.h"
+#include "src/core/lib/security/credentials/credentials.h"
+#include "src/proto/grpc/testing/duplicate/echo_duplicate.grpc.pb.h"
+#include "src/proto/grpc/testing/echo.grpc.pb.h"
+#include "test/core/util/port.h"
+#include "test/core/util/test_config.h"
+#include "test/cpp/end2end/interceptors_util.h"
+#include "test/cpp/end2end/test_service_impl.h"
+#include "test/cpp/util/string_ref_helper.h"
+#include "test/cpp/util/test_credentials_provider.h"
+
+#ifdef GRPC_POSIX_SOCKET_EV
+#include "src/core/lib/iomgr/ev_posix.h"
+#endif // GRPC_POSIX_SOCKET_EV
+
+#include <gtest/gtest.h>
+
+using grpc::testing::EchoRequest;
+using grpc::testing::EchoResponse;
+using grpc::testing::kTlsCredentialsType;
+using std::chrono::system_clock;
+
+// MAYBE_SKIP_TEST is a macro to determine if this particular test configuration
+// should be skipped based on a decision made at SetUp time. In particular,
+// tests that use the callback server can only be run if the iomgr can run in
+// the background or if the transport is in-process.
+#define MAYBE_SKIP_TEST \
+ do { \
+ if (do_not_test_) { \
+ return; \
+ } \
+ } while (0)
+
+namespace grpc {
+namespace testing {
+namespace {
+
bool CheckIsLocalhost(const TString& addr) {
const TString kIpv6("ipv6:[::1]:");
const TString kIpv4MappedIpv6("ipv6:[::ffff:127.0.0.1]:");
const TString kIpv4("ipv4:127.0.0.1:");
- return addr.substr(0, kIpv4.size()) == kIpv4 ||
- addr.substr(0, kIpv4MappedIpv6.size()) == kIpv4MappedIpv6 ||
- addr.substr(0, kIpv6.size()) == kIpv6;
-}
-
+ return addr.substr(0, kIpv4.size()) == kIpv4 ||
+ addr.substr(0, kIpv4MappedIpv6.size()) == kIpv4MappedIpv6 ||
+ addr.substr(0, kIpv6.size()) == kIpv6;
+}
+
const int kClientChannelBackupPollIntervalMs = 200;
-const char kTestCredsPluginErrorMsg[] = "Could not find plugin metadata.";
-
+const char kTestCredsPluginErrorMsg[] = "Could not find plugin metadata.";
+
const char kFakeToken[] = "fake_token";
const char kFakeSelector[] = "fake_selector";
const char kExpectedFakeCredsDebugString[] =
@@ -142,738 +142,738 @@ const char kExpectedCompositeCallCredsDebugString[] =
"key:call-creds-key1,value:call-creds-val1},TestMetadataCredentials{key:"
"call-creds-key2,value:call-creds-val2}}}";
-class TestMetadataCredentialsPlugin : public MetadataCredentialsPlugin {
- public:
- static const char kGoodMetadataKey[];
- static const char kBadMetadataKey[];
-
- TestMetadataCredentialsPlugin(const grpc::string_ref& metadata_key,
- const grpc::string_ref& metadata_value,
- bool is_blocking, bool is_successful,
- int delay_ms)
- : metadata_key_(metadata_key.data(), metadata_key.length()),
- metadata_value_(metadata_value.data(), metadata_value.length()),
- is_blocking_(is_blocking),
- is_successful_(is_successful),
- delay_ms_(delay_ms) {}
-
- bool IsBlocking() const override { return is_blocking_; }
-
- Status GetMetadata(
- grpc::string_ref service_url, grpc::string_ref method_name,
- const grpc::AuthContext& channel_auth_context,
+class TestMetadataCredentialsPlugin : public MetadataCredentialsPlugin {
+ public:
+ static const char kGoodMetadataKey[];
+ static const char kBadMetadataKey[];
+
+ TestMetadataCredentialsPlugin(const grpc::string_ref& metadata_key,
+ const grpc::string_ref& metadata_value,
+ bool is_blocking, bool is_successful,
+ int delay_ms)
+ : metadata_key_(metadata_key.data(), metadata_key.length()),
+ metadata_value_(metadata_value.data(), metadata_value.length()),
+ is_blocking_(is_blocking),
+ is_successful_(is_successful),
+ delay_ms_(delay_ms) {}
+
+ bool IsBlocking() const override { return is_blocking_; }
+
+ Status GetMetadata(
+ grpc::string_ref service_url, grpc::string_ref method_name,
+ const grpc::AuthContext& channel_auth_context,
std::multimap<TString, TString>* metadata) override {
- if (delay_ms_ != 0) {
- gpr_sleep_until(
- gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
- gpr_time_from_millis(delay_ms_, GPR_TIMESPAN)));
- }
- EXPECT_GT(service_url.length(), 0UL);
- EXPECT_GT(method_name.length(), 0UL);
- EXPECT_TRUE(channel_auth_context.IsPeerAuthenticated());
- EXPECT_TRUE(metadata != nullptr);
- if (is_successful_) {
- metadata->insert(std::make_pair(metadata_key_, metadata_value_));
- return Status::OK;
- } else {
- return Status(StatusCode::NOT_FOUND, kTestCredsPluginErrorMsg);
- }
- }
-
+ if (delay_ms_ != 0) {
+ gpr_sleep_until(
+ gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
+ gpr_time_from_millis(delay_ms_, GPR_TIMESPAN)));
+ }
+ EXPECT_GT(service_url.length(), 0UL);
+ EXPECT_GT(method_name.length(), 0UL);
+ EXPECT_TRUE(channel_auth_context.IsPeerAuthenticated());
+ EXPECT_TRUE(metadata != nullptr);
+ if (is_successful_) {
+ metadata->insert(std::make_pair(metadata_key_, metadata_value_));
+ return Status::OK;
+ } else {
+ return Status(StatusCode::NOT_FOUND, kTestCredsPluginErrorMsg);
+ }
+ }
+
TString DebugString() override {
return y_absl::StrFormat("TestMetadataCredentials{key:%s,value:%s}",
metadata_key_.c_str(), metadata_value_.c_str());
}
- private:
+ private:
TString metadata_key_;
TString metadata_value_;
- bool is_blocking_;
- bool is_successful_;
- int delay_ms_;
-};
-
-const char TestMetadataCredentialsPlugin::kBadMetadataKey[] =
- "TestPluginMetadata";
-const char TestMetadataCredentialsPlugin::kGoodMetadataKey[] =
- "test-plugin-metadata";
-
-class TestAuthMetadataProcessor : public AuthMetadataProcessor {
- public:
- static const char kGoodGuy[];
-
- TestAuthMetadataProcessor(bool is_blocking) : is_blocking_(is_blocking) {}
-
- std::shared_ptr<CallCredentials> GetCompatibleClientCreds() {
- return grpc::MetadataCredentialsFromPlugin(
- std::unique_ptr<MetadataCredentialsPlugin>(
- new TestMetadataCredentialsPlugin(
- TestMetadataCredentialsPlugin::kGoodMetadataKey, kGoodGuy,
- is_blocking_, true, 0)));
- }
-
- std::shared_ptr<CallCredentials> GetIncompatibleClientCreds() {
- return grpc::MetadataCredentialsFromPlugin(
- std::unique_ptr<MetadataCredentialsPlugin>(
- new TestMetadataCredentialsPlugin(
- TestMetadataCredentialsPlugin::kGoodMetadataKey, "Mr Hyde",
- is_blocking_, true, 0)));
- }
-
- // Interface implementation
- bool IsBlocking() const override { return is_blocking_; }
-
- Status Process(const InputMetadata& auth_metadata, AuthContext* context,
- OutputMetadata* consumed_auth_metadata,
- OutputMetadata* response_metadata) override {
- EXPECT_TRUE(consumed_auth_metadata != nullptr);
- EXPECT_TRUE(context != nullptr);
- EXPECT_TRUE(response_metadata != nullptr);
- auto auth_md =
- auth_metadata.find(TestMetadataCredentialsPlugin::kGoodMetadataKey);
- EXPECT_NE(auth_md, auth_metadata.end());
- string_ref auth_md_value = auth_md->second;
- if (auth_md_value == kGoodGuy) {
- context->AddProperty(kIdentityPropName, kGoodGuy);
- context->SetPeerIdentityPropertyName(kIdentityPropName);
- consumed_auth_metadata->insert(std::make_pair(
- string(auth_md->first.data(), auth_md->first.length()),
- string(auth_md->second.data(), auth_md->second.length())));
- return Status::OK;
- } else {
- return Status(StatusCode::UNAUTHENTICATED,
- string("Invalid principal: ") +
- string(auth_md_value.data(), auth_md_value.length()));
- }
- }
-
- private:
- static const char kIdentityPropName[];
- bool is_blocking_;
-};
-
-const char TestAuthMetadataProcessor::kGoodGuy[] = "Dr Jekyll";
-const char TestAuthMetadataProcessor::kIdentityPropName[] = "novel identity";
-
-class Proxy : public ::grpc::testing::EchoTestService::Service {
- public:
- Proxy(const std::shared_ptr<Channel>& channel)
- : stub_(grpc::testing::EchoTestService::NewStub(channel)) {}
-
- Status Echo(ServerContext* server_context, const EchoRequest* request,
- EchoResponse* response) override {
- std::unique_ptr<ClientContext> client_context =
- ClientContext::FromServerContext(*server_context);
- return stub_->Echo(client_context.get(), *request, response);
- }
-
- private:
- std::unique_ptr<::grpc::testing::EchoTestService::Stub> stub_;
-};
-
-class TestServiceImplDupPkg
- : public ::grpc::testing::duplicate::EchoTestService::Service {
- public:
- Status Echo(ServerContext* /*context*/, const EchoRequest* /*request*/,
- EchoResponse* response) override {
- response->set_message("no package");
- return Status::OK;
- }
-};
-
-class TestScenario {
- public:
- TestScenario(bool interceptors, bool proxy, bool inproc_stub,
+ bool is_blocking_;
+ bool is_successful_;
+ int delay_ms_;
+};
+
+const char TestMetadataCredentialsPlugin::kBadMetadataKey[] =
+ "TestPluginMetadata";
+const char TestMetadataCredentialsPlugin::kGoodMetadataKey[] =
+ "test-plugin-metadata";
+
+class TestAuthMetadataProcessor : public AuthMetadataProcessor {
+ public:
+ static const char kGoodGuy[];
+
+ TestAuthMetadataProcessor(bool is_blocking) : is_blocking_(is_blocking) {}
+
+ std::shared_ptr<CallCredentials> GetCompatibleClientCreds() {
+ return grpc::MetadataCredentialsFromPlugin(
+ std::unique_ptr<MetadataCredentialsPlugin>(
+ new TestMetadataCredentialsPlugin(
+ TestMetadataCredentialsPlugin::kGoodMetadataKey, kGoodGuy,
+ is_blocking_, true, 0)));
+ }
+
+ std::shared_ptr<CallCredentials> GetIncompatibleClientCreds() {
+ return grpc::MetadataCredentialsFromPlugin(
+ std::unique_ptr<MetadataCredentialsPlugin>(
+ new TestMetadataCredentialsPlugin(
+ TestMetadataCredentialsPlugin::kGoodMetadataKey, "Mr Hyde",
+ is_blocking_, true, 0)));
+ }
+
+ // Interface implementation
+ bool IsBlocking() const override { return is_blocking_; }
+
+ Status Process(const InputMetadata& auth_metadata, AuthContext* context,
+ OutputMetadata* consumed_auth_metadata,
+ OutputMetadata* response_metadata) override {
+ EXPECT_TRUE(consumed_auth_metadata != nullptr);
+ EXPECT_TRUE(context != nullptr);
+ EXPECT_TRUE(response_metadata != nullptr);
+ auto auth_md =
+ auth_metadata.find(TestMetadataCredentialsPlugin::kGoodMetadataKey);
+ EXPECT_NE(auth_md, auth_metadata.end());
+ string_ref auth_md_value = auth_md->second;
+ if (auth_md_value == kGoodGuy) {
+ context->AddProperty(kIdentityPropName, kGoodGuy);
+ context->SetPeerIdentityPropertyName(kIdentityPropName);
+ consumed_auth_metadata->insert(std::make_pair(
+ string(auth_md->first.data(), auth_md->first.length()),
+ string(auth_md->second.data(), auth_md->second.length())));
+ return Status::OK;
+ } else {
+ return Status(StatusCode::UNAUTHENTICATED,
+ string("Invalid principal: ") +
+ string(auth_md_value.data(), auth_md_value.length()));
+ }
+ }
+
+ private:
+ static const char kIdentityPropName[];
+ bool is_blocking_;
+};
+
+const char TestAuthMetadataProcessor::kGoodGuy[] = "Dr Jekyll";
+const char TestAuthMetadataProcessor::kIdentityPropName[] = "novel identity";
+
+class Proxy : public ::grpc::testing::EchoTestService::Service {
+ public:
+ Proxy(const std::shared_ptr<Channel>& channel)
+ : stub_(grpc::testing::EchoTestService::NewStub(channel)) {}
+
+ Status Echo(ServerContext* server_context, const EchoRequest* request,
+ EchoResponse* response) override {
+ std::unique_ptr<ClientContext> client_context =
+ ClientContext::FromServerContext(*server_context);
+ return stub_->Echo(client_context.get(), *request, response);
+ }
+
+ private:
+ std::unique_ptr<::grpc::testing::EchoTestService::Stub> stub_;
+};
+
+class TestServiceImplDupPkg
+ : public ::grpc::testing::duplicate::EchoTestService::Service {
+ public:
+ Status Echo(ServerContext* /*context*/, const EchoRequest* /*request*/,
+ EchoResponse* response) override {
+ response->set_message("no package");
+ return Status::OK;
+ }
+};
+
+class TestScenario {
+ public:
+ TestScenario(bool interceptors, bool proxy, bool inproc_stub,
const TString& creds_type, bool use_callback_server)
- : use_interceptors(interceptors),
- use_proxy(proxy),
- inproc(inproc_stub),
- credentials_type(creds_type),
- callback_server(use_callback_server) {}
- void Log() const;
- bool use_interceptors;
- bool use_proxy;
- bool inproc;
+ : use_interceptors(interceptors),
+ use_proxy(proxy),
+ inproc(inproc_stub),
+ credentials_type(creds_type),
+ callback_server(use_callback_server) {}
+ void Log() const;
+ bool use_interceptors;
+ bool use_proxy;
+ bool inproc;
const TString credentials_type;
- bool callback_server;
-};
-
-static std::ostream& operator<<(std::ostream& out,
- const TestScenario& scenario) {
- return out << "TestScenario{use_interceptors="
- << (scenario.use_interceptors ? "true" : "false")
- << ", use_proxy=" << (scenario.use_proxy ? "true" : "false")
- << ", inproc=" << (scenario.inproc ? "true" : "false")
- << ", server_type="
- << (scenario.callback_server ? "callback" : "sync")
- << ", credentials='" << scenario.credentials_type << "'}";
-}
-
-void TestScenario::Log() const {
- std::ostringstream out;
- out << *this;
- gpr_log(GPR_DEBUG, "%s", out.str().c_str());
-}
-
-class End2endTest : public ::testing::TestWithParam<TestScenario> {
- protected:
- static void SetUpTestCase() { grpc_init(); }
- static void TearDownTestCase() { grpc_shutdown(); }
- End2endTest()
- : is_server_started_(false),
- kMaxMessageSize_(8192),
- special_service_("special"),
- first_picked_port_(0) {
- GetParam().Log();
- }
-
- void SetUp() override {
- if (GetParam().callback_server && !GetParam().inproc &&
- !grpc_iomgr_run_in_background()) {
- do_not_test_ = true;
- return;
- }
- }
-
- void TearDown() override {
- if (is_server_started_) {
- server_->Shutdown();
- if (proxy_server_) proxy_server_->Shutdown();
- }
- if (first_picked_port_ > 0) {
- grpc_recycle_unused_port(first_picked_port_);
- }
- }
-
- void StartServer(const std::shared_ptr<AuthMetadataProcessor>& processor) {
- int port = grpc_pick_unused_port_or_die();
- first_picked_port_ = port;
- server_address_ << "127.0.0.1:" << port;
- // Setup server
- BuildAndStartServer(processor);
- }
-
- void RestartServer(const std::shared_ptr<AuthMetadataProcessor>& processor) {
- if (is_server_started_) {
- server_->Shutdown();
- BuildAndStartServer(processor);
- }
- }
-
- void BuildAndStartServer(
- const std::shared_ptr<AuthMetadataProcessor>& processor) {
- ServerBuilder builder;
- ConfigureServerBuilder(&builder);
- auto server_creds = GetCredentialsProvider()->GetServerCredentials(
- GetParam().credentials_type);
- if (GetParam().credentials_type != kInsecureCredentialsType) {
- server_creds->SetAuthMetadataProcessor(processor);
- }
- if (GetParam().use_interceptors) {
- std::vector<
- std::unique_ptr<experimental::ServerInterceptorFactoryInterface>>
- creators;
- // Add 20 dummy server interceptors
- creators.reserve(20);
- for (auto i = 0; i < 20; i++) {
- creators.push_back(std::unique_ptr<DummyInterceptorFactory>(
- new DummyInterceptorFactory()));
- }
- builder.experimental().SetInterceptorCreators(std::move(creators));
- }
- builder.AddListeningPort(server_address_.str(), server_creds);
- if (!GetParam().callback_server) {
- builder.RegisterService(&service_);
- } else {
- builder.RegisterService(&callback_service_);
- }
- builder.RegisterService("foo.test.youtube.com", &special_service_);
- builder.RegisterService(&dup_pkg_service_);
-
- builder.SetSyncServerOption(ServerBuilder::SyncServerOption::NUM_CQS, 4);
- builder.SetSyncServerOption(
- ServerBuilder::SyncServerOption::CQ_TIMEOUT_MSEC, 10);
-
- server_ = builder.BuildAndStart();
- is_server_started_ = true;
- }
-
- virtual void ConfigureServerBuilder(ServerBuilder* builder) {
- builder->SetMaxMessageSize(
- kMaxMessageSize_); // For testing max message size.
- }
-
- void ResetChannel(
- std::vector<
- std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
- interceptor_creators = {}) {
- if (!is_server_started_) {
- StartServer(std::shared_ptr<AuthMetadataProcessor>());
- }
- EXPECT_TRUE(is_server_started_);
- ChannelArguments args;
- auto channel_creds = GetCredentialsProvider()->GetChannelCredentials(
- GetParam().credentials_type, &args);
- if (!user_agent_prefix_.empty()) {
- args.SetUserAgentPrefix(user_agent_prefix_);
- }
- args.SetString(GRPC_ARG_SECONDARY_USER_AGENT_STRING, "end2end_test");
-
- if (!GetParam().inproc) {
- if (!GetParam().use_interceptors) {
- channel_ = ::grpc::CreateCustomChannel(server_address_.str(),
- channel_creds, args);
- } else {
- channel_ = CreateCustomChannelWithInterceptors(
- server_address_.str(), channel_creds, args,
- interceptor_creators.empty() ? CreateDummyClientInterceptors()
- : std::move(interceptor_creators));
- }
- } else {
- if (!GetParam().use_interceptors) {
- channel_ = server_->InProcessChannel(args);
- } else {
- channel_ = server_->experimental().InProcessChannelWithInterceptors(
- args, interceptor_creators.empty()
- ? CreateDummyClientInterceptors()
- : std::move(interceptor_creators));
- }
- }
- }
-
- void ResetStub(
- std::vector<
- std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
- interceptor_creators = {}) {
- ResetChannel(std::move(interceptor_creators));
- if (GetParam().use_proxy) {
- proxy_service_.reset(new Proxy(channel_));
- int port = grpc_pick_unused_port_or_die();
- std::ostringstream proxyaddr;
- proxyaddr << "localhost:" << port;
- ServerBuilder builder;
- builder.AddListeningPort(proxyaddr.str(), InsecureServerCredentials());
- builder.RegisterService(proxy_service_.get());
-
- builder.SetSyncServerOption(ServerBuilder::SyncServerOption::NUM_CQS, 4);
- builder.SetSyncServerOption(
- ServerBuilder::SyncServerOption::CQ_TIMEOUT_MSEC, 10);
-
- proxy_server_ = builder.BuildAndStart();
-
- channel_ =
- grpc::CreateChannel(proxyaddr.str(), InsecureChannelCredentials());
- }
-
- stub_ = grpc::testing::EchoTestService::NewStub(channel_);
- DummyInterceptor::Reset();
- }
-
- bool do_not_test_{false};
- bool is_server_started_;
- std::shared_ptr<Channel> channel_;
- std::unique_ptr<grpc::testing::EchoTestService::Stub> stub_;
- std::unique_ptr<Server> server_;
- std::unique_ptr<Server> proxy_server_;
- std::unique_ptr<Proxy> proxy_service_;
- std::ostringstream server_address_;
- const int kMaxMessageSize_;
- TestServiceImpl service_;
- CallbackTestServiceImpl callback_service_;
- TestServiceImpl special_service_;
- TestServiceImplDupPkg dup_pkg_service_;
+ bool callback_server;
+};
+
+static std::ostream& operator<<(std::ostream& out,
+ const TestScenario& scenario) {
+ return out << "TestScenario{use_interceptors="
+ << (scenario.use_interceptors ? "true" : "false")
+ << ", use_proxy=" << (scenario.use_proxy ? "true" : "false")
+ << ", inproc=" << (scenario.inproc ? "true" : "false")
+ << ", server_type="
+ << (scenario.callback_server ? "callback" : "sync")
+ << ", credentials='" << scenario.credentials_type << "'}";
+}
+
+void TestScenario::Log() const {
+ std::ostringstream out;
+ out << *this;
+ gpr_log(GPR_DEBUG, "%s", out.str().c_str());
+}
+
+class End2endTest : public ::testing::TestWithParam<TestScenario> {
+ protected:
+ static void SetUpTestCase() { grpc_init(); }
+ static void TearDownTestCase() { grpc_shutdown(); }
+ End2endTest()
+ : is_server_started_(false),
+ kMaxMessageSize_(8192),
+ special_service_("special"),
+ first_picked_port_(0) {
+ GetParam().Log();
+ }
+
+ void SetUp() override {
+ if (GetParam().callback_server && !GetParam().inproc &&
+ !grpc_iomgr_run_in_background()) {
+ do_not_test_ = true;
+ return;
+ }
+ }
+
+ void TearDown() override {
+ if (is_server_started_) {
+ server_->Shutdown();
+ if (proxy_server_) proxy_server_->Shutdown();
+ }
+ if (first_picked_port_ > 0) {
+ grpc_recycle_unused_port(first_picked_port_);
+ }
+ }
+
+ void StartServer(const std::shared_ptr<AuthMetadataProcessor>& processor) {
+ int port = grpc_pick_unused_port_or_die();
+ first_picked_port_ = port;
+ server_address_ << "127.0.0.1:" << port;
+ // Setup server
+ BuildAndStartServer(processor);
+ }
+
+ void RestartServer(const std::shared_ptr<AuthMetadataProcessor>& processor) {
+ if (is_server_started_) {
+ server_->Shutdown();
+ BuildAndStartServer(processor);
+ }
+ }
+
+ void BuildAndStartServer(
+ const std::shared_ptr<AuthMetadataProcessor>& processor) {
+ ServerBuilder builder;
+ ConfigureServerBuilder(&builder);
+ auto server_creds = GetCredentialsProvider()->GetServerCredentials(
+ GetParam().credentials_type);
+ if (GetParam().credentials_type != kInsecureCredentialsType) {
+ server_creds->SetAuthMetadataProcessor(processor);
+ }
+ if (GetParam().use_interceptors) {
+ std::vector<
+ std::unique_ptr<experimental::ServerInterceptorFactoryInterface>>
+ creators;
+ // Add 20 dummy server interceptors
+ creators.reserve(20);
+ for (auto i = 0; i < 20; i++) {
+ creators.push_back(std::unique_ptr<DummyInterceptorFactory>(
+ new DummyInterceptorFactory()));
+ }
+ builder.experimental().SetInterceptorCreators(std::move(creators));
+ }
+ builder.AddListeningPort(server_address_.str(), server_creds);
+ if (!GetParam().callback_server) {
+ builder.RegisterService(&service_);
+ } else {
+ builder.RegisterService(&callback_service_);
+ }
+ builder.RegisterService("foo.test.youtube.com", &special_service_);
+ builder.RegisterService(&dup_pkg_service_);
+
+ builder.SetSyncServerOption(ServerBuilder::SyncServerOption::NUM_CQS, 4);
+ builder.SetSyncServerOption(
+ ServerBuilder::SyncServerOption::CQ_TIMEOUT_MSEC, 10);
+
+ server_ = builder.BuildAndStart();
+ is_server_started_ = true;
+ }
+
+ virtual void ConfigureServerBuilder(ServerBuilder* builder) {
+ builder->SetMaxMessageSize(
+ kMaxMessageSize_); // For testing max message size.
+ }
+
+ void ResetChannel(
+ std::vector<
+ std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
+ interceptor_creators = {}) {
+ if (!is_server_started_) {
+ StartServer(std::shared_ptr<AuthMetadataProcessor>());
+ }
+ EXPECT_TRUE(is_server_started_);
+ ChannelArguments args;
+ auto channel_creds = GetCredentialsProvider()->GetChannelCredentials(
+ GetParam().credentials_type, &args);
+ if (!user_agent_prefix_.empty()) {
+ args.SetUserAgentPrefix(user_agent_prefix_);
+ }
+ args.SetString(GRPC_ARG_SECONDARY_USER_AGENT_STRING, "end2end_test");
+
+ if (!GetParam().inproc) {
+ if (!GetParam().use_interceptors) {
+ channel_ = ::grpc::CreateCustomChannel(server_address_.str(),
+ channel_creds, args);
+ } else {
+ channel_ = CreateCustomChannelWithInterceptors(
+ server_address_.str(), channel_creds, args,
+ interceptor_creators.empty() ? CreateDummyClientInterceptors()
+ : std::move(interceptor_creators));
+ }
+ } else {
+ if (!GetParam().use_interceptors) {
+ channel_ = server_->InProcessChannel(args);
+ } else {
+ channel_ = server_->experimental().InProcessChannelWithInterceptors(
+ args, interceptor_creators.empty()
+ ? CreateDummyClientInterceptors()
+ : std::move(interceptor_creators));
+ }
+ }
+ }
+
+ void ResetStub(
+ std::vector<
+ std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
+ interceptor_creators = {}) {
+ ResetChannel(std::move(interceptor_creators));
+ if (GetParam().use_proxy) {
+ proxy_service_.reset(new Proxy(channel_));
+ int port = grpc_pick_unused_port_or_die();
+ std::ostringstream proxyaddr;
+ proxyaddr << "localhost:" << port;
+ ServerBuilder builder;
+ builder.AddListeningPort(proxyaddr.str(), InsecureServerCredentials());
+ builder.RegisterService(proxy_service_.get());
+
+ builder.SetSyncServerOption(ServerBuilder::SyncServerOption::NUM_CQS, 4);
+ builder.SetSyncServerOption(
+ ServerBuilder::SyncServerOption::CQ_TIMEOUT_MSEC, 10);
+
+ proxy_server_ = builder.BuildAndStart();
+
+ channel_ =
+ grpc::CreateChannel(proxyaddr.str(), InsecureChannelCredentials());
+ }
+
+ stub_ = grpc::testing::EchoTestService::NewStub(channel_);
+ DummyInterceptor::Reset();
+ }
+
+ bool do_not_test_{false};
+ bool is_server_started_;
+ std::shared_ptr<Channel> channel_;
+ std::unique_ptr<grpc::testing::EchoTestService::Stub> stub_;
+ std::unique_ptr<Server> server_;
+ std::unique_ptr<Server> proxy_server_;
+ std::unique_ptr<Proxy> proxy_service_;
+ std::ostringstream server_address_;
+ const int kMaxMessageSize_;
+ TestServiceImpl service_;
+ CallbackTestServiceImpl callback_service_;
+ TestServiceImpl special_service_;
+ TestServiceImplDupPkg dup_pkg_service_;
TString user_agent_prefix_;
- int first_picked_port_;
-};
-
-static void SendRpc(grpc::testing::EchoTestService::Stub* stub, int num_rpcs,
- bool with_binary_metadata) {
- EchoRequest request;
- EchoResponse response;
- request.set_message("Hello hello hello hello");
-
- for (int i = 0; i < num_rpcs; ++i) {
- ClientContext context;
- if (with_binary_metadata) {
- char bytes[8] = {'\0', '\1', '\2', '\3',
- '\4', '\5', '\6', static_cast<char>(i)};
+ int first_picked_port_;
+};
+
+static void SendRpc(grpc::testing::EchoTestService::Stub* stub, int num_rpcs,
+ bool with_binary_metadata) {
+ EchoRequest request;
+ EchoResponse response;
+ request.set_message("Hello hello hello hello");
+
+ for (int i = 0; i < num_rpcs; ++i) {
+ ClientContext context;
+ if (with_binary_metadata) {
+ char bytes[8] = {'\0', '\1', '\2', '\3',
+ '\4', '\5', '\6', static_cast<char>(i)};
context.AddMetadata("custom-bin", TString(bytes, 8));
- }
- context.set_compression_algorithm(GRPC_COMPRESS_GZIP);
- Status s = stub->Echo(&context, request, &response);
- EXPECT_EQ(response.message(), request.message());
- EXPECT_TRUE(s.ok());
- }
-}
-
-// This class is for testing scenarios where RPCs are cancelled on the server
-// by calling ServerContext::TryCancel()
-class End2endServerTryCancelTest : public End2endTest {
- protected:
- // Helper for testing client-streaming RPCs which are cancelled on the server.
- // Depending on the value of server_try_cancel parameter, this will test one
- // of the following three scenarios:
- // CANCEL_BEFORE_PROCESSING: Rpc is cancelled by the server before reading
- // any messages from the client
- //
- // CANCEL_DURING_PROCESSING: Rpc is cancelled by the server while reading
- // messages from the client
- //
-  //   CANCEL_AFTER_PROCESSING: Rpc is cancelled by the server after reading all
- // the messages from the client
- //
- // NOTE: Do not call this function with server_try_cancel == DO_NOT_CANCEL.
- void TestRequestStreamServerCancel(
- ServerTryCancelRequestPhase server_try_cancel, int num_msgs_to_send) {
- MAYBE_SKIP_TEST;
- RestartServer(std::shared_ptr<AuthMetadataProcessor>());
- ResetStub();
- EchoRequest request;
- EchoResponse response;
- ClientContext context;
-
- // Send server_try_cancel value in the client metadata
- context.AddMetadata(kServerTryCancelRequest,
+ }
+ context.set_compression_algorithm(GRPC_COMPRESS_GZIP);
+ Status s = stub->Echo(&context, request, &response);
+ EXPECT_EQ(response.message(), request.message());
+ EXPECT_TRUE(s.ok());
+ }
+}
+
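+// Note on SendRpc() above (an illustrative aside based on general gRPC
+// metadata rules, not on anything specific to this file): metadata keys that
+// end in "-bin" are treated as binary-safe (base64-encoded on HTTP/2
+// transports), which is why raw bytes can be attached under "custom-bin":
+//
+//   ClientContext ctx;
+//   ctx.set_compression_algorithm(GRPC_COMPRESS_GZIP);  // per-call override
+//   ctx.AddMetadata("custom-bin", TString("\x01\x02", 2));
+//
+// Keys without the "-bin" suffix must carry ASCII values.
+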
+// This class is for testing scenarios where RPCs are cancelled on the server
+// by calling ServerContext::TryCancel()
+class End2endServerTryCancelTest : public End2endTest {
+ protected:
+ // Helper for testing client-streaming RPCs which are cancelled on the server.
+ // Depending on the value of server_try_cancel parameter, this will test one
+ // of the following three scenarios:
+ // CANCEL_BEFORE_PROCESSING: Rpc is cancelled by the server before reading
+ // any messages from the client
+ //
+ // CANCEL_DURING_PROCESSING: Rpc is cancelled by the server while reading
+ // messages from the client
+ //
+  //   CANCEL_AFTER_PROCESSING: Rpc is cancelled by the server after reading all
+ // the messages from the client
+ //
+ // NOTE: Do not call this function with server_try_cancel == DO_NOT_CANCEL.
+ void TestRequestStreamServerCancel(
+ ServerTryCancelRequestPhase server_try_cancel, int num_msgs_to_send) {
+ MAYBE_SKIP_TEST;
+ RestartServer(std::shared_ptr<AuthMetadataProcessor>());
+ ResetStub();
+ EchoRequest request;
+ EchoResponse response;
+ ClientContext context;
+
+ // Send server_try_cancel value in the client metadata
+ context.AddMetadata(kServerTryCancelRequest,
ToString(server_try_cancel));
-
- auto stream = stub_->RequestStream(&context, &response);
-
- int num_msgs_sent = 0;
- while (num_msgs_sent < num_msgs_to_send) {
- request.set_message("hello");
- if (!stream->Write(request)) {
- break;
- }
- num_msgs_sent++;
- }
- gpr_log(GPR_INFO, "Sent %d messages", num_msgs_sent);
-
- stream->WritesDone();
- Status s = stream->Finish();
-
-    // At this point, we know for sure that the RPC was cancelled by the server
-    // since we passed the server_try_cancel value in the metadata. Depending on
-    // the value of server_try_cancel, the RPC might have been cancelled by the
- // server at different stages. The following validates our expectations of
- // number of messages sent in various cancellation scenarios:
-
- switch (server_try_cancel) {
- case CANCEL_BEFORE_PROCESSING:
- case CANCEL_DURING_PROCESSING:
-        // If the RPC is cancelled by the server before or while reading
-        // messages from the client, the client most likely did not get a
-        // chance to send all the messages it wanted to send, i.e.
-        // num_msgs_sent <= num_msgs_to_send.
- EXPECT_LE(num_msgs_sent, num_msgs_to_send);
- break;
-
- case CANCEL_AFTER_PROCESSING:
- // If the RPC was cancelled after all messages were read by the server,
- // the client did get a chance to send all its messages
- EXPECT_EQ(num_msgs_sent, num_msgs_to_send);
- break;
-
- default:
- gpr_log(GPR_ERROR, "Invalid server_try_cancel value: %d",
- server_try_cancel);
- EXPECT_TRUE(server_try_cancel > DO_NOT_CANCEL &&
- server_try_cancel <= CANCEL_AFTER_PROCESSING);
- break;
- }
-
- EXPECT_FALSE(s.ok());
- EXPECT_EQ(grpc::StatusCode::CANCELLED, s.error_code());
- // Make sure that the server interceptors were notified
- if (GetParam().use_interceptors) {
- EXPECT_EQ(20, DummyInterceptor::GetNumTimesCancel());
- }
- }
-
- // Helper for testing server-streaming RPCs which are cancelled on the server.
- // Depending on the value of server_try_cancel parameter, this will test one
- // of the following three scenarios:
- // CANCEL_BEFORE_PROCESSING: Rpc is cancelled by the server before writing
- // any messages to the client
- //
- // CANCEL_DURING_PROCESSING: Rpc is cancelled by the server while writing
- // messages to the client
- //
-  //   CANCEL_AFTER_PROCESSING: Rpc is cancelled by the server after writing all
- // the messages to the client
- //
- // NOTE: Do not call this function with server_try_cancel == DO_NOT_CANCEL.
- void TestResponseStreamServerCancel(
- ServerTryCancelRequestPhase server_try_cancel) {
- MAYBE_SKIP_TEST;
- RestartServer(std::shared_ptr<AuthMetadataProcessor>());
- ResetStub();
- EchoRequest request;
- EchoResponse response;
- ClientContext context;
-
- // Send server_try_cancel in the client metadata
- context.AddMetadata(kServerTryCancelRequest,
+
+ auto stream = stub_->RequestStream(&context, &response);
+
+ int num_msgs_sent = 0;
+ while (num_msgs_sent < num_msgs_to_send) {
+ request.set_message("hello");
+ if (!stream->Write(request)) {
+ break;
+ }
+ num_msgs_sent++;
+ }
+ gpr_log(GPR_INFO, "Sent %d messages", num_msgs_sent);
+
+ stream->WritesDone();
+ Status s = stream->Finish();
+
+    // At this point, we know for sure that the RPC was cancelled by the server
+    // since we passed the server_try_cancel value in the metadata. Depending on
+    // the value of server_try_cancel, the RPC might have been cancelled by the
+ // server at different stages. The following validates our expectations of
+ // number of messages sent in various cancellation scenarios:
+
+ switch (server_try_cancel) {
+ case CANCEL_BEFORE_PROCESSING:
+ case CANCEL_DURING_PROCESSING:
+        // If the RPC is cancelled by the server before or while reading
+        // messages from the client, the client most likely did not get a
+        // chance to send all the messages it wanted to send, i.e.
+        // num_msgs_sent <= num_msgs_to_send.
+ EXPECT_LE(num_msgs_sent, num_msgs_to_send);
+ break;
+
+ case CANCEL_AFTER_PROCESSING:
+ // If the RPC was cancelled after all messages were read by the server,
+ // the client did get a chance to send all its messages
+ EXPECT_EQ(num_msgs_sent, num_msgs_to_send);
+ break;
+
+ default:
+ gpr_log(GPR_ERROR, "Invalid server_try_cancel value: %d",
+ server_try_cancel);
+ EXPECT_TRUE(server_try_cancel > DO_NOT_CANCEL &&
+ server_try_cancel <= CANCEL_AFTER_PROCESSING);
+ break;
+ }
+
+ EXPECT_FALSE(s.ok());
+ EXPECT_EQ(grpc::StatusCode::CANCELLED, s.error_code());
+ // Make sure that the server interceptors were notified
+ if (GetParam().use_interceptors) {
+ EXPECT_EQ(20, DummyInterceptor::GetNumTimesCancel());
+ }
+ }
+
+ // Helper for testing server-streaming RPCs which are cancelled on the server.
+ // Depending on the value of server_try_cancel parameter, this will test one
+ // of the following three scenarios:
+ // CANCEL_BEFORE_PROCESSING: Rpc is cancelled by the server before writing
+ // any messages to the client
+ //
+ // CANCEL_DURING_PROCESSING: Rpc is cancelled by the server while writing
+ // messages to the client
+ //
+  //   CANCEL_AFTER_PROCESSING: Rpc is cancelled by the server after writing all
+ // the messages to the client
+ //
+ // NOTE: Do not call this function with server_try_cancel == DO_NOT_CANCEL.
+ void TestResponseStreamServerCancel(
+ ServerTryCancelRequestPhase server_try_cancel) {
+ MAYBE_SKIP_TEST;
+ RestartServer(std::shared_ptr<AuthMetadataProcessor>());
+ ResetStub();
+ EchoRequest request;
+ EchoResponse response;
+ ClientContext context;
+
+ // Send server_try_cancel in the client metadata
+ context.AddMetadata(kServerTryCancelRequest,
ToString(server_try_cancel));
-
- request.set_message("hello");
- auto stream = stub_->ResponseStream(&context, request);
-
- int num_msgs_read = 0;
- while (num_msgs_read < kServerDefaultResponseStreamsToSend) {
- if (!stream->Read(&response)) {
- break;
- }
- EXPECT_EQ(response.message(),
+
+ request.set_message("hello");
+ auto stream = stub_->ResponseStream(&context, request);
+
+ int num_msgs_read = 0;
+ while (num_msgs_read < kServerDefaultResponseStreamsToSend) {
+ if (!stream->Read(&response)) {
+ break;
+ }
+ EXPECT_EQ(response.message(),
request.message() + ToString(num_msgs_read));
- num_msgs_read++;
- }
- gpr_log(GPR_INFO, "Read %d messages", num_msgs_read);
-
- Status s = stream->Finish();
-
- // Depending on the value of server_try_cancel, the RPC might have been
- // cancelled by the server at different stages. The following validates our
- // expectations of number of messages read in various cancellation
- // scenarios:
- switch (server_try_cancel) {
- case CANCEL_BEFORE_PROCESSING:
-        // Server cancelled before sending any messages, which means the
-        // client wouldn't have read any.
- EXPECT_EQ(num_msgs_read, 0);
- break;
-
- case CANCEL_DURING_PROCESSING:
- // Server cancelled while writing messages. Client must have read less
- // than or equal to the expected number of messages
- EXPECT_LE(num_msgs_read, kServerDefaultResponseStreamsToSend);
- break;
-
- case CANCEL_AFTER_PROCESSING:
- // Even though the Server cancelled after writing all messages, the RPC
- // may be cancelled before the Client got a chance to read all the
- // messages.
- EXPECT_LE(num_msgs_read, kServerDefaultResponseStreamsToSend);
- break;
-
- default: {
- gpr_log(GPR_ERROR, "Invalid server_try_cancel value: %d",
- server_try_cancel);
- EXPECT_TRUE(server_try_cancel > DO_NOT_CANCEL &&
- server_try_cancel <= CANCEL_AFTER_PROCESSING);
- break;
- }
- }
-
- EXPECT_FALSE(s.ok());
- // Make sure that the server interceptors were notified
- if (GetParam().use_interceptors) {
- EXPECT_EQ(20, DummyInterceptor::GetNumTimesCancel());
- }
- }
-
- // Helper for testing bidirectional-streaming RPCs which are cancelled on the
- // server. Depending on the value of server_try_cancel parameter, this will
- // test one of the following three scenarios:
- // CANCEL_BEFORE_PROCESSING: Rpc is cancelled by the server before reading/
- // writing any messages from/to the client
- //
- // CANCEL_DURING_PROCESSING: Rpc is cancelled by the server while reading/
- // writing messages from/to the client
- //
-  //   CANCEL_AFTER_PROCESSING: Rpc is cancelled by the server after reading/writing
- // all the messages from/to the client
- //
- // NOTE: Do not call this function with server_try_cancel == DO_NOT_CANCEL.
- void TestBidiStreamServerCancel(ServerTryCancelRequestPhase server_try_cancel,
- int num_messages) {
- MAYBE_SKIP_TEST;
- RestartServer(std::shared_ptr<AuthMetadataProcessor>());
- ResetStub();
- EchoRequest request;
- EchoResponse response;
- ClientContext context;
-
- // Send server_try_cancel in the client metadata
- context.AddMetadata(kServerTryCancelRequest,
+ num_msgs_read++;
+ }
+ gpr_log(GPR_INFO, "Read %d messages", num_msgs_read);
+
+ Status s = stream->Finish();
+
+ // Depending on the value of server_try_cancel, the RPC might have been
+ // cancelled by the server at different stages. The following validates our
+ // expectations of number of messages read in various cancellation
+ // scenarios:
+ switch (server_try_cancel) {
+ case CANCEL_BEFORE_PROCESSING:
+        // Server cancelled before sending any messages, which means the
+        // client wouldn't have read any.
+ EXPECT_EQ(num_msgs_read, 0);
+ break;
+
+ case CANCEL_DURING_PROCESSING:
+ // Server cancelled while writing messages. Client must have read less
+ // than or equal to the expected number of messages
+ EXPECT_LE(num_msgs_read, kServerDefaultResponseStreamsToSend);
+ break;
+
+ case CANCEL_AFTER_PROCESSING:
+ // Even though the Server cancelled after writing all messages, the RPC
+ // may be cancelled before the Client got a chance to read all the
+ // messages.
+ EXPECT_LE(num_msgs_read, kServerDefaultResponseStreamsToSend);
+ break;
+
+ default: {
+ gpr_log(GPR_ERROR, "Invalid server_try_cancel value: %d",
+ server_try_cancel);
+ EXPECT_TRUE(server_try_cancel > DO_NOT_CANCEL &&
+ server_try_cancel <= CANCEL_AFTER_PROCESSING);
+ break;
+ }
+ }
+
+ EXPECT_FALSE(s.ok());
+ // Make sure that the server interceptors were notified
+ if (GetParam().use_interceptors) {
+ EXPECT_EQ(20, DummyInterceptor::GetNumTimesCancel());
+ }
+ }
+
+ // Helper for testing bidirectional-streaming RPCs which are cancelled on the
+ // server. Depending on the value of server_try_cancel parameter, this will
+ // test one of the following three scenarios:
+ // CANCEL_BEFORE_PROCESSING: Rpc is cancelled by the server before reading/
+ // writing any messages from/to the client
+ //
+ // CANCEL_DURING_PROCESSING: Rpc is cancelled by the server while reading/
+ // writing messages from/to the client
+ //
+  //   CANCEL_AFTER_PROCESSING: Rpc is cancelled by the server after reading/writing
+ // all the messages from/to the client
+ //
+ // NOTE: Do not call this function with server_try_cancel == DO_NOT_CANCEL.
+ void TestBidiStreamServerCancel(ServerTryCancelRequestPhase server_try_cancel,
+ int num_messages) {
+ MAYBE_SKIP_TEST;
+ RestartServer(std::shared_ptr<AuthMetadataProcessor>());
+ ResetStub();
+ EchoRequest request;
+ EchoResponse response;
+ ClientContext context;
+
+ // Send server_try_cancel in the client metadata
+ context.AddMetadata(kServerTryCancelRequest,
ToString(server_try_cancel));
-
- auto stream = stub_->BidiStream(&context);
-
- int num_msgs_read = 0;
- int num_msgs_sent = 0;
- while (num_msgs_sent < num_messages) {
+
+ auto stream = stub_->BidiStream(&context);
+
+ int num_msgs_read = 0;
+ int num_msgs_sent = 0;
+ while (num_msgs_sent < num_messages) {
request.set_message("hello " + ToString(num_msgs_sent));
- if (!stream->Write(request)) {
- break;
- }
- num_msgs_sent++;
-
- if (!stream->Read(&response)) {
- break;
- }
- num_msgs_read++;
-
- EXPECT_EQ(response.message(), request.message());
- }
- gpr_log(GPR_INFO, "Sent %d messages", num_msgs_sent);
- gpr_log(GPR_INFO, "Read %d messages", num_msgs_read);
-
- stream->WritesDone();
- Status s = stream->Finish();
-
- // Depending on the value of server_try_cancel, the RPC might have been
- // cancelled by the server at different stages. The following validates our
- // expectations of number of messages read in various cancellation
- // scenarios:
- switch (server_try_cancel) {
- case CANCEL_BEFORE_PROCESSING:
- EXPECT_EQ(num_msgs_read, 0);
- break;
-
- case CANCEL_DURING_PROCESSING:
- EXPECT_LE(num_msgs_sent, num_messages);
- EXPECT_LE(num_msgs_read, num_msgs_sent);
- break;
-
- case CANCEL_AFTER_PROCESSING:
- EXPECT_EQ(num_msgs_sent, num_messages);
-
- // The Server cancelled after reading the last message and after writing
- // the message to the client. However, the RPC cancellation might have
- // taken effect before the client actually read the response.
- EXPECT_LE(num_msgs_read, num_msgs_sent);
- break;
-
- default:
- gpr_log(GPR_ERROR, "Invalid server_try_cancel value: %d",
- server_try_cancel);
- EXPECT_TRUE(server_try_cancel > DO_NOT_CANCEL &&
- server_try_cancel <= CANCEL_AFTER_PROCESSING);
- break;
- }
-
- EXPECT_FALSE(s.ok());
- EXPECT_EQ(grpc::StatusCode::CANCELLED, s.error_code());
- // Make sure that the server interceptors were notified
- if (GetParam().use_interceptors) {
- EXPECT_EQ(20, DummyInterceptor::GetNumTimesCancel());
- }
- }
-};
-
-TEST_P(End2endServerTryCancelTest, RequestEchoServerCancel) {
- MAYBE_SKIP_TEST;
- ResetStub();
- EchoRequest request;
- EchoResponse response;
- ClientContext context;
-
- context.AddMetadata(kServerTryCancelRequest,
+ if (!stream->Write(request)) {
+ break;
+ }
+ num_msgs_sent++;
+
+ if (!stream->Read(&response)) {
+ break;
+ }
+ num_msgs_read++;
+
+ EXPECT_EQ(response.message(), request.message());
+ }
+ gpr_log(GPR_INFO, "Sent %d messages", num_msgs_sent);
+ gpr_log(GPR_INFO, "Read %d messages", num_msgs_read);
+
+ stream->WritesDone();
+ Status s = stream->Finish();
+
+ // Depending on the value of server_try_cancel, the RPC might have been
+ // cancelled by the server at different stages. The following validates our
+ // expectations of number of messages read in various cancellation
+ // scenarios:
+ switch (server_try_cancel) {
+ case CANCEL_BEFORE_PROCESSING:
+ EXPECT_EQ(num_msgs_read, 0);
+ break;
+
+ case CANCEL_DURING_PROCESSING:
+ EXPECT_LE(num_msgs_sent, num_messages);
+ EXPECT_LE(num_msgs_read, num_msgs_sent);
+ break;
+
+ case CANCEL_AFTER_PROCESSING:
+ EXPECT_EQ(num_msgs_sent, num_messages);
+
+ // The Server cancelled after reading the last message and after writing
+ // the message to the client. However, the RPC cancellation might have
+ // taken effect before the client actually read the response.
+ EXPECT_LE(num_msgs_read, num_msgs_sent);
+ break;
+
+ default:
+ gpr_log(GPR_ERROR, "Invalid server_try_cancel value: %d",
+ server_try_cancel);
+ EXPECT_TRUE(server_try_cancel > DO_NOT_CANCEL &&
+ server_try_cancel <= CANCEL_AFTER_PROCESSING);
+ break;
+ }
+
+ EXPECT_FALSE(s.ok());
+ EXPECT_EQ(grpc::StatusCode::CANCELLED, s.error_code());
+ // Make sure that the server interceptors were notified
+ if (GetParam().use_interceptors) {
+ EXPECT_EQ(20, DummyInterceptor::GetNumTimesCancel());
+ }
+ }
+};
+
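+// Illustrative sketch (a hypothetical handler, not part of TestServiceImpl)
+// of the server-side call that the cancellation helpers above exercise: a
+// handler can abort its own RPC with ServerContext::TryCancel(), after which
+// the client observes StatusCode::CANCELLED. The real service decides when
+// to cancel based on the kServerTryCancelRequest metadata sent by these
+// tests.
+//
+//   Status Echo(ServerContext* ctx, const EchoRequest* /*request*/,
+//               EchoResponse* /*response*/) override {
+//     ctx->TryCancel();  // best-effort, out-of-band cancellation
+//     return Status::CANCELLED;
+//   }
+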
+TEST_P(End2endServerTryCancelTest, RequestEchoServerCancel) {
+ MAYBE_SKIP_TEST;
+ ResetStub();
+ EchoRequest request;
+ EchoResponse response;
+ ClientContext context;
+
+ context.AddMetadata(kServerTryCancelRequest,
ToString(CANCEL_BEFORE_PROCESSING));
- Status s = stub_->Echo(&context, request, &response);
- EXPECT_FALSE(s.ok());
- EXPECT_EQ(grpc::StatusCode::CANCELLED, s.error_code());
-}
-
-// Server to cancel before reading the request
-TEST_P(End2endServerTryCancelTest, RequestStreamServerCancelBeforeReads) {
- TestRequestStreamServerCancel(CANCEL_BEFORE_PROCESSING, 1);
-}
-
-// Server to cancel while reading a request from the stream in parallel
-TEST_P(End2endServerTryCancelTest, RequestStreamServerCancelDuringRead) {
- TestRequestStreamServerCancel(CANCEL_DURING_PROCESSING, 10);
-}
-
-// Server to cancel after reading all the requests but before returning to the
-// client
-TEST_P(End2endServerTryCancelTest, RequestStreamServerCancelAfterReads) {
- TestRequestStreamServerCancel(CANCEL_AFTER_PROCESSING, 4);
-}
-
-// Server to cancel before sending any response messages
-TEST_P(End2endServerTryCancelTest, ResponseStreamServerCancelBefore) {
- TestResponseStreamServerCancel(CANCEL_BEFORE_PROCESSING);
-}
-
-// Server to cancel while writing a response to the stream in parallel
-TEST_P(End2endServerTryCancelTest, ResponseStreamServerCancelDuring) {
- TestResponseStreamServerCancel(CANCEL_DURING_PROCESSING);
-}
-
-// Server to cancel after writing all the responses to the stream but before
-// returning to the client
-TEST_P(End2endServerTryCancelTest, ResponseStreamServerCancelAfter) {
- TestResponseStreamServerCancel(CANCEL_AFTER_PROCESSING);
-}
-
-// Server to cancel before reading/writing any requests/responses on the stream
-TEST_P(End2endServerTryCancelTest, BidiStreamServerCancelBefore) {
- TestBidiStreamServerCancel(CANCEL_BEFORE_PROCESSING, 2);
-}
-
-// Server to cancel while reading/writing requests/responses on the stream in
-// parallel
-TEST_P(End2endServerTryCancelTest, BidiStreamServerCancelDuring) {
- TestBidiStreamServerCancel(CANCEL_DURING_PROCESSING, 10);
-}
-
-// Server to cancel after reading/writing all requests/responses on the stream
-// but before returning to the client
-TEST_P(End2endServerTryCancelTest, BidiStreamServerCancelAfter) {
- TestBidiStreamServerCancel(CANCEL_AFTER_PROCESSING, 5);
-}
-
-TEST_P(End2endTest, SimpleRpcWithCustomUserAgentPrefix) {
- MAYBE_SKIP_TEST;
- // User-Agent is an HTTP header for HTTP transports only
- if (GetParam().inproc) {
- return;
- }
- user_agent_prefix_ = "custom_prefix";
- ResetStub();
- EchoRequest request;
- EchoResponse response;
- request.set_message("Hello hello hello hello");
- request.mutable_param()->set_echo_metadata(true);
-
- ClientContext context;
- Status s = stub_->Echo(&context, request, &response);
- EXPECT_EQ(response.message(), request.message());
- EXPECT_TRUE(s.ok());
- const auto& trailing_metadata = context.GetServerTrailingMetadata();
- auto iter = trailing_metadata.find("user-agent");
- EXPECT_TRUE(iter != trailing_metadata.end());
+ Status s = stub_->Echo(&context, request, &response);
+ EXPECT_FALSE(s.ok());
+ EXPECT_EQ(grpc::StatusCode::CANCELLED, s.error_code());
+}
+
+// Server to cancel before reading the request
+TEST_P(End2endServerTryCancelTest, RequestStreamServerCancelBeforeReads) {
+ TestRequestStreamServerCancel(CANCEL_BEFORE_PROCESSING, 1);
+}
+
+// Server to cancel while reading a request from the stream in parallel
+TEST_P(End2endServerTryCancelTest, RequestStreamServerCancelDuringRead) {
+ TestRequestStreamServerCancel(CANCEL_DURING_PROCESSING, 10);
+}
+
+// Server to cancel after reading all the requests but before returning to the
+// client
+TEST_P(End2endServerTryCancelTest, RequestStreamServerCancelAfterReads) {
+ TestRequestStreamServerCancel(CANCEL_AFTER_PROCESSING, 4);
+}
+
+// Server to cancel before sending any response messages
+TEST_P(End2endServerTryCancelTest, ResponseStreamServerCancelBefore) {
+ TestResponseStreamServerCancel(CANCEL_BEFORE_PROCESSING);
+}
+
+// Server to cancel while writing a response to the stream in parallel
+TEST_P(End2endServerTryCancelTest, ResponseStreamServerCancelDuring) {
+ TestResponseStreamServerCancel(CANCEL_DURING_PROCESSING);
+}
+
+// Server to cancel after writing all the responses to the stream but before
+// returning to the client
+TEST_P(End2endServerTryCancelTest, ResponseStreamServerCancelAfter) {
+ TestResponseStreamServerCancel(CANCEL_AFTER_PROCESSING);
+}
+
+// Server to cancel before reading/writing any requests/responses on the stream
+TEST_P(End2endServerTryCancelTest, BidiStreamServerCancelBefore) {
+ TestBidiStreamServerCancel(CANCEL_BEFORE_PROCESSING, 2);
+}
+
+// Server to cancel while reading/writing requests/responses on the stream in
+// parallel
+TEST_P(End2endServerTryCancelTest, BidiStreamServerCancelDuring) {
+ TestBidiStreamServerCancel(CANCEL_DURING_PROCESSING, 10);
+}
+
+// Server to cancel after reading/writing all requests/responses on the stream
+// but before returning to the client
+TEST_P(End2endServerTryCancelTest, BidiStreamServerCancelAfter) {
+ TestBidiStreamServerCancel(CANCEL_AFTER_PROCESSING, 5);
+}
+
+TEST_P(End2endTest, SimpleRpcWithCustomUserAgentPrefix) {
+ MAYBE_SKIP_TEST;
+ // User-Agent is an HTTP header for HTTP transports only
+ if (GetParam().inproc) {
+ return;
+ }
+ user_agent_prefix_ = "custom_prefix";
+ ResetStub();
+ EchoRequest request;
+ EchoResponse response;
+ request.set_message("Hello hello hello hello");
+ request.mutable_param()->set_echo_metadata(true);
+
+ ClientContext context;
+ Status s = stub_->Echo(&context, request, &response);
+ EXPECT_EQ(response.message(), request.message());
+ EXPECT_TRUE(s.ok());
+ const auto& trailing_metadata = context.GetServerTrailingMetadata();
+ auto iter = trailing_metadata.find("user-agent");
+ EXPECT_TRUE(iter != trailing_metadata.end());
TString expected_prefix = user_agent_prefix_ + " grpc-c++/";
- EXPECT_TRUE(iter->second.starts_with(expected_prefix)) << iter->second;
-}
-
-TEST_P(End2endTest, MultipleRpcsWithVariedBinaryMetadataValue) {
- MAYBE_SKIP_TEST;
- ResetStub();
- std::vector<std::thread> threads;
- threads.reserve(10);
- for (int i = 0; i < 10; ++i) {
- threads.emplace_back(SendRpc, stub_.get(), 10, true);
- }
- for (int i = 0; i < 10; ++i) {
- threads[i].join();
- }
-}
-
-TEST_P(End2endTest, MultipleRpcs) {
- MAYBE_SKIP_TEST;
- ResetStub();
- std::vector<std::thread> threads;
- threads.reserve(10);
- for (int i = 0; i < 10; ++i) {
- threads.emplace_back(SendRpc, stub_.get(), 10, false);
- }
- for (int i = 0; i < 10; ++i) {
- threads[i].join();
- }
-}
-
+ EXPECT_TRUE(iter->second.starts_with(expected_prefix)) << iter->second;
+}
+
+TEST_P(End2endTest, MultipleRpcsWithVariedBinaryMetadataValue) {
+ MAYBE_SKIP_TEST;
+ ResetStub();
+ std::vector<std::thread> threads;
+ threads.reserve(10);
+ for (int i = 0; i < 10; ++i) {
+ threads.emplace_back(SendRpc, stub_.get(), 10, true);
+ }
+ for (int i = 0; i < 10; ++i) {
+ threads[i].join();
+ }
+}
+
+TEST_P(End2endTest, MultipleRpcs) {
+ MAYBE_SKIP_TEST;
+ ResetStub();
+ std::vector<std::thread> threads;
+ threads.reserve(10);
+ for (int i = 0; i < 10; ++i) {
+ threads.emplace_back(SendRpc, stub_.get(), 10, false);
+ }
+ for (int i = 0; i < 10; ++i) {
+ threads[i].join();
+ }
+}
+
TEST_P(End2endTest, ManyStubs) {
MAYBE_SKIP_TEST;
ResetStub();
@@ -887,37 +887,37 @@ TEST_P(End2endTest, ManyStubs) {
EXPECT_GT(peer.registration_attempts(), registration_attempts_pre);
}
-TEST_P(End2endTest, EmptyBinaryMetadata) {
- MAYBE_SKIP_TEST;
- ResetStub();
- EchoRequest request;
- EchoResponse response;
- request.set_message("Hello hello hello hello");
- ClientContext context;
- context.AddMetadata("custom-bin", "");
- Status s = stub_->Echo(&context, request, &response);
- EXPECT_EQ(response.message(), request.message());
- EXPECT_TRUE(s.ok());
-}
-
-TEST_P(End2endTest, ReconnectChannel) {
- MAYBE_SKIP_TEST;
- if (GetParam().inproc) {
- return;
- }
- int poller_slowdown_factor = 1;
- // It needs 2 pollset_works to reconnect the channel with polling engine
- // "poll"
-#ifdef GRPC_POSIX_SOCKET_EV
- grpc_core::UniquePtr<char> poller = GPR_GLOBAL_CONFIG_GET(grpc_poll_strategy);
- if (0 == strcmp(poller.get(), "poll")) {
- poller_slowdown_factor = 2;
- }
-#endif // GRPC_POSIX_SOCKET_EV
- ResetStub();
- SendRpc(stub_.get(), 1, false);
- RestartServer(std::shared_ptr<AuthMetadataProcessor>());
- // It needs more than GRPC_CLIENT_CHANNEL_BACKUP_POLL_INTERVAL_MS time to
+TEST_P(End2endTest, EmptyBinaryMetadata) {
+ MAYBE_SKIP_TEST;
+ ResetStub();
+ EchoRequest request;
+ EchoResponse response;
+ request.set_message("Hello hello hello hello");
+ ClientContext context;
+ context.AddMetadata("custom-bin", "");
+ Status s = stub_->Echo(&context, request, &response);
+ EXPECT_EQ(response.message(), request.message());
+ EXPECT_TRUE(s.ok());
+}
+
+TEST_P(End2endTest, ReconnectChannel) {
+ MAYBE_SKIP_TEST;
+ if (GetParam().inproc) {
+ return;
+ }
+ int poller_slowdown_factor = 1;
+ // It needs 2 pollset_works to reconnect the channel with polling engine
+ // "poll"
+#ifdef GRPC_POSIX_SOCKET_EV
+ grpc_core::UniquePtr<char> poller = GPR_GLOBAL_CONFIG_GET(grpc_poll_strategy);
+ if (0 == strcmp(poller.get(), "poll")) {
+ poller_slowdown_factor = 2;
+ }
+#endif // GRPC_POSIX_SOCKET_EV
+ ResetStub();
+ SendRpc(stub_.get(), 1, false);
+ RestartServer(std::shared_ptr<AuthMetadataProcessor>());
+ // It needs more than GRPC_CLIENT_CHANNEL_BACKUP_POLL_INTERVAL_MS time to
// reconnect the channel. Make it a factor of 5x
gpr_sleep_until(
gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
@@ -925,309 +925,309 @@ TEST_P(End2endTest, ReconnectChannel) {
poller_slowdown_factor *
grpc_test_slowdown_factor(),
GPR_TIMESPAN)));
- SendRpc(stub_.get(), 1, false);
-}
-
-TEST_P(End2endTest, RequestStreamOneRequest) {
- MAYBE_SKIP_TEST;
- ResetStub();
- EchoRequest request;
- EchoResponse response;
- ClientContext context;
-
- auto stream = stub_->RequestStream(&context, &response);
- request.set_message("hello");
- EXPECT_TRUE(stream->Write(request));
- stream->WritesDone();
- Status s = stream->Finish();
- EXPECT_EQ(response.message(), request.message());
- EXPECT_TRUE(s.ok());
- EXPECT_TRUE(context.debug_error_string().empty());
-}
-
-TEST_P(End2endTest, RequestStreamOneRequestWithCoalescingApi) {
- MAYBE_SKIP_TEST;
- ResetStub();
- EchoRequest request;
- EchoResponse response;
- ClientContext context;
-
- context.set_initial_metadata_corked(true);
- auto stream = stub_->RequestStream(&context, &response);
- request.set_message("hello");
- stream->WriteLast(request, WriteOptions());
- Status s = stream->Finish();
- EXPECT_EQ(response.message(), request.message());
- EXPECT_TRUE(s.ok());
-}
-
-TEST_P(End2endTest, RequestStreamTwoRequests) {
- MAYBE_SKIP_TEST;
- ResetStub();
- EchoRequest request;
- EchoResponse response;
- ClientContext context;
-
- auto stream = stub_->RequestStream(&context, &response);
- request.set_message("hello");
- EXPECT_TRUE(stream->Write(request));
- EXPECT_TRUE(stream->Write(request));
- stream->WritesDone();
- Status s = stream->Finish();
- EXPECT_EQ(response.message(), "hellohello");
- EXPECT_TRUE(s.ok());
-}
-
-TEST_P(End2endTest, RequestStreamTwoRequestsWithWriteThrough) {
- MAYBE_SKIP_TEST;
- ResetStub();
- EchoRequest request;
- EchoResponse response;
- ClientContext context;
-
- auto stream = stub_->RequestStream(&context, &response);
- request.set_message("hello");
- EXPECT_TRUE(stream->Write(request, WriteOptions().set_write_through()));
- EXPECT_TRUE(stream->Write(request, WriteOptions().set_write_through()));
- stream->WritesDone();
- Status s = stream->Finish();
- EXPECT_EQ(response.message(), "hellohello");
- EXPECT_TRUE(s.ok());
-}
-
-TEST_P(End2endTest, RequestStreamTwoRequestsWithCoalescingApi) {
- MAYBE_SKIP_TEST;
- ResetStub();
- EchoRequest request;
- EchoResponse response;
- ClientContext context;
-
- context.set_initial_metadata_corked(true);
- auto stream = stub_->RequestStream(&context, &response);
- request.set_message("hello");
- EXPECT_TRUE(stream->Write(request));
- stream->WriteLast(request, WriteOptions());
- Status s = stream->Finish();
- EXPECT_EQ(response.message(), "hellohello");
- EXPECT_TRUE(s.ok());
-}
-
-TEST_P(End2endTest, ResponseStream) {
- MAYBE_SKIP_TEST;
- ResetStub();
- EchoRequest request;
- EchoResponse response;
- ClientContext context;
- request.set_message("hello");
-
- auto stream = stub_->ResponseStream(&context, request);
- for (int i = 0; i < kServerDefaultResponseStreamsToSend; ++i) {
- EXPECT_TRUE(stream->Read(&response));
+ SendRpc(stub_.get(), 1, false);
+}
+
+TEST_P(End2endTest, RequestStreamOneRequest) {
+ MAYBE_SKIP_TEST;
+ ResetStub();
+ EchoRequest request;
+ EchoResponse response;
+ ClientContext context;
+
+ auto stream = stub_->RequestStream(&context, &response);
+ request.set_message("hello");
+ EXPECT_TRUE(stream->Write(request));
+ stream->WritesDone();
+ Status s = stream->Finish();
+ EXPECT_EQ(response.message(), request.message());
+ EXPECT_TRUE(s.ok());
+ EXPECT_TRUE(context.debug_error_string().empty());
+}
+
+TEST_P(End2endTest, RequestStreamOneRequestWithCoalescingApi) {
+ MAYBE_SKIP_TEST;
+ ResetStub();
+ EchoRequest request;
+ EchoResponse response;
+ ClientContext context;
+
+ context.set_initial_metadata_corked(true);
+ auto stream = stub_->RequestStream(&context, &response);
+ request.set_message("hello");
+ stream->WriteLast(request, WriteOptions());
+ Status s = stream->Finish();
+ EXPECT_EQ(response.message(), request.message());
+ EXPECT_TRUE(s.ok());
+}
+
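+// Hedged summary of the "coalescing API" used by several tests here:
+// set_initial_metadata_corked(true) holds back the client's initial metadata
+// until the first write, and WriteLast(request, WriteOptions()) combines that
+// write with the half-close, so a one-message request stream can be sent as a
+// single batch:
+//
+//   context.set_initial_metadata_corked(true);
+//   auto stream = stub_->RequestStream(&context, &response);
+//   stream->WriteLast(request, WriteOptions());  // metadata + message + done
+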
+TEST_P(End2endTest, RequestStreamTwoRequests) {
+ MAYBE_SKIP_TEST;
+ ResetStub();
+ EchoRequest request;
+ EchoResponse response;
+ ClientContext context;
+
+ auto stream = stub_->RequestStream(&context, &response);
+ request.set_message("hello");
+ EXPECT_TRUE(stream->Write(request));
+ EXPECT_TRUE(stream->Write(request));
+ stream->WritesDone();
+ Status s = stream->Finish();
+ EXPECT_EQ(response.message(), "hellohello");
+ EXPECT_TRUE(s.ok());
+}
+
+TEST_P(End2endTest, RequestStreamTwoRequestsWithWriteThrough) {
+ MAYBE_SKIP_TEST;
+ ResetStub();
+ EchoRequest request;
+ EchoResponse response;
+ ClientContext context;
+
+ auto stream = stub_->RequestStream(&context, &response);
+ request.set_message("hello");
+ EXPECT_TRUE(stream->Write(request, WriteOptions().set_write_through()));
+ EXPECT_TRUE(stream->Write(request, WriteOptions().set_write_through()));
+ stream->WritesDone();
+ Status s = stream->Finish();
+ EXPECT_EQ(response.message(), "hellohello");
+ EXPECT_TRUE(s.ok());
+}
+
+TEST_P(End2endTest, RequestStreamTwoRequestsWithCoalescingApi) {
+ MAYBE_SKIP_TEST;
+ ResetStub();
+ EchoRequest request;
+ EchoResponse response;
+ ClientContext context;
+
+ context.set_initial_metadata_corked(true);
+ auto stream = stub_->RequestStream(&context, &response);
+ request.set_message("hello");
+ EXPECT_TRUE(stream->Write(request));
+ stream->WriteLast(request, WriteOptions());
+ Status s = stream->Finish();
+ EXPECT_EQ(response.message(), "hellohello");
+ EXPECT_TRUE(s.ok());
+}
+
+TEST_P(End2endTest, ResponseStream) {
+ MAYBE_SKIP_TEST;
+ ResetStub();
+ EchoRequest request;
+ EchoResponse response;
+ ClientContext context;
+ request.set_message("hello");
+
+ auto stream = stub_->ResponseStream(&context, request);
+ for (int i = 0; i < kServerDefaultResponseStreamsToSend; ++i) {
+ EXPECT_TRUE(stream->Read(&response));
EXPECT_EQ(response.message(), request.message() + ToString(i));
- }
- EXPECT_FALSE(stream->Read(&response));
-
- Status s = stream->Finish();
- EXPECT_TRUE(s.ok());
-}
-
-TEST_P(End2endTest, ResponseStreamWithCoalescingApi) {
- MAYBE_SKIP_TEST;
- ResetStub();
- EchoRequest request;
- EchoResponse response;
- ClientContext context;
- request.set_message("hello");
- context.AddMetadata(kServerUseCoalescingApi, "1");
-
- auto stream = stub_->ResponseStream(&context, request);
- for (int i = 0; i < kServerDefaultResponseStreamsToSend; ++i) {
- EXPECT_TRUE(stream->Read(&response));
+ }
+ EXPECT_FALSE(stream->Read(&response));
+
+ Status s = stream->Finish();
+ EXPECT_TRUE(s.ok());
+}
+
+TEST_P(End2endTest, ResponseStreamWithCoalescingApi) {
+ MAYBE_SKIP_TEST;
+ ResetStub();
+ EchoRequest request;
+ EchoResponse response;
+ ClientContext context;
+ request.set_message("hello");
+ context.AddMetadata(kServerUseCoalescingApi, "1");
+
+ auto stream = stub_->ResponseStream(&context, request);
+ for (int i = 0; i < kServerDefaultResponseStreamsToSend; ++i) {
+ EXPECT_TRUE(stream->Read(&response));
EXPECT_EQ(response.message(), request.message() + ToString(i));
- }
- EXPECT_FALSE(stream->Read(&response));
-
- Status s = stream->Finish();
- EXPECT_TRUE(s.ok());
-}
-
-// This was added to prevent regression from issue:
-// https://github.com/grpc/grpc/issues/11546
-TEST_P(End2endTest, ResponseStreamWithEverythingCoalesced) {
- MAYBE_SKIP_TEST;
- ResetStub();
- EchoRequest request;
- EchoResponse response;
- ClientContext context;
- request.set_message("hello");
- context.AddMetadata(kServerUseCoalescingApi, "1");
- // We will only send one message, forcing everything (init metadata, message,
- // trailing) to be coalesced together.
- context.AddMetadata(kServerResponseStreamsToSend, "1");
-
- auto stream = stub_->ResponseStream(&context, request);
- EXPECT_TRUE(stream->Read(&response));
- EXPECT_EQ(response.message(), request.message() + "0");
-
- EXPECT_FALSE(stream->Read(&response));
-
- Status s = stream->Finish();
- EXPECT_TRUE(s.ok());
-}
-
-TEST_P(End2endTest, BidiStream) {
- MAYBE_SKIP_TEST;
- ResetStub();
- EchoRequest request;
- EchoResponse response;
- ClientContext context;
+ }
+ EXPECT_FALSE(stream->Read(&response));
+
+ Status s = stream->Finish();
+ EXPECT_TRUE(s.ok());
+}
+
+// This was added to prevent regression from issue:
+// https://github.com/grpc/grpc/issues/11546
+TEST_P(End2endTest, ResponseStreamWithEverythingCoalesced) {
+ MAYBE_SKIP_TEST;
+ ResetStub();
+ EchoRequest request;
+ EchoResponse response;
+ ClientContext context;
+ request.set_message("hello");
+ context.AddMetadata(kServerUseCoalescingApi, "1");
+ // We will only send one message, forcing everything (init metadata, message,
+ // trailing) to be coalesced together.
+ context.AddMetadata(kServerResponseStreamsToSend, "1");
+
+ auto stream = stub_->ResponseStream(&context, request);
+ EXPECT_TRUE(stream->Read(&response));
+ EXPECT_EQ(response.message(), request.message() + "0");
+
+ EXPECT_FALSE(stream->Read(&response));
+
+ Status s = stream->Finish();
+ EXPECT_TRUE(s.ok());
+}
+
+TEST_P(End2endTest, BidiStream) {
+ MAYBE_SKIP_TEST;
+ ResetStub();
+ EchoRequest request;
+ EchoResponse response;
+ ClientContext context;
TString msg("hello");
-
- auto stream = stub_->BidiStream(&context);
-
- for (int i = 0; i < kServerDefaultResponseStreamsToSend; ++i) {
+
+ auto stream = stub_->BidiStream(&context);
+
+ for (int i = 0; i < kServerDefaultResponseStreamsToSend; ++i) {
request.set_message(msg + ToString(i));
- EXPECT_TRUE(stream->Write(request));
- EXPECT_TRUE(stream->Read(&response));
- EXPECT_EQ(response.message(), request.message());
- }
-
- stream->WritesDone();
- EXPECT_FALSE(stream->Read(&response));
- EXPECT_FALSE(stream->Read(&response));
-
- Status s = stream->Finish();
- EXPECT_TRUE(s.ok());
-}
-
-TEST_P(End2endTest, BidiStreamWithCoalescingApi) {
- MAYBE_SKIP_TEST;
- ResetStub();
- EchoRequest request;
- EchoResponse response;
- ClientContext context;
- context.AddMetadata(kServerFinishAfterNReads, "3");
- context.set_initial_metadata_corked(true);
+ EXPECT_TRUE(stream->Write(request));
+ EXPECT_TRUE(stream->Read(&response));
+ EXPECT_EQ(response.message(), request.message());
+ }
+
+ stream->WritesDone();
+ EXPECT_FALSE(stream->Read(&response));
+ EXPECT_FALSE(stream->Read(&response));
+
+ Status s = stream->Finish();
+ EXPECT_TRUE(s.ok());
+}
+
+TEST_P(End2endTest, BidiStreamWithCoalescingApi) {
+ MAYBE_SKIP_TEST;
+ ResetStub();
+ EchoRequest request;
+ EchoResponse response;
+ ClientContext context;
+ context.AddMetadata(kServerFinishAfterNReads, "3");
+ context.set_initial_metadata_corked(true);
TString msg("hello");
-
- auto stream = stub_->BidiStream(&context);
-
- request.set_message(msg + "0");
- EXPECT_TRUE(stream->Write(request));
- EXPECT_TRUE(stream->Read(&response));
- EXPECT_EQ(response.message(), request.message());
-
- request.set_message(msg + "1");
- EXPECT_TRUE(stream->Write(request));
- EXPECT_TRUE(stream->Read(&response));
- EXPECT_EQ(response.message(), request.message());
-
- request.set_message(msg + "2");
- stream->WriteLast(request, WriteOptions());
- EXPECT_TRUE(stream->Read(&response));
- EXPECT_EQ(response.message(), request.message());
-
- EXPECT_FALSE(stream->Read(&response));
- EXPECT_FALSE(stream->Read(&response));
-
- Status s = stream->Finish();
- EXPECT_TRUE(s.ok());
-}
-
-// This was added to prevent regression from issue:
-// https://github.com/grpc/grpc/issues/11546
-TEST_P(End2endTest, BidiStreamWithEverythingCoalesced) {
- MAYBE_SKIP_TEST;
- ResetStub();
- EchoRequest request;
- EchoResponse response;
- ClientContext context;
- context.AddMetadata(kServerFinishAfterNReads, "1");
- context.set_initial_metadata_corked(true);
+
+ auto stream = stub_->BidiStream(&context);
+
+ request.set_message(msg + "0");
+ EXPECT_TRUE(stream->Write(request));
+ EXPECT_TRUE(stream->Read(&response));
+ EXPECT_EQ(response.message(), request.message());
+
+ request.set_message(msg + "1");
+ EXPECT_TRUE(stream->Write(request));
+ EXPECT_TRUE(stream->Read(&response));
+ EXPECT_EQ(response.message(), request.message());
+
+ request.set_message(msg + "2");
+ stream->WriteLast(request, WriteOptions());
+ EXPECT_TRUE(stream->Read(&response));
+ EXPECT_EQ(response.message(), request.message());
+
+ EXPECT_FALSE(stream->Read(&response));
+ EXPECT_FALSE(stream->Read(&response));
+
+ Status s = stream->Finish();
+ EXPECT_TRUE(s.ok());
+}
+
+// This was added to prevent regression from issue:
+// https://github.com/grpc/grpc/issues/11546
+TEST_P(End2endTest, BidiStreamWithEverythingCoalesced) {
+ MAYBE_SKIP_TEST;
+ ResetStub();
+ EchoRequest request;
+ EchoResponse response;
+ ClientContext context;
+ context.AddMetadata(kServerFinishAfterNReads, "1");
+ context.set_initial_metadata_corked(true);
TString msg("hello");
-
- auto stream = stub_->BidiStream(&context);
-
- request.set_message(msg + "0");
- stream->WriteLast(request, WriteOptions());
- EXPECT_TRUE(stream->Read(&response));
- EXPECT_EQ(response.message(), request.message());
-
- EXPECT_FALSE(stream->Read(&response));
- EXPECT_FALSE(stream->Read(&response));
-
- Status s = stream->Finish();
- EXPECT_TRUE(s.ok());
-}
-
-// Talk to the two services with the same name but different package names.
-// The two stubs are created on the same channel.
-TEST_P(End2endTest, DiffPackageServices) {
- MAYBE_SKIP_TEST;
- ResetStub();
- EchoRequest request;
- EchoResponse response;
- request.set_message("Hello");
-
- ClientContext context;
- Status s = stub_->Echo(&context, request, &response);
- EXPECT_EQ(response.message(), request.message());
- EXPECT_TRUE(s.ok());
-
- std::unique_ptr<grpc::testing::duplicate::EchoTestService::Stub> dup_pkg_stub(
- grpc::testing::duplicate::EchoTestService::NewStub(channel_));
- ClientContext context2;
- s = dup_pkg_stub->Echo(&context2, request, &response);
- EXPECT_EQ("no package", response.message());
- EXPECT_TRUE(s.ok());
-}
-
-template <class ServiceType>
-void CancelRpc(ClientContext* context, int delay_us, ServiceType* service) {
- gpr_sleep_until(gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
- gpr_time_from_micros(delay_us, GPR_TIMESPAN)));
- while (!service->signal_client()) {
- }
- context->TryCancel();
-}
-
-TEST_P(End2endTest, CancelRpcBeforeStart) {
- MAYBE_SKIP_TEST;
- ResetStub();
- EchoRequest request;
- EchoResponse response;
- ClientContext context;
- request.set_message("hello");
- context.TryCancel();
- Status s = stub_->Echo(&context, request, &response);
- EXPECT_EQ("", response.message());
- EXPECT_EQ(grpc::StatusCode::CANCELLED, s.error_code());
- if (GetParam().use_interceptors) {
- EXPECT_EQ(20, DummyInterceptor::GetNumTimesCancel());
- }
-}
-
+
+ auto stream = stub_->BidiStream(&context);
+
+ request.set_message(msg + "0");
+ stream->WriteLast(request, WriteOptions());
+ EXPECT_TRUE(stream->Read(&response));
+ EXPECT_EQ(response.message(), request.message());
+
+ EXPECT_FALSE(stream->Read(&response));
+ EXPECT_FALSE(stream->Read(&response));
+
+ Status s = stream->Finish();
+ EXPECT_TRUE(s.ok());
+}
+
+// Talk to the two services with the same name but different package names.
+// The two stubs are created on the same channel.
+TEST_P(End2endTest, DiffPackageServices) {
+ MAYBE_SKIP_TEST;
+ ResetStub();
+ EchoRequest request;
+ EchoResponse response;
+ request.set_message("Hello");
+
+ ClientContext context;
+ Status s = stub_->Echo(&context, request, &response);
+ EXPECT_EQ(response.message(), request.message());
+ EXPECT_TRUE(s.ok());
+
+ std::unique_ptr<grpc::testing::duplicate::EchoTestService::Stub> dup_pkg_stub(
+ grpc::testing::duplicate::EchoTestService::NewStub(channel_));
+ ClientContext context2;
+ s = dup_pkg_stub->Echo(&context2, request, &response);
+ EXPECT_EQ("no package", response.message());
+ EXPECT_TRUE(s.ok());
+}
+
+template <class ServiceType>
+void CancelRpc(ClientContext* context, int delay_us, ServiceType* service) {
+ gpr_sleep_until(gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
+ gpr_time_from_micros(delay_us, GPR_TIMESPAN)));
+ while (!service->signal_client()) {
+ }
+ context->TryCancel();
+}
+
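+// ClientContext::TryCancel() (used by the helper above and the tests that
+// follow) is best-effort: if the RPC has already completed it has no effect,
+// which is why tests that race cancellation against normal completion accept
+// more than one final status.
+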
+TEST_P(End2endTest, CancelRpcBeforeStart) {
+ MAYBE_SKIP_TEST;
+ ResetStub();
+ EchoRequest request;
+ EchoResponse response;
+ ClientContext context;
+ request.set_message("hello");
+ context.TryCancel();
+ Status s = stub_->Echo(&context, request, &response);
+ EXPECT_EQ("", response.message());
+ EXPECT_EQ(grpc::StatusCode::CANCELLED, s.error_code());
+ if (GetParam().use_interceptors) {
+ EXPECT_EQ(20, DummyInterceptor::GetNumTimesCancel());
+ }
+}
+
TEST_P(End2endTest, CancelRpcAfterStart) {
- MAYBE_SKIP_TEST;
- ResetStub();
- EchoRequest request;
- EchoResponse response;
- ClientContext context;
- request.set_message("hello");
+ MAYBE_SKIP_TEST;
+ ResetStub();
+ EchoRequest request;
+ EchoResponse response;
+ ClientContext context;
+ request.set_message("hello");
request.mutable_param()->set_server_notify_client_when_started(true);
- request.mutable_param()->set_skip_cancelled_check(true);
- Status s;
- std::thread echo_thread([this, &s, &context, &request, &response] {
- s = stub_->Echo(&context, request, &response);
- EXPECT_EQ(StatusCode::CANCELLED, s.error_code());
- });
+ request.mutable_param()->set_skip_cancelled_check(true);
+ Status s;
+ std::thread echo_thread([this, &s, &context, &request, &response] {
+ s = stub_->Echo(&context, request, &response);
+ EXPECT_EQ(StatusCode::CANCELLED, s.error_code());
+ });
if (!GetParam().callback_server) {
service_.ClientWaitUntilRpcStarted();
} else {
callback_service_.ClientWaitUntilRpcStarted();
}
- context.TryCancel();
+ context.TryCancel();
if (!GetParam().callback_server) {
service_.SignalServerToContinue();
@@ -1235,1123 +1235,1123 @@ TEST_P(End2endTest, CancelRpcAfterStart) {
callback_service_.SignalServerToContinue();
}
- echo_thread.join();
- EXPECT_EQ("", response.message());
- EXPECT_EQ(grpc::StatusCode::CANCELLED, s.error_code());
- if (GetParam().use_interceptors) {
- EXPECT_EQ(20, DummyInterceptor::GetNumTimesCancel());
- }
-}
-
-// Client cancels request stream after sending two messages
-TEST_P(End2endTest, ClientCancelsRequestStream) {
- MAYBE_SKIP_TEST;
- ResetStub();
- EchoRequest request;
- EchoResponse response;
- ClientContext context;
- request.set_message("hello");
-
- auto stream = stub_->RequestStream(&context, &response);
- EXPECT_TRUE(stream->Write(request));
- EXPECT_TRUE(stream->Write(request));
-
- context.TryCancel();
-
- Status s = stream->Finish();
- EXPECT_EQ(grpc::StatusCode::CANCELLED, s.error_code());
-
- EXPECT_EQ(response.message(), "");
- if (GetParam().use_interceptors) {
- EXPECT_EQ(20, DummyInterceptor::GetNumTimesCancel());
- }
-}
-
-// Client cancels server stream after sending some messages
-TEST_P(End2endTest, ClientCancelsResponseStream) {
- MAYBE_SKIP_TEST;
- ResetStub();
- EchoRequest request;
- EchoResponse response;
- ClientContext context;
- request.set_message("hello");
-
- auto stream = stub_->ResponseStream(&context, request);
-
- EXPECT_TRUE(stream->Read(&response));
- EXPECT_EQ(response.message(), request.message() + "0");
- EXPECT_TRUE(stream->Read(&response));
- EXPECT_EQ(response.message(), request.message() + "1");
-
- context.TryCancel();
-
- // The cancellation races with responses, so there might be zero or
-  // one response pending; read till failure
-
- if (stream->Read(&response)) {
- EXPECT_EQ(response.message(), request.message() + "2");
- // Since we have cancelled, we expect the next attempt to read to fail
- EXPECT_FALSE(stream->Read(&response));
- }
-
- Status s = stream->Finish();
-  // The final status could be either CANCELLED or OK, depending on
- // who won the race.
- EXPECT_GE(grpc::StatusCode::CANCELLED, s.error_code());
- if (GetParam().use_interceptors) {
- EXPECT_EQ(20, DummyInterceptor::GetNumTimesCancel());
- }
-}
-
-// Client cancels bidi stream after sending some messages
-TEST_P(End2endTest, ClientCancelsBidi) {
- MAYBE_SKIP_TEST;
- ResetStub();
- EchoRequest request;
- EchoResponse response;
- ClientContext context;
+ echo_thread.join();
+ EXPECT_EQ("", response.message());
+ EXPECT_EQ(grpc::StatusCode::CANCELLED, s.error_code());
+ if (GetParam().use_interceptors) {
+ EXPECT_EQ(20, DummyInterceptor::GetNumTimesCancel());
+ }
+}
+
+// Client cancels request stream after sending two messages
+TEST_P(End2endTest, ClientCancelsRequestStream) {
+ MAYBE_SKIP_TEST;
+ ResetStub();
+ EchoRequest request;
+ EchoResponse response;
+ ClientContext context;
+ request.set_message("hello");
+
+ auto stream = stub_->RequestStream(&context, &response);
+ EXPECT_TRUE(stream->Write(request));
+ EXPECT_TRUE(stream->Write(request));
+
+ context.TryCancel();
+
+ Status s = stream->Finish();
+ EXPECT_EQ(grpc::StatusCode::CANCELLED, s.error_code());
+
+ EXPECT_EQ(response.message(), "");
+ if (GetParam().use_interceptors) {
+ EXPECT_EQ(20, DummyInterceptor::GetNumTimesCancel());
+ }
+}
+
+// Client cancels server stream after sending some messages
+TEST_P(End2endTest, ClientCancelsResponseStream) {
+ MAYBE_SKIP_TEST;
+ ResetStub();
+ EchoRequest request;
+ EchoResponse response;
+ ClientContext context;
+ request.set_message("hello");
+
+ auto stream = stub_->ResponseStream(&context, request);
+
+ EXPECT_TRUE(stream->Read(&response));
+ EXPECT_EQ(response.message(), request.message() + "0");
+ EXPECT_TRUE(stream->Read(&response));
+ EXPECT_EQ(response.message(), request.message() + "1");
+
+ context.TryCancel();
+
+ // The cancellation races with responses, so there might be zero or
+  // one response pending; read till failure
+
+ if (stream->Read(&response)) {
+ EXPECT_EQ(response.message(), request.message() + "2");
+ // Since we have cancelled, we expect the next attempt to read to fail
+ EXPECT_FALSE(stream->Read(&response));
+ }
+
+ Status s = stream->Finish();
+  // The final status could be either CANCELLED or OK, depending on
+ // who won the race.
+ EXPECT_GE(grpc::StatusCode::CANCELLED, s.error_code());
+ if (GetParam().use_interceptors) {
+ EXPECT_EQ(20, DummyInterceptor::GetNumTimesCancel());
+ }
+}
+
+// Client cancels bidi stream after sending some messages
+TEST_P(End2endTest, ClientCancelsBidi) {
+ MAYBE_SKIP_TEST;
+ ResetStub();
+ EchoRequest request;
+ EchoResponse response;
+ ClientContext context;
TString msg("hello");
-
- auto stream = stub_->BidiStream(&context);
-
- request.set_message(msg + "0");
- EXPECT_TRUE(stream->Write(request));
- EXPECT_TRUE(stream->Read(&response));
- EXPECT_EQ(response.message(), request.message());
-
- request.set_message(msg + "1");
- EXPECT_TRUE(stream->Write(request));
-
- context.TryCancel();
-
- // The cancellation races with responses, so there might be zero or
-  // one response pending; read till failure
-
- if (stream->Read(&response)) {
- EXPECT_EQ(response.message(), request.message());
- // Since we have cancelled, we expect the next attempt to read to fail
- EXPECT_FALSE(stream->Read(&response));
- }
-
- Status s = stream->Finish();
- EXPECT_EQ(grpc::StatusCode::CANCELLED, s.error_code());
- if (GetParam().use_interceptors) {
- EXPECT_EQ(20, DummyInterceptor::GetNumTimesCancel());
- }
-}
-
-TEST_P(End2endTest, RpcMaxMessageSize) {
- MAYBE_SKIP_TEST;
- ResetStub();
- EchoRequest request;
- EchoResponse response;
- request.set_message(string(kMaxMessageSize_ * 2, 'a'));
- request.mutable_param()->set_server_die(true);
-
- ClientContext context;
- Status s = stub_->Echo(&context, request, &response);
- EXPECT_FALSE(s.ok());
-}
-
-void ReaderThreadFunc(ClientReaderWriter<EchoRequest, EchoResponse>* stream,
- gpr_event* ev) {
- EchoResponse resp;
- gpr_event_set(ev, (void*)1);
- while (stream->Read(&resp)) {
- gpr_log(GPR_INFO, "Read message");
- }
-}
-
-// Run a Read and a WritesDone simultaneously.
-TEST_P(End2endTest, SimultaneousReadWritesDone) {
- MAYBE_SKIP_TEST;
- ResetStub();
- ClientContext context;
- gpr_event ev;
- gpr_event_init(&ev);
- auto stream = stub_->BidiStream(&context);
- std::thread reader_thread(ReaderThreadFunc, stream.get(), &ev);
- gpr_event_wait(&ev, gpr_inf_future(GPR_CLOCK_REALTIME));
- stream->WritesDone();
- reader_thread.join();
- Status s = stream->Finish();
- EXPECT_TRUE(s.ok());
-}
-
-TEST_P(End2endTest, ChannelState) {
- MAYBE_SKIP_TEST;
- if (GetParam().inproc) {
- return;
- }
-
- ResetStub();
- // Start IDLE
- EXPECT_EQ(GRPC_CHANNEL_IDLE, channel_->GetState(false));
-
- // Did not ask to connect, no state change.
- CompletionQueue cq;
- std::chrono::system_clock::time_point deadline =
- std::chrono::system_clock::now() + std::chrono::milliseconds(10);
- channel_->NotifyOnStateChange(GRPC_CHANNEL_IDLE, deadline, &cq, nullptr);
- void* tag;
- bool ok = true;
- cq.Next(&tag, &ok);
- EXPECT_FALSE(ok);
-
- EXPECT_EQ(GRPC_CHANNEL_IDLE, channel_->GetState(true));
- EXPECT_TRUE(channel_->WaitForStateChange(GRPC_CHANNEL_IDLE,
- gpr_inf_future(GPR_CLOCK_REALTIME)));
- auto state = channel_->GetState(false);
- EXPECT_TRUE(state == GRPC_CHANNEL_CONNECTING || state == GRPC_CHANNEL_READY);
-}
-
-// Takes 10s.
-TEST_P(End2endTest, ChannelStateTimeout) {
- if ((GetParam().credentials_type != kInsecureCredentialsType) ||
- GetParam().inproc) {
- return;
- }
- int port = grpc_pick_unused_port_or_die();
- std::ostringstream server_address;
- server_address << "127.0.0.1:" << port;
- // Channel to non-existing server
- auto channel =
- grpc::CreateChannel(server_address.str(), InsecureChannelCredentials());
- // Start IDLE
- EXPECT_EQ(GRPC_CHANNEL_IDLE, channel->GetState(true));
-
- auto state = GRPC_CHANNEL_IDLE;
- for (int i = 0; i < 10; i++) {
- channel->WaitForStateChange(
- state, std::chrono::system_clock::now() + std::chrono::seconds(1));
- state = channel->GetState(false);
- }
-}
-
-// Talking to a non-existing service.
-TEST_P(End2endTest, NonExistingService) {
- MAYBE_SKIP_TEST;
- ResetChannel();
- std::unique_ptr<grpc::testing::UnimplementedEchoService::Stub> stub;
- stub = grpc::testing::UnimplementedEchoService::NewStub(channel_);
-
- EchoRequest request;
- EchoResponse response;
- request.set_message("Hello");
-
- ClientContext context;
- Status s = stub->Unimplemented(&context, request, &response);
- EXPECT_EQ(StatusCode::UNIMPLEMENTED, s.error_code());
- EXPECT_EQ("", s.error_message());
-}
-
-// Ask the server to send back a serialized proto in trailer.
-// This is an example of setting error details.
-TEST_P(End2endTest, BinaryTrailerTest) {
- MAYBE_SKIP_TEST;
- ResetStub();
- EchoRequest request;
- EchoResponse response;
- ClientContext context;
-
- request.mutable_param()->set_echo_metadata(true);
- DebugInfo* info = request.mutable_param()->mutable_debug_info();
- info->add_stack_entries("stack_entry_1");
- info->add_stack_entries("stack_entry_2");
- info->add_stack_entries("stack_entry_3");
- info->set_detail("detailed debug info");
+
+ auto stream = stub_->BidiStream(&context);
+
+ request.set_message(msg + "0");
+ EXPECT_TRUE(stream->Write(request));
+ EXPECT_TRUE(stream->Read(&response));
+ EXPECT_EQ(response.message(), request.message());
+
+ request.set_message(msg + "1");
+ EXPECT_TRUE(stream->Write(request));
+
+ context.TryCancel();
+
+ // The cancellation races with responses, so there might be zero or
+  // one response pending; read till failure
+
+ if (stream->Read(&response)) {
+ EXPECT_EQ(response.message(), request.message());
+ // Since we have cancelled, we expect the next attempt to read to fail
+ EXPECT_FALSE(stream->Read(&response));
+ }
+
+ Status s = stream->Finish();
+ EXPECT_EQ(grpc::StatusCode::CANCELLED, s.error_code());
+ if (GetParam().use_interceptors) {
+ EXPECT_EQ(20, DummyInterceptor::GetNumTimesCancel());
+ }
+}
+
+TEST_P(End2endTest, RpcMaxMessageSize) {
+ MAYBE_SKIP_TEST;
+ ResetStub();
+ EchoRequest request;
+ EchoResponse response;
+ request.set_message(string(kMaxMessageSize_ * 2, 'a'));
+ request.mutable_param()->set_server_die(true);
+
+ ClientContext context;
+ Status s = stub_->Echo(&context, request, &response);
+ EXPECT_FALSE(s.ok());
+}
+
+void ReaderThreadFunc(ClientReaderWriter<EchoRequest, EchoResponse>* stream,
+ gpr_event* ev) {
+ EchoResponse resp;
+ gpr_event_set(ev, (void*)1);
+ while (stream->Read(&resp)) {
+ gpr_log(GPR_INFO, "Read message");
+ }
+}
+
+// Run a Read and a WritesDone simultaneously.
+TEST_P(End2endTest, SimultaneousReadWritesDone) {
+ MAYBE_SKIP_TEST;
+ ResetStub();
+ ClientContext context;
+ gpr_event ev;
+ gpr_event_init(&ev);
+ auto stream = stub_->BidiStream(&context);
+ std::thread reader_thread(ReaderThreadFunc, stream.get(), &ev);
+ gpr_event_wait(&ev, gpr_inf_future(GPR_CLOCK_REALTIME));
+ stream->WritesDone();
+ reader_thread.join();
+ Status s = stream->Finish();
+ EXPECT_TRUE(s.ok());
+}
+
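+// The test above relies on the thread-safety contract of the synchronous
+// streaming API (summarized here, hedged): one thread may Read() while
+// another thread Write()s or calls WritesDone() on the same stream, but
+// concurrent Read()s, or concurrent Write()s, are not allowed.
+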
+TEST_P(End2endTest, ChannelState) {
+ MAYBE_SKIP_TEST;
+ if (GetParam().inproc) {
+ return;
+ }
+
+ ResetStub();
+ // Start IDLE
+ EXPECT_EQ(GRPC_CHANNEL_IDLE, channel_->GetState(false));
+
+ // Did not ask to connect, no state change.
+ CompletionQueue cq;
+ std::chrono::system_clock::time_point deadline =
+ std::chrono::system_clock::now() + std::chrono::milliseconds(10);
+ channel_->NotifyOnStateChange(GRPC_CHANNEL_IDLE, deadline, &cq, nullptr);
+ void* tag;
+ bool ok = true;
+ cq.Next(&tag, &ok);
+ EXPECT_FALSE(ok);
+
+ EXPECT_EQ(GRPC_CHANNEL_IDLE, channel_->GetState(true));
+ EXPECT_TRUE(channel_->WaitForStateChange(GRPC_CHANNEL_IDLE,
+ gpr_inf_future(GPR_CLOCK_REALTIME)));
+ auto state = channel_->GetState(false);
+ EXPECT_TRUE(state == GRPC_CHANNEL_CONNECTING || state == GRPC_CHANNEL_READY);
+}
+
+// Takes 10s.
+TEST_P(End2endTest, ChannelStateTimeout) {
+ if ((GetParam().credentials_type != kInsecureCredentialsType) ||
+ GetParam().inproc) {
+ return;
+ }
+ int port = grpc_pick_unused_port_or_die();
+ std::ostringstream server_address;
+ server_address << "127.0.0.1:" << port;
+ // Channel to non-existing server
+ auto channel =
+ grpc::CreateChannel(server_address.str(), InsecureChannelCredentials());
+ // Start IDLE
+ EXPECT_EQ(GRPC_CHANNEL_IDLE, channel->GetState(true));
+
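+ // Nothing is listening on the chosen port, so the channel never becomes READY;
+ // just poll for state changes for roughly 10 seconds.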
+ auto state = GRPC_CHANNEL_IDLE;
+ for (int i = 0; i < 10; i++) {
+ channel->WaitForStateChange(
+ state, std::chrono::system_clock::now() + std::chrono::seconds(1));
+ state = channel->GetState(false);
+ }
+}
+
+// Talking to a non-existing service.
+TEST_P(End2endTest, NonExistingService) {
+ MAYBE_SKIP_TEST;
+ ResetChannel();
+ std::unique_ptr<grpc::testing::UnimplementedEchoService::Stub> stub;
+ stub = grpc::testing::UnimplementedEchoService::NewStub(channel_);
+
+ EchoRequest request;
+ EchoResponse response;
+ request.set_message("Hello");
+
+ ClientContext context;
+ Status s = stub->Unimplemented(&context, request, &response);
+ EXPECT_EQ(StatusCode::UNIMPLEMENTED, s.error_code());
+ EXPECT_EQ("", s.error_message());
+}
+
+// Ask the server to send back a serialized proto in the trailer.
+// This is an example of setting error details.
+TEST_P(End2endTest, BinaryTrailerTest) {
+ MAYBE_SKIP_TEST;
+ ResetStub();
+ EchoRequest request;
+ EchoResponse response;
+ ClientContext context;
+
+ request.mutable_param()->set_echo_metadata(true);
+ DebugInfo* info = request.mutable_param()->mutable_debug_info();
+ info->add_stack_entries("stack_entry_1");
+ info->add_stack_entries("stack_entry_2");
+ info->add_stack_entries("stack_entry_3");
+ info->set_detail("detailed debug info");
TString expected_string = info->SerializeAsString();
- request.set_message("Hello");
-
- Status s = stub_->Echo(&context, request, &response);
- EXPECT_FALSE(s.ok());
- auto trailers = context.GetServerTrailingMetadata();
- EXPECT_EQ(1u, trailers.count(kDebugInfoTrailerKey));
- auto iter = trailers.find(kDebugInfoTrailerKey);
- EXPECT_EQ(expected_string, iter->second);
- // Parse the returned trailer into a DebugInfo proto.
- DebugInfo returned_info;
- EXPECT_TRUE(returned_info.ParseFromString(ToString(iter->second)));
-}
-
-TEST_P(End2endTest, ExpectErrorTest) {
- MAYBE_SKIP_TEST;
- ResetStub();
-
- std::vector<ErrorStatus> expected_status;
- expected_status.emplace_back();
- expected_status.back().set_code(13); // INTERNAL
- // No Error message or details
-
- expected_status.emplace_back();
- expected_status.back().set_code(13); // INTERNAL
- expected_status.back().set_error_message("text error message");
- expected_status.back().set_binary_error_details("text error details");
-
- expected_status.emplace_back();
- expected_status.back().set_code(13); // INTERNAL
- expected_status.back().set_error_message("text error message");
- expected_status.back().set_binary_error_details(
- "\x0\x1\x2\x3\x4\x5\x6\x8\x9\xA\xB");
-
- for (auto iter = expected_status.begin(); iter != expected_status.end();
- ++iter) {
- EchoRequest request;
- EchoResponse response;
- ClientContext context;
- request.set_message("Hello");
- auto* error = request.mutable_param()->mutable_expected_error();
- error->set_code(iter->code());
- error->set_error_message(iter->error_message());
- error->set_binary_error_details(iter->binary_error_details());
-
- Status s = stub_->Echo(&context, request, &response);
- EXPECT_FALSE(s.ok());
- EXPECT_EQ(iter->code(), s.error_code());
- EXPECT_EQ(iter->error_message(), s.error_message());
- EXPECT_EQ(iter->binary_error_details(), s.error_details());
- EXPECT_TRUE(context.debug_error_string().find("created") !=
+ request.set_message("Hello");
+
+ Status s = stub_->Echo(&context, request, &response);
+ EXPECT_FALSE(s.ok());
+ auto trailers = context.GetServerTrailingMetadata();
+ EXPECT_EQ(1u, trailers.count(kDebugInfoTrailerKey));
+ auto iter = trailers.find(kDebugInfoTrailerKey);
+ EXPECT_EQ(expected_string, iter->second);
+ // Parse the returned trailer into a DebugInfo proto.
+ DebugInfo returned_info;
+ EXPECT_TRUE(returned_info.ParseFromString(ToString(iter->second)));
+}
+
+TEST_P(End2endTest, ExpectErrorTest) {
+ MAYBE_SKIP_TEST;
+ ResetStub();
+
+ std::vector<ErrorStatus> expected_status;
+ expected_status.emplace_back();
+ expected_status.back().set_code(13); // INTERNAL
+ // No Error message or details
+
+ expected_status.emplace_back();
+ expected_status.back().set_code(13); // INTERNAL
+ expected_status.back().set_error_message("text error message");
+ expected_status.back().set_binary_error_details("text error details");
+
+ expected_status.emplace_back();
+ expected_status.back().set_code(13); // INTERNAL
+ expected_status.back().set_error_message("text error message");
+ expected_status.back().set_binary_error_details(
+ "\x0\x1\x2\x3\x4\x5\x6\x8\x9\xA\xB");
+
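+ // For each expected status, ask the server to fail with it and verify that the
+ // client observes the same code, message, and binary details.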
+ for (auto iter = expected_status.begin(); iter != expected_status.end();
+ ++iter) {
+ EchoRequest request;
+ EchoResponse response;
+ ClientContext context;
+ request.set_message("Hello");
+ auto* error = request.mutable_param()->mutable_expected_error();
+ error->set_code(iter->code());
+ error->set_error_message(iter->error_message());
+ error->set_binary_error_details(iter->binary_error_details());
+
+ Status s = stub_->Echo(&context, request, &response);
+ EXPECT_FALSE(s.ok());
+ EXPECT_EQ(iter->code(), s.error_code());
+ EXPECT_EQ(iter->error_message(), s.error_message());
+ EXPECT_EQ(iter->binary_error_details(), s.error_details());
+ EXPECT_TRUE(context.debug_error_string().find("created") !=
TString::npos);
EXPECT_TRUE(context.debug_error_string().find("file") != TString::npos);
EXPECT_TRUE(context.debug_error_string().find("line") != TString::npos);
- EXPECT_TRUE(context.debug_error_string().find("status") !=
+ EXPECT_TRUE(context.debug_error_string().find("status") !=
TString::npos);
EXPECT_TRUE(context.debug_error_string().find("13") != TString::npos);
- }
-}
-
-//////////////////////////////////////////////////////////////////////////
-// Test with and without a proxy.
-class ProxyEnd2endTest : public End2endTest {
- protected:
-};
-
-TEST_P(ProxyEnd2endTest, SimpleRpc) {
- MAYBE_SKIP_TEST;
- ResetStub();
- SendRpc(stub_.get(), 1, false);
-}
-
-TEST_P(ProxyEnd2endTest, SimpleRpcWithEmptyMessages) {
- MAYBE_SKIP_TEST;
- ResetStub();
- EchoRequest request;
- EchoResponse response;
-
- ClientContext context;
- Status s = stub_->Echo(&context, request, &response);
- EXPECT_TRUE(s.ok());
-}
-
-TEST_P(ProxyEnd2endTest, MultipleRpcs) {
- MAYBE_SKIP_TEST;
- ResetStub();
- std::vector<std::thread> threads;
- threads.reserve(10);
- for (int i = 0; i < 10; ++i) {
- threads.emplace_back(SendRpc, stub_.get(), 10, false);
- }
- for (int i = 0; i < 10; ++i) {
- threads[i].join();
- }
-}
-
-// Set a 1ms deadline and make sure the proper error is returned.
-TEST_P(ProxyEnd2endTest, RpcDeadlineExpires) {
- MAYBE_SKIP_TEST;
- ResetStub();
- EchoRequest request;
- EchoResponse response;
- request.set_message("Hello");
- request.mutable_param()->set_skip_cancelled_check(true);
- // Let server sleep for 40 ms first to guarantee expiry.
- // 40 ms might seem a bit extreme but the timer manager would have been just
- // initialized (when ResetStub() was called) and there are some warmup costs
- // i.e. the timer thread may not have even started. There might also be other
- // delays in the timer manager thread (in acquiring locks, timer data
- // structure manipulations, starting backup timer threads) that add to the
- // delays. 40ms is still not enough in some cases but this significantly
- // reduces the test flakes
- request.mutable_param()->set_server_sleep_us(40 * 1000);
-
- ClientContext context;
- std::chrono::system_clock::time_point deadline =
- std::chrono::system_clock::now() + std::chrono::milliseconds(1);
- context.set_deadline(deadline);
- Status s = stub_->Echo(&context, request, &response);
- EXPECT_EQ(StatusCode::DEADLINE_EXCEEDED, s.error_code());
-}
-
-// Set a long but finite deadline.
-TEST_P(ProxyEnd2endTest, RpcLongDeadline) {
- MAYBE_SKIP_TEST;
- ResetStub();
- EchoRequest request;
- EchoResponse response;
- request.set_message("Hello");
-
- ClientContext context;
- std::chrono::system_clock::time_point deadline =
- std::chrono::system_clock::now() + std::chrono::hours(1);
- context.set_deadline(deadline);
- Status s = stub_->Echo(&context, request, &response);
- EXPECT_EQ(response.message(), request.message());
- EXPECT_TRUE(s.ok());
-}
-
-// Ask server to echo back the deadline it sees.
-TEST_P(ProxyEnd2endTest, EchoDeadline) {
- MAYBE_SKIP_TEST;
- ResetStub();
- EchoRequest request;
- EchoResponse response;
- request.set_message("Hello");
- request.mutable_param()->set_echo_deadline(true);
-
- ClientContext context;
- std::chrono::system_clock::time_point deadline =
- std::chrono::system_clock::now() + std::chrono::seconds(100);
- context.set_deadline(deadline);
- Status s = stub_->Echo(&context, request, &response);
- EXPECT_EQ(response.message(), request.message());
- EXPECT_TRUE(s.ok());
- gpr_timespec sent_deadline;
- Timepoint2Timespec(deadline, &sent_deadline);
- // We want to allow some reasonable error given:
- // - request_deadline() only has 1sec resolution so the best we can do is +-1
- // - if sent_deadline.tv_nsec is very close to the next second's boundary we
- // can end up being off by 2 in one direction.
- EXPECT_LE(response.param().request_deadline() - sent_deadline.tv_sec, 2);
- EXPECT_GE(response.param().request_deadline() - sent_deadline.tv_sec, -1);
-}
-
-// Ask server to echo back the deadline it sees. The rpc has no deadline.
-TEST_P(ProxyEnd2endTest, EchoDeadlineForNoDeadlineRpc) {
- MAYBE_SKIP_TEST;
- ResetStub();
- EchoRequest request;
- EchoResponse response;
- request.set_message("Hello");
- request.mutable_param()->set_echo_deadline(true);
-
- ClientContext context;
- Status s = stub_->Echo(&context, request, &response);
- EXPECT_EQ(response.message(), request.message());
- EXPECT_TRUE(s.ok());
- EXPECT_EQ(response.param().request_deadline(),
- gpr_inf_future(GPR_CLOCK_REALTIME).tv_sec);
-}
-
-TEST_P(ProxyEnd2endTest, UnimplementedRpc) {
- MAYBE_SKIP_TEST;
- ResetStub();
- EchoRequest request;
- EchoResponse response;
- request.set_message("Hello");
-
- ClientContext context;
- Status s = stub_->Unimplemented(&context, request, &response);
- EXPECT_FALSE(s.ok());
- EXPECT_EQ(s.error_code(), grpc::StatusCode::UNIMPLEMENTED);
- EXPECT_EQ(s.error_message(), "");
- EXPECT_EQ(response.message(), "");
-}
-
-// Client cancels rpc after 10ms
-TEST_P(ProxyEnd2endTest, ClientCancelsRpc) {
- MAYBE_SKIP_TEST;
- ResetStub();
- EchoRequest request;
- EchoResponse response;
- request.set_message("Hello");
- const int kCancelDelayUs = 10 * 1000;
- request.mutable_param()->set_client_cancel_after_us(kCancelDelayUs);
-
- ClientContext context;
- std::thread cancel_thread;
- if (!GetParam().callback_server) {
- cancel_thread = std::thread(
- [&context, this](int delay) { CancelRpc(&context, delay, &service_); },
- kCancelDelayUs);
- // Note: the unusual pattern above (and below) is caused by a conflict
- // between two sets of compiler expectations. clang allows const to be
- // captured without mention, so there is no need to capture kCancelDelayUs
- // (and indeed clang-tidy complains if you do so). OTOH, a Windows compiler
- // in our tests requires an explicit capture even for const. We square this
- // circle by passing the const value in as an argument to the lambda.
- } else {
- cancel_thread = std::thread(
- [&context, this](int delay) {
- CancelRpc(&context, delay, &callback_service_);
- },
- kCancelDelayUs);
- }
- Status s = stub_->Echo(&context, request, &response);
- cancel_thread.join();
- EXPECT_EQ(StatusCode::CANCELLED, s.error_code());
- EXPECT_EQ(s.error_message(), "Cancelled");
-}
-
-// Server cancels rpc after 1ms
-TEST_P(ProxyEnd2endTest, ServerCancelsRpc) {
- MAYBE_SKIP_TEST;
- ResetStub();
- EchoRequest request;
- EchoResponse response;
- request.set_message("Hello");
- request.mutable_param()->set_server_cancel_after_us(1000);
-
- ClientContext context;
- Status s = stub_->Echo(&context, request, &response);
- EXPECT_EQ(StatusCode::CANCELLED, s.error_code());
- EXPECT_TRUE(s.error_message().empty());
-}
-
-// Make the response larger than the flow control window.
-TEST_P(ProxyEnd2endTest, HugeResponse) {
- MAYBE_SKIP_TEST;
- ResetStub();
- EchoRequest request;
- EchoResponse response;
- request.set_message("huge response");
- const size_t kResponseSize = 1024 * (1024 + 10);
- request.mutable_param()->set_response_message_length(kResponseSize);
-
- ClientContext context;
- std::chrono::system_clock::time_point deadline =
- std::chrono::system_clock::now() + std::chrono::seconds(20);
- context.set_deadline(deadline);
- Status s = stub_->Echo(&context, request, &response);
- EXPECT_EQ(kResponseSize, response.message().size());
- EXPECT_TRUE(s.ok());
-}
-
-TEST_P(ProxyEnd2endTest, Peer) {
- MAYBE_SKIP_TEST;
- // Peer is not meaningful for inproc
- if (GetParam().inproc) {
- return;
- }
- ResetStub();
- EchoRequest request;
- EchoResponse response;
- request.set_message("hello");
- request.mutable_param()->set_echo_peer(true);
-
- ClientContext context;
- Status s = stub_->Echo(&context, request, &response);
- EXPECT_EQ(response.message(), request.message());
- EXPECT_TRUE(s.ok());
- EXPECT_TRUE(CheckIsLocalhost(response.param().peer()));
- EXPECT_TRUE(CheckIsLocalhost(context.peer()));
-}
-
-//////////////////////////////////////////////////////////////////////////
-class SecureEnd2endTest : public End2endTest {
- protected:
- SecureEnd2endTest() {
- GPR_ASSERT(!GetParam().use_proxy);
- GPR_ASSERT(GetParam().credentials_type != kInsecureCredentialsType);
- }
-};
-
-TEST_P(SecureEnd2endTest, SimpleRpcWithHost) {
- MAYBE_SKIP_TEST;
- ResetStub();
-
- EchoRequest request;
- EchoResponse response;
- request.set_message("Hello");
-
- ClientContext context;
- context.set_authority("foo.test.youtube.com");
- Status s = stub_->Echo(&context, request, &response);
- EXPECT_EQ(response.message(), request.message());
- EXPECT_TRUE(response.has_param());
- EXPECT_EQ("special", response.param().host());
- EXPECT_TRUE(s.ok());
-}
-
-bool MetadataContains(
- const std::multimap<grpc::string_ref, grpc::string_ref>& metadata,
+ }
+}
+
+//////////////////////////////////////////////////////////////////////////
+// Test with and without a proxy.
+class ProxyEnd2endTest : public End2endTest {
+ protected:
+};
+
+TEST_P(ProxyEnd2endTest, SimpleRpc) {
+ MAYBE_SKIP_TEST;
+ ResetStub();
+ SendRpc(stub_.get(), 1, false);
+}
+
+TEST_P(ProxyEnd2endTest, SimpleRpcWithEmptyMessages) {
+ MAYBE_SKIP_TEST;
+ ResetStub();
+ EchoRequest request;
+ EchoResponse response;
+
+ ClientContext context;
+ Status s = stub_->Echo(&context, request, &response);
+ EXPECT_TRUE(s.ok());
+}
+
+TEST_P(ProxyEnd2endTest, MultipleRpcs) {
+ MAYBE_SKIP_TEST;
+ ResetStub();
+ std::vector<std::thread> threads;
+ threads.reserve(10);
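+ // Launch 10 threads that issue echo RPCs concurrently on the shared stub.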
+ for (int i = 0; i < 10; ++i) {
+ threads.emplace_back(SendRpc, stub_.get(), 10, false);
+ }
+ for (int i = 0; i < 10; ++i) {
+ threads[i].join();
+ }
+}
+
+// Set a 1ms deadline and make sure the proper error is returned.
+TEST_P(ProxyEnd2endTest, RpcDeadlineExpires) {
+ MAYBE_SKIP_TEST;
+ ResetStub();
+ EchoRequest request;
+ EchoResponse response;
+ request.set_message("Hello");
+ request.mutable_param()->set_skip_cancelled_check(true);
+ // Let server sleep for 40 ms first to guarantee expiry.
+ // 40 ms might seem a bit extreme but the timer manager would have been just
+ // initialized (when ResetStub() was called) and there are some warmup costs
+ // i.e. the timer thread may not have even started. There might also be other
+ // delays in the timer manager thread (in acquiring locks, timer data
+ // structure manipulations, starting backup timer threads) that add to the
+ // delays. 40ms is still not enough in some cases but this significantly
+ // reduces the test flakes
+ request.mutable_param()->set_server_sleep_us(40 * 1000);
+
+ ClientContext context;
+ std::chrono::system_clock::time_point deadline =
+ std::chrono::system_clock::now() + std::chrono::milliseconds(1);
+ context.set_deadline(deadline);
+ Status s = stub_->Echo(&context, request, &response);
+ EXPECT_EQ(StatusCode::DEADLINE_EXCEEDED, s.error_code());
+}
+
+// Set a long but finite deadline.
+TEST_P(ProxyEnd2endTest, RpcLongDeadline) {
+ MAYBE_SKIP_TEST;
+ ResetStub();
+ EchoRequest request;
+ EchoResponse response;
+ request.set_message("Hello");
+
+ ClientContext context;
+ std::chrono::system_clock::time_point deadline =
+ std::chrono::system_clock::now() + std::chrono::hours(1);
+ context.set_deadline(deadline);
+ Status s = stub_->Echo(&context, request, &response);
+ EXPECT_EQ(response.message(), request.message());
+ EXPECT_TRUE(s.ok());
+}
+
+// Ask server to echo back the deadline it sees.
+TEST_P(ProxyEnd2endTest, EchoDeadline) {
+ MAYBE_SKIP_TEST;
+ ResetStub();
+ EchoRequest request;
+ EchoResponse response;
+ request.set_message("Hello");
+ request.mutable_param()->set_echo_deadline(true);
+
+ ClientContext context;
+ std::chrono::system_clock::time_point deadline =
+ std::chrono::system_clock::now() + std::chrono::seconds(100);
+ context.set_deadline(deadline);
+ Status s = stub_->Echo(&context, request, &response);
+ EXPECT_EQ(response.message(), request.message());
+ EXPECT_TRUE(s.ok());
+ gpr_timespec sent_deadline;
+ Timepoint2Timespec(deadline, &sent_deadline);
+ // We want to allow some reasonable error given:
+ // - request_deadline() only has 1sec resolution so the best we can do is +-1
+ // - if sent_deadline.tv_nsec is very close to the next second's boundary we
+ // can end up being off by 2 in one direction.
+ EXPECT_LE(response.param().request_deadline() - sent_deadline.tv_sec, 2);
+ EXPECT_GE(response.param().request_deadline() - sent_deadline.tv_sec, -1);
+}
+
+// Ask server to echo back the deadline it sees. The rpc has no deadline.
+TEST_P(ProxyEnd2endTest, EchoDeadlineForNoDeadlineRpc) {
+ MAYBE_SKIP_TEST;
+ ResetStub();
+ EchoRequest request;
+ EchoResponse response;
+ request.set_message("Hello");
+ request.mutable_param()->set_echo_deadline(true);
+
+ ClientContext context;
+ Status s = stub_->Echo(&context, request, &response);
+ EXPECT_EQ(response.message(), request.message());
+ EXPECT_TRUE(s.ok());
+ EXPECT_EQ(response.param().request_deadline(),
+ gpr_inf_future(GPR_CLOCK_REALTIME).tv_sec);
+}
+
+TEST_P(ProxyEnd2endTest, UnimplementedRpc) {
+ MAYBE_SKIP_TEST;
+ ResetStub();
+ EchoRequest request;
+ EchoResponse response;
+ request.set_message("Hello");
+
+ ClientContext context;
+ Status s = stub_->Unimplemented(&context, request, &response);
+ EXPECT_FALSE(s.ok());
+ EXPECT_EQ(s.error_code(), grpc::StatusCode::UNIMPLEMENTED);
+ EXPECT_EQ(s.error_message(), "");
+ EXPECT_EQ(response.message(), "");
+}
+
+// Client cancels rpc after 10ms
+TEST_P(ProxyEnd2endTest, ClientCancelsRpc) {
+ MAYBE_SKIP_TEST;
+ ResetStub();
+ EchoRequest request;
+ EchoResponse response;
+ request.set_message("Hello");
+ const int kCancelDelayUs = 10 * 1000;
+ request.mutable_param()->set_client_cancel_after_us(kCancelDelayUs);
+
+ ClientContext context;
+ std::thread cancel_thread;
+ if (!GetParam().callback_server) {
+ cancel_thread = std::thread(
+ [&context, this](int delay) { CancelRpc(&context, delay, &service_); },
+ kCancelDelayUs);
+ // Note: the unusual pattern above (and below) is caused by a conflict
+ // between two sets of compiler expectations. clang allows const to be
+ // captured without mention, so there is no need to capture kCancelDelayUs
+ // (and indeed clang-tidy complains if you do so). OTOH, a Windows compiler
+ // in our tests requires an explicit capture even for const. We square this
+ // circle by passing the const value in as an argument to the lambda.
+ } else {
+ cancel_thread = std::thread(
+ [&context, this](int delay) {
+ CancelRpc(&context, delay, &callback_service_);
+ },
+ kCancelDelayUs);
+ }
+ Status s = stub_->Echo(&context, request, &response);
+ cancel_thread.join();
+ EXPECT_EQ(StatusCode::CANCELLED, s.error_code());
+ EXPECT_EQ(s.error_message(), "Cancelled");
+}
+
+// Server cancels rpc after 1ms
+TEST_P(ProxyEnd2endTest, ServerCancelsRpc) {
+ MAYBE_SKIP_TEST;
+ ResetStub();
+ EchoRequest request;
+ EchoResponse response;
+ request.set_message("Hello");
+ request.mutable_param()->set_server_cancel_after_us(1000);
+
+ ClientContext context;
+ Status s = stub_->Echo(&context, request, &response);
+ EXPECT_EQ(StatusCode::CANCELLED, s.error_code());
+ EXPECT_TRUE(s.error_message().empty());
+}
+
+// Make the response larger than the flow control window.
+TEST_P(ProxyEnd2endTest, HugeResponse) {
+ MAYBE_SKIP_TEST;
+ ResetStub();
+ EchoRequest request;
+ EchoResponse response;
+ request.set_message("huge response");
+ const size_t kResponseSize = 1024 * (1024 + 10);
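+ // kResponseSize works out to just over 1 MiB (1024 * 1034 bytes).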
+ request.mutable_param()->set_response_message_length(kResponseSize);
+
+ ClientContext context;
+ std::chrono::system_clock::time_point deadline =
+ std::chrono::system_clock::now() + std::chrono::seconds(20);
+ context.set_deadline(deadline);
+ Status s = stub_->Echo(&context, request, &response);
+ EXPECT_EQ(kResponseSize, response.message().size());
+ EXPECT_TRUE(s.ok());
+}
+
+TEST_P(ProxyEnd2endTest, Peer) {
+ MAYBE_SKIP_TEST;
+ // Peer is not meaningful for inproc
+ if (GetParam().inproc) {
+ return;
+ }
+ ResetStub();
+ EchoRequest request;
+ EchoResponse response;
+ request.set_message("hello");
+ request.mutable_param()->set_echo_peer(true);
+
+ ClientContext context;
+ Status s = stub_->Echo(&context, request, &response);
+ EXPECT_EQ(response.message(), request.message());
+ EXPECT_TRUE(s.ok());
+ EXPECT_TRUE(CheckIsLocalhost(response.param().peer()));
+ EXPECT_TRUE(CheckIsLocalhost(context.peer()));
+}
+
+//////////////////////////////////////////////////////////////////////////
+class SecureEnd2endTest : public End2endTest {
+ protected:
+ SecureEnd2endTest() {
+ GPR_ASSERT(!GetParam().use_proxy);
+ GPR_ASSERT(GetParam().credentials_type != kInsecureCredentialsType);
+ }
+};
+
+TEST_P(SecureEnd2endTest, SimpleRpcWithHost) {
+ MAYBE_SKIP_TEST;
+ ResetStub();
+
+ EchoRequest request;
+ EchoResponse response;
+ request.set_message("Hello");
+
+ ClientContext context;
+ context.set_authority("foo.test.youtube.com");
+ Status s = stub_->Echo(&context, request, &response);
+ EXPECT_EQ(response.message(), request.message());
+ EXPECT_TRUE(response.has_param());
+ EXPECT_EQ("special", response.param().host());
+ EXPECT_TRUE(s.ok());
+}
+
+bool MetadataContains(
+ const std::multimap<grpc::string_ref, grpc::string_ref>& metadata,
const TString& key, const TString& value) {
- int count = 0;
-
- for (std::multimap<grpc::string_ref, grpc::string_ref>::const_iterator iter =
- metadata.begin();
- iter != metadata.end(); ++iter) {
- if (ToString(iter->first) == key && ToString(iter->second) == value) {
- count++;
- }
- }
- return count == 1;
-}
-
-TEST_P(SecureEnd2endTest, BlockingAuthMetadataPluginAndProcessorSuccess) {
- MAYBE_SKIP_TEST;
- auto* processor = new TestAuthMetadataProcessor(true);
- StartServer(std::shared_ptr<AuthMetadataProcessor>(processor));
- ResetStub();
- EchoRequest request;
- EchoResponse response;
- ClientContext context;
- context.set_credentials(processor->GetCompatibleClientCreds());
- request.set_message("Hello");
- request.mutable_param()->set_echo_metadata(true);
- request.mutable_param()->set_expected_client_identity(
- TestAuthMetadataProcessor::kGoodGuy);
- request.mutable_param()->set_expected_transport_security_type(
- GetParam().credentials_type);
-
- Status s = stub_->Echo(&context, request, &response);
- EXPECT_EQ(request.message(), response.message());
- EXPECT_TRUE(s.ok());
-
- // Metadata should have been consumed by the processor.
- EXPECT_FALSE(MetadataContains(
- context.GetServerTrailingMetadata(), GRPC_AUTHORIZATION_METADATA_KEY,
+ int count = 0;
+
+ for (std::multimap<grpc::string_ref, grpc::string_ref>::const_iterator iter =
+ metadata.begin();
+ iter != metadata.end(); ++iter) {
+ if (ToString(iter->first) == key && ToString(iter->second) == value) {
+ count++;
+ }
+ }
+ return count == 1;
+}
+
+TEST_P(SecureEnd2endTest, BlockingAuthMetadataPluginAndProcessorSuccess) {
+ MAYBE_SKIP_TEST;
+ auto* processor = new TestAuthMetadataProcessor(true);
+ StartServer(std::shared_ptr<AuthMetadataProcessor>(processor));
+ ResetStub();
+ EchoRequest request;
+ EchoResponse response;
+ ClientContext context;
+ context.set_credentials(processor->GetCompatibleClientCreds());
+ request.set_message("Hello");
+ request.mutable_param()->set_echo_metadata(true);
+ request.mutable_param()->set_expected_client_identity(
+ TestAuthMetadataProcessor::kGoodGuy);
+ request.mutable_param()->set_expected_transport_security_type(
+ GetParam().credentials_type);
+
+ Status s = stub_->Echo(&context, request, &response);
+ EXPECT_EQ(request.message(), response.message());
+ EXPECT_TRUE(s.ok());
+
+ // Metadata should have been consumed by the processor.
+ EXPECT_FALSE(MetadataContains(
+ context.GetServerTrailingMetadata(), GRPC_AUTHORIZATION_METADATA_KEY,
TString("Bearer ") + TestAuthMetadataProcessor::kGoodGuy));
-}
-
-TEST_P(SecureEnd2endTest, BlockingAuthMetadataPluginAndProcessorFailure) {
- MAYBE_SKIP_TEST;
- auto* processor = new TestAuthMetadataProcessor(true);
- StartServer(std::shared_ptr<AuthMetadataProcessor>(processor));
- ResetStub();
- EchoRequest request;
- EchoResponse response;
- ClientContext context;
- context.set_credentials(processor->GetIncompatibleClientCreds());
- request.set_message("Hello");
-
- Status s = stub_->Echo(&context, request, &response);
- EXPECT_FALSE(s.ok());
- EXPECT_EQ(s.error_code(), StatusCode::UNAUTHENTICATED);
-}
-
-TEST_P(SecureEnd2endTest, SetPerCallCredentials) {
- MAYBE_SKIP_TEST;
- ResetStub();
- EchoRequest request;
- EchoResponse response;
- ClientContext context;
- std::shared_ptr<CallCredentials> creds =
+}
+
+TEST_P(SecureEnd2endTest, BlockingAuthMetadataPluginAndProcessorFailure) {
+ MAYBE_SKIP_TEST;
+ auto* processor = new TestAuthMetadataProcessor(true);
+ StartServer(std::shared_ptr<AuthMetadataProcessor>(processor));
+ ResetStub();
+ EchoRequest request;
+ EchoResponse response;
+ ClientContext context;
+ context.set_credentials(processor->GetIncompatibleClientCreds());
+ request.set_message("Hello");
+
+ Status s = stub_->Echo(&context, request, &response);
+ EXPECT_FALSE(s.ok());
+ EXPECT_EQ(s.error_code(), StatusCode::UNAUTHENTICATED);
+}
+
+TEST_P(SecureEnd2endTest, SetPerCallCredentials) {
+ MAYBE_SKIP_TEST;
+ ResetStub();
+ EchoRequest request;
+ EchoResponse response;
+ ClientContext context;
+ std::shared_ptr<CallCredentials> creds =
GoogleIAMCredentials(kFakeToken, kFakeSelector);
- context.set_credentials(creds);
- request.set_message("Hello");
- request.mutable_param()->set_echo_metadata(true);
-
- Status s = stub_->Echo(&context, request, &response);
- EXPECT_EQ(request.message(), response.message());
- EXPECT_TRUE(s.ok());
- EXPECT_TRUE(MetadataContains(context.GetServerTrailingMetadata(),
- GRPC_IAM_AUTHORIZATION_TOKEN_METADATA_KEY,
+ context.set_credentials(creds);
+ request.set_message("Hello");
+ request.mutable_param()->set_echo_metadata(true);
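+ // The test server echoes received metadata into the trailing metadata, so the
+ // IAM headers attached by the call credentials can be checked below.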
+
+ Status s = stub_->Echo(&context, request, &response);
+ EXPECT_EQ(request.message(), response.message());
+ EXPECT_TRUE(s.ok());
+ EXPECT_TRUE(MetadataContains(context.GetServerTrailingMetadata(),
+ GRPC_IAM_AUTHORIZATION_TOKEN_METADATA_KEY,
kFakeToken));
- EXPECT_TRUE(MetadataContains(context.GetServerTrailingMetadata(),
- GRPC_IAM_AUTHORITY_SELECTOR_METADATA_KEY,
+ EXPECT_TRUE(MetadataContains(context.GetServerTrailingMetadata(),
+ GRPC_IAM_AUTHORITY_SELECTOR_METADATA_KEY,
kFakeSelector));
EXPECT_EQ(context.credentials()->DebugString(),
kExpectedFakeCredsDebugString);
-}
-
-class CredentialsInterceptor : public experimental::Interceptor {
- public:
- CredentialsInterceptor(experimental::ClientRpcInfo* info) : info_(info) {}
-
- void Intercept(experimental::InterceptorBatchMethods* methods) {
- if (methods->QueryInterceptionHookPoint(
- experimental::InterceptionHookPoints::PRE_SEND_INITIAL_METADATA)) {
- std::shared_ptr<CallCredentials> creds =
+}
+
+class CredentialsInterceptor : public experimental::Interceptor {
+ public:
+ CredentialsInterceptor(experimental::ClientRpcInfo* info) : info_(info) {}
+
+ void Intercept(experimental::InterceptorBatchMethods* methods) {
+ if (methods->QueryInterceptionHookPoint(
+ experimental::InterceptionHookPoints::PRE_SEND_INITIAL_METADATA)) {
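+ // Attach per-call credentials before the initial metadata for this call is sent.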
+ std::shared_ptr<CallCredentials> creds =
GoogleIAMCredentials(kFakeToken, kFakeSelector);
- info_->client_context()->set_credentials(creds);
- }
- methods->Proceed();
- }
-
- private:
- experimental::ClientRpcInfo* info_ = nullptr;
-};
-
-class CredentialsInterceptorFactory
- : public experimental::ClientInterceptorFactoryInterface {
- CredentialsInterceptor* CreateClientInterceptor(
- experimental::ClientRpcInfo* info) {
- return new CredentialsInterceptor(info);
- }
-};
-
-TEST_P(SecureEnd2endTest, CallCredentialsInterception) {
- MAYBE_SKIP_TEST;
- if (!GetParam().use_interceptors) {
- return;
- }
- std::vector<std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
- interceptor_creators;
- interceptor_creators.push_back(std::unique_ptr<CredentialsInterceptorFactory>(
- new CredentialsInterceptorFactory()));
- ResetStub(std::move(interceptor_creators));
- EchoRequest request;
- EchoResponse response;
- ClientContext context;
-
- request.set_message("Hello");
- request.mutable_param()->set_echo_metadata(true);
-
- Status s = stub_->Echo(&context, request, &response);
- EXPECT_EQ(request.message(), response.message());
- EXPECT_TRUE(s.ok());
- EXPECT_TRUE(MetadataContains(context.GetServerTrailingMetadata(),
- GRPC_IAM_AUTHORIZATION_TOKEN_METADATA_KEY,
+ info_->client_context()->set_credentials(creds);
+ }
+ methods->Proceed();
+ }
+
+ private:
+ experimental::ClientRpcInfo* info_ = nullptr;
+};
+
+class CredentialsInterceptorFactory
+ : public experimental::ClientInterceptorFactoryInterface {
+ CredentialsInterceptor* CreateClientInterceptor(
+ experimental::ClientRpcInfo* info) {
+ return new CredentialsInterceptor(info);
+ }
+};
+
+TEST_P(SecureEnd2endTest, CallCredentialsInterception) {
+ MAYBE_SKIP_TEST;
+ if (!GetParam().use_interceptors) {
+ return;
+ }
+ std::vector<std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
+ interceptor_creators;
+ interceptor_creators.push_back(std::unique_ptr<CredentialsInterceptorFactory>(
+ new CredentialsInterceptorFactory()));
+ ResetStub(std::move(interceptor_creators));
+ EchoRequest request;
+ EchoResponse response;
+ ClientContext context;
+
+ request.set_message("Hello");
+ request.mutable_param()->set_echo_metadata(true);
+
+ Status s = stub_->Echo(&context, request, &response);
+ EXPECT_EQ(request.message(), response.message());
+ EXPECT_TRUE(s.ok());
+ EXPECT_TRUE(MetadataContains(context.GetServerTrailingMetadata(),
+ GRPC_IAM_AUTHORIZATION_TOKEN_METADATA_KEY,
kFakeToken));
- EXPECT_TRUE(MetadataContains(context.GetServerTrailingMetadata(),
- GRPC_IAM_AUTHORITY_SELECTOR_METADATA_KEY,
+ EXPECT_TRUE(MetadataContains(context.GetServerTrailingMetadata(),
+ GRPC_IAM_AUTHORITY_SELECTOR_METADATA_KEY,
kFakeSelector));
EXPECT_EQ(context.credentials()->DebugString(),
kExpectedFakeCredsDebugString);
-}
-
-TEST_P(SecureEnd2endTest, CallCredentialsInterceptionWithSetCredentials) {
- MAYBE_SKIP_TEST;
- if (!GetParam().use_interceptors) {
- return;
- }
- std::vector<std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
- interceptor_creators;
- interceptor_creators.push_back(std::unique_ptr<CredentialsInterceptorFactory>(
- new CredentialsInterceptorFactory()));
- ResetStub(std::move(interceptor_creators));
- EchoRequest request;
- EchoResponse response;
- ClientContext context;
- std::shared_ptr<CallCredentials> creds1 =
+}
+
+TEST_P(SecureEnd2endTest, CallCredentialsInterceptionWithSetCredentials) {
+ MAYBE_SKIP_TEST;
+ if (!GetParam().use_interceptors) {
+ return;
+ }
+ std::vector<std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
+ interceptor_creators;
+ interceptor_creators.push_back(std::unique_ptr<CredentialsInterceptorFactory>(
+ new CredentialsInterceptorFactory()));
+ ResetStub(std::move(interceptor_creators));
+ EchoRequest request;
+ EchoResponse response;
+ ClientContext context;
+ std::shared_ptr<CallCredentials> creds1 =
GoogleIAMCredentials(kWrongToken, kWrongSelector);
- context.set_credentials(creds1);
- EXPECT_EQ(context.credentials(), creds1);
+ context.set_credentials(creds1);
+ EXPECT_EQ(context.credentials(), creds1);
EXPECT_EQ(context.credentials()->DebugString(),
kExpectedWrongCredsDebugString);
- request.set_message("Hello");
- request.mutable_param()->set_echo_metadata(true);
-
- Status s = stub_->Echo(&context, request, &response);
- EXPECT_EQ(request.message(), response.message());
- EXPECT_TRUE(s.ok());
- EXPECT_TRUE(MetadataContains(context.GetServerTrailingMetadata(),
- GRPC_IAM_AUTHORIZATION_TOKEN_METADATA_KEY,
+ request.set_message("Hello");
+ request.mutable_param()->set_echo_metadata(true);
+
+ Status s = stub_->Echo(&context, request, &response);
+ EXPECT_EQ(request.message(), response.message());
+ EXPECT_TRUE(s.ok());
+ EXPECT_TRUE(MetadataContains(context.GetServerTrailingMetadata(),
+ GRPC_IAM_AUTHORIZATION_TOKEN_METADATA_KEY,
kFakeToken));
- EXPECT_TRUE(MetadataContains(context.GetServerTrailingMetadata(),
- GRPC_IAM_AUTHORITY_SELECTOR_METADATA_KEY,
+ EXPECT_TRUE(MetadataContains(context.GetServerTrailingMetadata(),
+ GRPC_IAM_AUTHORITY_SELECTOR_METADATA_KEY,
kFakeSelector));
EXPECT_EQ(context.credentials()->DebugString(),
kExpectedFakeCredsDebugString);
-}
-
-TEST_P(SecureEnd2endTest, OverridePerCallCredentials) {
- MAYBE_SKIP_TEST;
- ResetStub();
- EchoRequest request;
- EchoResponse response;
- ClientContext context;
- std::shared_ptr<CallCredentials> creds1 =
+}
+
+TEST_P(SecureEnd2endTest, OverridePerCallCredentials) {
+ MAYBE_SKIP_TEST;
+ ResetStub();
+ EchoRequest request;
+ EchoResponse response;
+ ClientContext context;
+ std::shared_ptr<CallCredentials> creds1 =
GoogleIAMCredentials(kFakeToken1, kFakeSelector1);
- context.set_credentials(creds1);
- EXPECT_EQ(context.credentials(), creds1);
+ context.set_credentials(creds1);
+ EXPECT_EQ(context.credentials(), creds1);
EXPECT_EQ(context.credentials()->DebugString(),
kExpectedFakeCreds1DebugString);
- std::shared_ptr<CallCredentials> creds2 =
+ std::shared_ptr<CallCredentials> creds2 =
GoogleIAMCredentials(kFakeToken2, kFakeSelector2);
- context.set_credentials(creds2);
- EXPECT_EQ(context.credentials(), creds2);
- request.set_message("Hello");
- request.mutable_param()->set_echo_metadata(true);
-
- Status s = stub_->Echo(&context, request, &response);
- EXPECT_TRUE(MetadataContains(context.GetServerTrailingMetadata(),
- GRPC_IAM_AUTHORIZATION_TOKEN_METADATA_KEY,
+ context.set_credentials(creds2);
+ EXPECT_EQ(context.credentials(), creds2);
+ request.set_message("Hello");
+ request.mutable_param()->set_echo_metadata(true);
+
+ Status s = stub_->Echo(&context, request, &response);
+ EXPECT_TRUE(MetadataContains(context.GetServerTrailingMetadata(),
+ GRPC_IAM_AUTHORIZATION_TOKEN_METADATA_KEY,
kFakeToken2));
- EXPECT_TRUE(MetadataContains(context.GetServerTrailingMetadata(),
- GRPC_IAM_AUTHORITY_SELECTOR_METADATA_KEY,
+ EXPECT_TRUE(MetadataContains(context.GetServerTrailingMetadata(),
+ GRPC_IAM_AUTHORITY_SELECTOR_METADATA_KEY,
kFakeSelector2));
- EXPECT_FALSE(MetadataContains(context.GetServerTrailingMetadata(),
- GRPC_IAM_AUTHORIZATION_TOKEN_METADATA_KEY,
+ EXPECT_FALSE(MetadataContains(context.GetServerTrailingMetadata(),
+ GRPC_IAM_AUTHORIZATION_TOKEN_METADATA_KEY,
kFakeToken1));
- EXPECT_FALSE(MetadataContains(context.GetServerTrailingMetadata(),
- GRPC_IAM_AUTHORITY_SELECTOR_METADATA_KEY,
+ EXPECT_FALSE(MetadataContains(context.GetServerTrailingMetadata(),
+ GRPC_IAM_AUTHORITY_SELECTOR_METADATA_KEY,
kFakeSelector1));
EXPECT_EQ(context.credentials()->DebugString(),
kExpectedFakeCreds2DebugString);
- EXPECT_EQ(request.message(), response.message());
- EXPECT_TRUE(s.ok());
-}
-
-TEST_P(SecureEnd2endTest, AuthMetadataPluginKeyFailure) {
- MAYBE_SKIP_TEST;
- ResetStub();
- EchoRequest request;
- EchoResponse response;
- ClientContext context;
- context.set_credentials(grpc::MetadataCredentialsFromPlugin(
- std::unique_ptr<MetadataCredentialsPlugin>(
- new TestMetadataCredentialsPlugin(
- TestMetadataCredentialsPlugin::kBadMetadataKey,
- "Does not matter, will fail the key is invalid.", false, true,
- 0))));
- request.set_message("Hello");
-
- Status s = stub_->Echo(&context, request, &response);
- EXPECT_FALSE(s.ok());
- EXPECT_EQ(s.error_code(), StatusCode::UNAVAILABLE);
+ EXPECT_EQ(request.message(), response.message());
+ EXPECT_TRUE(s.ok());
+}
+
+TEST_P(SecureEnd2endTest, AuthMetadataPluginKeyFailure) {
+ MAYBE_SKIP_TEST;
+ ResetStub();
+ EchoRequest request;
+ EchoResponse response;
+ ClientContext context;
+ context.set_credentials(grpc::MetadataCredentialsFromPlugin(
+ std::unique_ptr<MetadataCredentialsPlugin>(
+ new TestMetadataCredentialsPlugin(
+ TestMetadataCredentialsPlugin::kBadMetadataKey,
+ "Does not matter, will fail the key is invalid.", false, true,
+ 0))));
+ request.set_message("Hello");
+
+ Status s = stub_->Echo(&context, request, &response);
+ EXPECT_FALSE(s.ok());
+ EXPECT_EQ(s.error_code(), StatusCode::UNAVAILABLE);
EXPECT_EQ(context.credentials()->DebugString(),
kExpectedAuthMetadataPluginKeyFailureCredsDebugString);
-}
-
-TEST_P(SecureEnd2endTest, AuthMetadataPluginValueFailure) {
- MAYBE_SKIP_TEST;
- ResetStub();
- EchoRequest request;
- EchoResponse response;
- ClientContext context;
- context.set_credentials(grpc::MetadataCredentialsFromPlugin(
- std::unique_ptr<MetadataCredentialsPlugin>(
- new TestMetadataCredentialsPlugin(
- TestMetadataCredentialsPlugin::kGoodMetadataKey,
- "With illegal \n value.", false, true, 0))));
- request.set_message("Hello");
-
- Status s = stub_->Echo(&context, request, &response);
- EXPECT_FALSE(s.ok());
- EXPECT_EQ(s.error_code(), StatusCode::UNAVAILABLE);
+}
+
+TEST_P(SecureEnd2endTest, AuthMetadataPluginValueFailure) {
+ MAYBE_SKIP_TEST;
+ ResetStub();
+ EchoRequest request;
+ EchoResponse response;
+ ClientContext context;
+ context.set_credentials(grpc::MetadataCredentialsFromPlugin(
+ std::unique_ptr<MetadataCredentialsPlugin>(
+ new TestMetadataCredentialsPlugin(
+ TestMetadataCredentialsPlugin::kGoodMetadataKey,
+ "With illegal \n value.", false, true, 0))));
+ request.set_message("Hello");
+
+ Status s = stub_->Echo(&context, request, &response);
+ EXPECT_FALSE(s.ok());
+ EXPECT_EQ(s.error_code(), StatusCode::UNAVAILABLE);
EXPECT_EQ(context.credentials()->DebugString(),
kExpectedAuthMetadataPluginValueFailureCredsDebugString);
-}
-
-TEST_P(SecureEnd2endTest, AuthMetadataPluginWithDeadline) {
- MAYBE_SKIP_TEST;
- ResetStub();
- EchoRequest request;
- request.mutable_param()->set_skip_cancelled_check(true);
- EchoResponse response;
- ClientContext context;
- const int delay = 100;
- std::chrono::system_clock::time_point deadline =
- std::chrono::system_clock::now() + std::chrono::milliseconds(delay);
- context.set_deadline(deadline);
- context.set_credentials(grpc::MetadataCredentialsFromPlugin(
- std::unique_ptr<MetadataCredentialsPlugin>(
- new TestMetadataCredentialsPlugin("meta_key", "Does not matter", true,
- true, delay))));
- request.set_message("Hello");
-
- Status s = stub_->Echo(&context, request, &response);
- if (!s.ok()) {
- EXPECT_TRUE(s.error_code() == StatusCode::DEADLINE_EXCEEDED ||
- s.error_code() == StatusCode::UNAVAILABLE);
- }
+}
+
+TEST_P(SecureEnd2endTest, AuthMetadataPluginWithDeadline) {
+ MAYBE_SKIP_TEST;
+ ResetStub();
+ EchoRequest request;
+ request.mutable_param()->set_skip_cancelled_check(true);
+ EchoResponse response;
+ ClientContext context;
+ const int delay = 100;
+ std::chrono::system_clock::time_point deadline =
+ std::chrono::system_clock::now() + std::chrono::milliseconds(delay);
+ context.set_deadline(deadline);
+ context.set_credentials(grpc::MetadataCredentialsFromPlugin(
+ std::unique_ptr<MetadataCredentialsPlugin>(
+ new TestMetadataCredentialsPlugin("meta_key", "Does not matter", true,
+ true, delay))));
+ request.set_message("Hello");
+
+ Status s = stub_->Echo(&context, request, &response);
+ if (!s.ok()) {
+ EXPECT_TRUE(s.error_code() == StatusCode::DEADLINE_EXCEEDED ||
+ s.error_code() == StatusCode::UNAVAILABLE);
+ }
EXPECT_EQ(context.credentials()->DebugString(),
kExpectedAuthMetadataPluginWithDeadlineCredsDebugString);
-}
-
-TEST_P(SecureEnd2endTest, AuthMetadataPluginWithCancel) {
- MAYBE_SKIP_TEST;
- ResetStub();
- EchoRequest request;
- request.mutable_param()->set_skip_cancelled_check(true);
- EchoResponse response;
- ClientContext context;
- const int delay = 100;
- context.set_credentials(grpc::MetadataCredentialsFromPlugin(
- std::unique_ptr<MetadataCredentialsPlugin>(
- new TestMetadataCredentialsPlugin("meta_key", "Does not matter", true,
- true, delay))));
- request.set_message("Hello");
-
- std::thread cancel_thread([&] {
- gpr_sleep_until(gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
- gpr_time_from_millis(delay, GPR_TIMESPAN)));
- context.TryCancel();
- });
- Status s = stub_->Echo(&context, request, &response);
- if (!s.ok()) {
- EXPECT_TRUE(s.error_code() == StatusCode::CANCELLED ||
- s.error_code() == StatusCode::UNAVAILABLE);
- }
- cancel_thread.join();
+}
+
+TEST_P(SecureEnd2endTest, AuthMetadataPluginWithCancel) {
+ MAYBE_SKIP_TEST;
+ ResetStub();
+ EchoRequest request;
+ request.mutable_param()->set_skip_cancelled_check(true);
+ EchoResponse response;
+ ClientContext context;
+ const int delay = 100;
+ context.set_credentials(grpc::MetadataCredentialsFromPlugin(
+ std::unique_ptr<MetadataCredentialsPlugin>(
+ new TestMetadataCredentialsPlugin("meta_key", "Does not matter", true,
+ true, delay))));
+ request.set_message("Hello");
+
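+ // Cancel from another thread after roughly the same delay the credentials plugin
+ // uses, so the RPC may finish as either CANCELLED or UNAVAILABLE.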
+ std::thread cancel_thread([&] {
+ gpr_sleep_until(gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
+ gpr_time_from_millis(delay, GPR_TIMESPAN)));
+ context.TryCancel();
+ });
+ Status s = stub_->Echo(&context, request, &response);
+ if (!s.ok()) {
+ EXPECT_TRUE(s.error_code() == StatusCode::CANCELLED ||
+ s.error_code() == StatusCode::UNAVAILABLE);
+ }
+ cancel_thread.join();
EXPECT_EQ(context.credentials()->DebugString(),
kExpectedAuthMetadataPluginWithDeadlineCredsDebugString);
-}
-
-TEST_P(SecureEnd2endTest, NonBlockingAuthMetadataPluginFailure) {
- MAYBE_SKIP_TEST;
- ResetStub();
- EchoRequest request;
- EchoResponse response;
- ClientContext context;
- context.set_credentials(grpc::MetadataCredentialsFromPlugin(
- std::unique_ptr<MetadataCredentialsPlugin>(
- new TestMetadataCredentialsPlugin(
- TestMetadataCredentialsPlugin::kGoodMetadataKey,
- "Does not matter, will fail anyway (see 3rd param)", false, false,
- 0))));
- request.set_message("Hello");
-
- Status s = stub_->Echo(&context, request, &response);
- EXPECT_FALSE(s.ok());
- EXPECT_EQ(s.error_code(), StatusCode::UNAVAILABLE);
- EXPECT_EQ(s.error_message(),
+}
+
+TEST_P(SecureEnd2endTest, NonBlockingAuthMetadataPluginFailure) {
+ MAYBE_SKIP_TEST;
+ ResetStub();
+ EchoRequest request;
+ EchoResponse response;
+ ClientContext context;
+ context.set_credentials(grpc::MetadataCredentialsFromPlugin(
+ std::unique_ptr<MetadataCredentialsPlugin>(
+ new TestMetadataCredentialsPlugin(
+ TestMetadataCredentialsPlugin::kGoodMetadataKey,
+ "Does not matter, will fail anyway (see 3rd param)", false, false,
+ 0))));
+ request.set_message("Hello");
+
+ Status s = stub_->Echo(&context, request, &response);
+ EXPECT_FALSE(s.ok());
+ EXPECT_EQ(s.error_code(), StatusCode::UNAVAILABLE);
+ EXPECT_EQ(s.error_message(),
TString("Getting metadata from plugin failed with error: ") +
- kTestCredsPluginErrorMsg);
+ kTestCredsPluginErrorMsg);
EXPECT_EQ(context.credentials()->DebugString(),
kExpectedNonBlockingAuthMetadataPluginFailureCredsDebugString);
-}
-
-TEST_P(SecureEnd2endTest, NonBlockingAuthMetadataPluginAndProcessorSuccess) {
- MAYBE_SKIP_TEST;
- auto* processor = new TestAuthMetadataProcessor(false);
- StartServer(std::shared_ptr<AuthMetadataProcessor>(processor));
- ResetStub();
- EchoRequest request;
- EchoResponse response;
- ClientContext context;
- context.set_credentials(processor->GetCompatibleClientCreds());
- request.set_message("Hello");
- request.mutable_param()->set_echo_metadata(true);
- request.mutable_param()->set_expected_client_identity(
- TestAuthMetadataProcessor::kGoodGuy);
- request.mutable_param()->set_expected_transport_security_type(
- GetParam().credentials_type);
-
- Status s = stub_->Echo(&context, request, &response);
- EXPECT_EQ(request.message(), response.message());
- EXPECT_TRUE(s.ok());
-
- // Metadata should have been consumed by the processor.
- EXPECT_FALSE(MetadataContains(
- context.GetServerTrailingMetadata(), GRPC_AUTHORIZATION_METADATA_KEY,
+}
+
+TEST_P(SecureEnd2endTest, NonBlockingAuthMetadataPluginAndProcessorSuccess) {
+ MAYBE_SKIP_TEST;
+ auto* processor = new TestAuthMetadataProcessor(false);
+ StartServer(std::shared_ptr<AuthMetadataProcessor>(processor));
+ ResetStub();
+ EchoRequest request;
+ EchoResponse response;
+ ClientContext context;
+ context.set_credentials(processor->GetCompatibleClientCreds());
+ request.set_message("Hello");
+ request.mutable_param()->set_echo_metadata(true);
+ request.mutable_param()->set_expected_client_identity(
+ TestAuthMetadataProcessor::kGoodGuy);
+ request.mutable_param()->set_expected_transport_security_type(
+ GetParam().credentials_type);
+
+ Status s = stub_->Echo(&context, request, &response);
+ EXPECT_EQ(request.message(), response.message());
+ EXPECT_TRUE(s.ok());
+
+ // Metadata should have been consumed by the processor.
+ EXPECT_FALSE(MetadataContains(
+ context.GetServerTrailingMetadata(), GRPC_AUTHORIZATION_METADATA_KEY,
TString("Bearer ") + TestAuthMetadataProcessor::kGoodGuy));
EXPECT_EQ(
context.credentials()->DebugString(),
kExpectedNonBlockingAuthMetadataPluginAndProcessorSuccessCredsDebugString);
-}
-
-TEST_P(SecureEnd2endTest, NonBlockingAuthMetadataPluginAndProcessorFailure) {
- MAYBE_SKIP_TEST;
- auto* processor = new TestAuthMetadataProcessor(false);
- StartServer(std::shared_ptr<AuthMetadataProcessor>(processor));
- ResetStub();
- EchoRequest request;
- EchoResponse response;
- ClientContext context;
- context.set_credentials(processor->GetIncompatibleClientCreds());
- request.set_message("Hello");
-
- Status s = stub_->Echo(&context, request, &response);
- EXPECT_FALSE(s.ok());
- EXPECT_EQ(s.error_code(), StatusCode::UNAUTHENTICATED);
+}
+
+TEST_P(SecureEnd2endTest, NonBlockingAuthMetadataPluginAndProcessorFailure) {
+ MAYBE_SKIP_TEST;
+ auto* processor = new TestAuthMetadataProcessor(false);
+ StartServer(std::shared_ptr<AuthMetadataProcessor>(processor));
+ ResetStub();
+ EchoRequest request;
+ EchoResponse response;
+ ClientContext context;
+ context.set_credentials(processor->GetIncompatibleClientCreds());
+ request.set_message("Hello");
+
+ Status s = stub_->Echo(&context, request, &response);
+ EXPECT_FALSE(s.ok());
+ EXPECT_EQ(s.error_code(), StatusCode::UNAUTHENTICATED);
EXPECT_EQ(
context.credentials()->DebugString(),
kExpectedNonBlockingAuthMetadataPluginAndProcessorFailureCredsDebugString);
-}
-
-TEST_P(SecureEnd2endTest, BlockingAuthMetadataPluginFailure) {
- MAYBE_SKIP_TEST;
- ResetStub();
- EchoRequest request;
- EchoResponse response;
- ClientContext context;
- context.set_credentials(grpc::MetadataCredentialsFromPlugin(
- std::unique_ptr<MetadataCredentialsPlugin>(
- new TestMetadataCredentialsPlugin(
- TestMetadataCredentialsPlugin::kGoodMetadataKey,
- "Does not matter, will fail anyway (see 3rd param)", true, false,
- 0))));
- request.set_message("Hello");
-
- Status s = stub_->Echo(&context, request, &response);
- EXPECT_FALSE(s.ok());
- EXPECT_EQ(s.error_code(), StatusCode::UNAVAILABLE);
- EXPECT_EQ(s.error_message(),
+}
+
+TEST_P(SecureEnd2endTest, BlockingAuthMetadataPluginFailure) {
+ MAYBE_SKIP_TEST;
+ ResetStub();
+ EchoRequest request;
+ EchoResponse response;
+ ClientContext context;
+ context.set_credentials(grpc::MetadataCredentialsFromPlugin(
+ std::unique_ptr<MetadataCredentialsPlugin>(
+ new TestMetadataCredentialsPlugin(
+ TestMetadataCredentialsPlugin::kGoodMetadataKey,
+ "Does not matter, will fail anyway (see 3rd param)", true, false,
+ 0))));
+ request.set_message("Hello");
+
+ Status s = stub_->Echo(&context, request, &response);
+ EXPECT_FALSE(s.ok());
+ EXPECT_EQ(s.error_code(), StatusCode::UNAVAILABLE);
+ EXPECT_EQ(s.error_message(),
TString("Getting metadata from plugin failed with error: ") +
- kTestCredsPluginErrorMsg);
+ kTestCredsPluginErrorMsg);
EXPECT_EQ(context.credentials()->DebugString(),
kExpectedBlockingAuthMetadataPluginFailureCredsDebugString);
-}
-
-TEST_P(SecureEnd2endTest, CompositeCallCreds) {
- MAYBE_SKIP_TEST;
- ResetStub();
- EchoRequest request;
- EchoResponse response;
- ClientContext context;
- const char kMetadataKey1[] = "call-creds-key1";
- const char kMetadataKey2[] = "call-creds-key2";
- const char kMetadataVal1[] = "call-creds-val1";
- const char kMetadataVal2[] = "call-creds-val2";
-
- context.set_credentials(grpc::CompositeCallCredentials(
- grpc::MetadataCredentialsFromPlugin(
- std::unique_ptr<MetadataCredentialsPlugin>(
- new TestMetadataCredentialsPlugin(kMetadataKey1, kMetadataVal1,
- true, true, 0))),
- grpc::MetadataCredentialsFromPlugin(
- std::unique_ptr<MetadataCredentialsPlugin>(
- new TestMetadataCredentialsPlugin(kMetadataKey2, kMetadataVal2,
- true, true, 0)))));
- request.set_message("Hello");
- request.mutable_param()->set_echo_metadata(true);
-
- Status s = stub_->Echo(&context, request, &response);
- EXPECT_TRUE(s.ok());
- EXPECT_TRUE(MetadataContains(context.GetServerTrailingMetadata(),
- kMetadataKey1, kMetadataVal1));
- EXPECT_TRUE(MetadataContains(context.GetServerTrailingMetadata(),
- kMetadataKey2, kMetadataVal2));
+}
+
+TEST_P(SecureEnd2endTest, CompositeCallCreds) {
+ MAYBE_SKIP_TEST;
+ ResetStub();
+ EchoRequest request;
+ EchoResponse response;
+ ClientContext context;
+ const char kMetadataKey1[] = "call-creds-key1";
+ const char kMetadataKey2[] = "call-creds-key2";
+ const char kMetadataVal1[] = "call-creds-val1";
+ const char kMetadataVal2[] = "call-creds-val2";
+
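+ // Both plugins' key/value pairs should be attached to the call and echoed back
+ // in the server's trailing metadata.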
+ context.set_credentials(grpc::CompositeCallCredentials(
+ grpc::MetadataCredentialsFromPlugin(
+ std::unique_ptr<MetadataCredentialsPlugin>(
+ new TestMetadataCredentialsPlugin(kMetadataKey1, kMetadataVal1,
+ true, true, 0))),
+ grpc::MetadataCredentialsFromPlugin(
+ std::unique_ptr<MetadataCredentialsPlugin>(
+ new TestMetadataCredentialsPlugin(kMetadataKey2, kMetadataVal2,
+ true, true, 0)))));
+ request.set_message("Hello");
+ request.mutable_param()->set_echo_metadata(true);
+
+ Status s = stub_->Echo(&context, request, &response);
+ EXPECT_TRUE(s.ok());
+ EXPECT_TRUE(MetadataContains(context.GetServerTrailingMetadata(),
+ kMetadataKey1, kMetadataVal1));
+ EXPECT_TRUE(MetadataContains(context.GetServerTrailingMetadata(),
+ kMetadataKey2, kMetadataVal2));
EXPECT_EQ(context.credentials()->DebugString(),
kExpectedCompositeCallCredsDebugString);
-}
-
-TEST_P(SecureEnd2endTest, ClientAuthContext) {
- MAYBE_SKIP_TEST;
- ResetStub();
- EchoRequest request;
- EchoResponse response;
- request.set_message("Hello");
- request.mutable_param()->set_check_auth_context(GetParam().credentials_type ==
- kTlsCredentialsType);
- request.mutable_param()->set_expected_transport_security_type(
- GetParam().credentials_type);
- ClientContext context;
- Status s = stub_->Echo(&context, request, &response);
- EXPECT_EQ(response.message(), request.message());
- EXPECT_TRUE(s.ok());
-
- std::shared_ptr<const AuthContext> auth_ctx = context.auth_context();
- std::vector<grpc::string_ref> tst =
- auth_ctx->FindPropertyValues("transport_security_type");
- ASSERT_EQ(1u, tst.size());
- EXPECT_EQ(GetParam().credentials_type, ToString(tst[0]));
- if (GetParam().credentials_type == kTlsCredentialsType) {
- EXPECT_EQ("x509_subject_alternative_name",
- auth_ctx->GetPeerIdentityPropertyName());
- EXPECT_EQ(4u, auth_ctx->GetPeerIdentity().size());
- EXPECT_EQ("*.test.google.fr", ToString(auth_ctx->GetPeerIdentity()[0]));
- EXPECT_EQ("waterzooi.test.google.be",
- ToString(auth_ctx->GetPeerIdentity()[1]));
- EXPECT_EQ("*.test.youtube.com", ToString(auth_ctx->GetPeerIdentity()[2]));
- EXPECT_EQ("192.168.1.3", ToString(auth_ctx->GetPeerIdentity()[3]));
- }
-}
-
-class ResourceQuotaEnd2endTest : public End2endTest {
- public:
- ResourceQuotaEnd2endTest()
- : server_resource_quota_("server_resource_quota") {}
-
- virtual void ConfigureServerBuilder(ServerBuilder* builder) override {
- builder->SetResourceQuota(server_resource_quota_);
- }
-
- private:
- ResourceQuota server_resource_quota_;
-};
-
-TEST_P(ResourceQuotaEnd2endTest, SimpleRequest) {
- MAYBE_SKIP_TEST;
- ResetStub();
-
- EchoRequest request;
- EchoResponse response;
- request.set_message("Hello");
-
- ClientContext context;
- Status s = stub_->Echo(&context, request, &response);
- EXPECT_EQ(response.message(), request.message());
- EXPECT_TRUE(s.ok());
-}
-
-// TODO(vjpai): refactor arguments into a struct if it makes sense
-std::vector<TestScenario> CreateTestScenarios(bool use_proxy,
- bool test_insecure,
- bool test_secure,
- bool test_inproc,
- bool test_callback_server) {
- std::vector<TestScenario> scenarios;
+}
+
+TEST_P(SecureEnd2endTest, ClientAuthContext) {
+ MAYBE_SKIP_TEST;
+ ResetStub();
+ EchoRequest request;
+ EchoResponse response;
+ request.set_message("Hello");
+ request.mutable_param()->set_check_auth_context(GetParam().credentials_type ==
+ kTlsCredentialsType);
+ request.mutable_param()->set_expected_transport_security_type(
+ GetParam().credentials_type);
+ ClientContext context;
+ Status s = stub_->Echo(&context, request, &response);
+ EXPECT_EQ(response.message(), request.message());
+ EXPECT_TRUE(s.ok());
+
+ std::shared_ptr<const AuthContext> auth_ctx = context.auth_context();
+ std::vector<grpc::string_ref> tst =
+ auth_ctx->FindPropertyValues("transport_security_type");
+ ASSERT_EQ(1u, tst.size());
+ EXPECT_EQ(GetParam().credentials_type, ToString(tst[0]));
+ if (GetParam().credentials_type == kTlsCredentialsType) {
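+ // These values are the subject alternative names carried by the test server certificate.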
+ EXPECT_EQ("x509_subject_alternative_name",
+ auth_ctx->GetPeerIdentityPropertyName());
+ EXPECT_EQ(4u, auth_ctx->GetPeerIdentity().size());
+ EXPECT_EQ("*.test.google.fr", ToString(auth_ctx->GetPeerIdentity()[0]));
+ EXPECT_EQ("waterzooi.test.google.be",
+ ToString(auth_ctx->GetPeerIdentity()[1]));
+ EXPECT_EQ("*.test.youtube.com", ToString(auth_ctx->GetPeerIdentity()[2]));
+ EXPECT_EQ("192.168.1.3", ToString(auth_ctx->GetPeerIdentity()[3]));
+ }
+}
+
+class ResourceQuotaEnd2endTest : public End2endTest {
+ public:
+ ResourceQuotaEnd2endTest()
+ : server_resource_quota_("server_resource_quota") {}
+
+ virtual void ConfigureServerBuilder(ServerBuilder* builder) override {
+ builder->SetResourceQuota(server_resource_quota_);
+ }
+
+ private:
+ ResourceQuota server_resource_quota_;
+};
+
+TEST_P(ResourceQuotaEnd2endTest, SimpleRequest) {
+ MAYBE_SKIP_TEST;
+ ResetStub();
+
+ EchoRequest request;
+ EchoResponse response;
+ request.set_message("Hello");
+
+ ClientContext context;
+ Status s = stub_->Echo(&context, request, &response);
+ EXPECT_EQ(response.message(), request.message());
+ EXPECT_TRUE(s.ok());
+}
+
+// TODO(vjpai): refactor arguments into a struct if it makes sense
+std::vector<TestScenario> CreateTestScenarios(bool use_proxy,
+ bool test_insecure,
+ bool test_secure,
+ bool test_inproc,
+ bool test_callback_server) {
+ std::vector<TestScenario> scenarios;
std::vector<TString> credentials_types;
-
+
GPR_GLOBAL_CONFIG_SET(grpc_client_channel_backup_poll_interval_ms,
kClientChannelBackupPollIntervalMs);
-#if TARGET_OS_IPHONE
- // Workaround Apple CFStream bug
- gpr_setenv("grpc_cfstream", "0");
-#endif
-
- if (test_secure) {
- credentials_types =
- GetCredentialsProvider()->GetSecureCredentialsTypeList();
- }
- auto insec_ok = [] {
- // Only allow the insecure credentials type when it is registered with the
- // provider. Users may create providers that do not include insecure credentials.
- return GetCredentialsProvider()->GetChannelCredentials(
- kInsecureCredentialsType, nullptr) != nullptr;
- };
- if (test_insecure && insec_ok()) {
- credentials_types.push_back(kInsecureCredentialsType);
- }
-
- // Test the callback server with inproc, or with other transports when the event engine allows it
- GPR_ASSERT(!credentials_types.empty());
- for (const auto& cred : credentials_types) {
- scenarios.emplace_back(false, false, false, cred, false);
- scenarios.emplace_back(true, false, false, cred, false);
- if (test_callback_server) {
- // Note that these scenarios will be dynamically disabled if the event
- // engine doesn't run in the background
- scenarios.emplace_back(false, false, false, cred, true);
- scenarios.emplace_back(true, false, false, cred, true);
- }
- if (use_proxy) {
- scenarios.emplace_back(false, true, false, cred, false);
- scenarios.emplace_back(true, true, false, cred, false);
- }
- }
- if (test_inproc && insec_ok()) {
- scenarios.emplace_back(false, false, true, kInsecureCredentialsType, false);
- scenarios.emplace_back(true, false, true, kInsecureCredentialsType, false);
- if (test_callback_server) {
- scenarios.emplace_back(false, false, true, kInsecureCredentialsType,
- true);
- scenarios.emplace_back(true, false, true, kInsecureCredentialsType, true);
- }
- }
- return scenarios;
-}
-
-INSTANTIATE_TEST_SUITE_P(
- End2end, End2endTest,
- ::testing::ValuesIn(CreateTestScenarios(false, true, true, true, true)));
-
-INSTANTIATE_TEST_SUITE_P(
- End2endServerTryCancel, End2endServerTryCancelTest,
- ::testing::ValuesIn(CreateTestScenarios(false, true, true, true, true)));
-
-INSTANTIATE_TEST_SUITE_P(
- ProxyEnd2end, ProxyEnd2endTest,
- ::testing::ValuesIn(CreateTestScenarios(true, true, true, true, true)));
-
-INSTANTIATE_TEST_SUITE_P(
- SecureEnd2end, SecureEnd2endTest,
- ::testing::ValuesIn(CreateTestScenarios(false, false, true, false, true)));
-
-INSTANTIATE_TEST_SUITE_P(
- ResourceQuotaEnd2end, ResourceQuotaEnd2endTest,
- ::testing::ValuesIn(CreateTestScenarios(false, true, true, true, true)));
-
-} // namespace
-} // namespace testing
-} // namespace grpc
-
-int main(int argc, char** argv) {
- grpc::testing::TestEnvironment env(argc, argv);
- ::testing::InitGoogleTest(&argc, argv);
- int ret = RUN_ALL_TESTS();
- return ret;
-}
+#if TARGET_OS_IPHONE
+ // Workaround Apple CFStream bug
+ gpr_setenv("grpc_cfstream", "0");
+#endif
+
+ if (test_secure) {
+ credentials_types =
+ GetCredentialsProvider()->GetSecureCredentialsTypeList();
+ }
+ auto insec_ok = [] {
+ // Only allow insecure credentials type when it is registered with the
+ // provider. User may create providers that do not have insecure.
+ return GetCredentialsProvider()->GetChannelCredentials(
+ kInsecureCredentialsType, nullptr) != nullptr;
+ };
+ if (test_insecure && insec_ok()) {
+ credentials_types.push_back(kInsecureCredentialsType);
+ }
+
+ // Test callback with inproc or if the event-engine allows it
+ GPR_ASSERT(!credentials_types.empty());
+ for (const auto& cred : credentials_types) {
+ scenarios.emplace_back(false, false, false, cred, false);
+ scenarios.emplace_back(true, false, false, cred, false);
+ if (test_callback_server) {
+ // Note that these scenarios will be dynamically disabled if the event
+ // engine doesn't run in the background
+ scenarios.emplace_back(false, false, false, cred, true);
+ scenarios.emplace_back(true, false, false, cred, true);
+ }
+ if (use_proxy) {
+ scenarios.emplace_back(false, true, false, cred, false);
+ scenarios.emplace_back(true, true, false, cred, false);
+ }
+ }
+ if (test_inproc && insec_ok()) {
+ scenarios.emplace_back(false, false, true, kInsecureCredentialsType, false);
+ scenarios.emplace_back(true, false, true, kInsecureCredentialsType, false);
+ if (test_callback_server) {
+ scenarios.emplace_back(false, false, true, kInsecureCredentialsType,
+ true);
+ scenarios.emplace_back(true, false, true, kInsecureCredentialsType, true);
+ }
+ }
+ return scenarios;
+}
+
+INSTANTIATE_TEST_SUITE_P(
+ End2end, End2endTest,
+ ::testing::ValuesIn(CreateTestScenarios(false, true, true, true, true)));
+
+INSTANTIATE_TEST_SUITE_P(
+ End2endServerTryCancel, End2endServerTryCancelTest,
+ ::testing::ValuesIn(CreateTestScenarios(false, true, true, true, true)));
+
+INSTANTIATE_TEST_SUITE_P(
+ ProxyEnd2end, ProxyEnd2endTest,
+ ::testing::ValuesIn(CreateTestScenarios(true, true, true, true, true)));
+
+INSTANTIATE_TEST_SUITE_P(
+ SecureEnd2end, SecureEnd2endTest,
+ ::testing::ValuesIn(CreateTestScenarios(false, false, true, false, true)));
+
+INSTANTIATE_TEST_SUITE_P(
+ ResourceQuotaEnd2end, ResourceQuotaEnd2endTest,
+ ::testing::ValuesIn(CreateTestScenarios(false, true, true, true, true)));
+
+} // namespace
+} // namespace testing
+} // namespace grpc
+
+int main(int argc, char** argv) {
+ grpc::testing::TestEnvironment env(argc, argv);
+ ::testing::InitGoogleTest(&argc, argv);
+ int ret = RUN_ALL_TESTS();
+ return ret;
+}
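
The ResourceQuotaEnd2endTest hunk above only shows SetResourceQuota() being wired into the test fixture through ConfigureServerBuilder(). For context, a minimal standalone sketch of the same ServerBuilder API follows; the listening address, port, and quota size are illustrative and not taken from the diff.

// Sketch: capping server buffer memory with a ResourceQuota, mirroring the
// builder call made in ResourceQuotaEnd2endTest::ConfigureServerBuilder().
#include <memory>

#include <grpcpp/grpcpp.h>
#include <grpcpp/resource_quota.h>

int main() {
  grpc::ResourceQuota quota("demo_server_quota");
  quota.Resize(64 * 1024 * 1024);  // ~64 MiB of buffer memory (illustrative)

  grpc::ServerBuilder builder;
  builder.SetResourceQuota(quota);
  builder.AddListeningPort("0.0.0.0:50051", grpc::InsecureServerCredentials());
  std::unique_ptr<grpc::Server> server = builder.BuildAndStart();
  server->Wait();
  return 0;
}
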
diff --git a/contrib/libs/grpc/test/cpp/end2end/exception_test.cc b/contrib/libs/grpc/test/cpp/end2end/exception_test.cc
index da342f1dfd..cd29eb8a10 100644
--- a/contrib/libs/grpc/test/cpp/end2end/exception_test.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/exception_test.cc
@@ -1,123 +1,123 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include <exception>
-#include <memory>
-
-#include <grpc/impl/codegen/port_platform.h>
-#include <grpcpp/channel.h>
-#include <grpcpp/client_context.h>
-#include <grpcpp/server.h>
-#include <grpcpp/server_builder.h>
-#include <grpcpp/server_context.h>
-
-#include "src/proto/grpc/testing/echo.grpc.pb.h"
-#include "test/core/util/test_config.h"
-
-#include <gtest/gtest.h>
-
-namespace grpc {
-namespace testing {
-
-const char* kErrorMessage = "This service caused an exception";
-
-#if GRPC_ALLOW_EXCEPTIONS
-class ExceptingServiceImpl : public ::grpc::testing::EchoTestService::Service {
- public:
- Status Echo(ServerContext* /*server_context*/, const EchoRequest* /*request*/,
- EchoResponse* /*response*/) override {
- throw - 1;
- }
- Status RequestStream(ServerContext* /*context*/,
- ServerReader<EchoRequest>* /*reader*/,
- EchoResponse* /*response*/) override {
- throw ServiceException();
- }
-
- private:
- class ServiceException final : public std::exception {
- public:
- ServiceException() {}
-
- private:
- const char* what() const noexcept override { return kErrorMessage; }
- };
-};
-
-class ExceptionTest : public ::testing::Test {
- protected:
- ExceptionTest() {}
-
- void SetUp() override {
- ServerBuilder builder;
- builder.RegisterService(&service_);
- server_ = builder.BuildAndStart();
- }
-
- void TearDown() override { server_->Shutdown(); }
-
- void ResetStub() {
- channel_ = server_->InProcessChannel(ChannelArguments());
- stub_ = grpc::testing::EchoTestService::NewStub(channel_);
- }
-
- std::shared_ptr<Channel> channel_;
- std::unique_ptr<grpc::testing::EchoTestService::Stub> stub_;
- std::unique_ptr<Server> server_;
- ExceptingServiceImpl service_;
-};
-
-TEST_F(ExceptionTest, Unary) {
- ResetStub();
- EchoRequest request;
- EchoResponse response;
- request.set_message("test");
-
- for (int i = 0; i < 10; i++) {
- ClientContext context;
- Status s = stub_->Echo(&context, request, &response);
- EXPECT_FALSE(s.ok());
- EXPECT_EQ(s.error_code(), StatusCode::UNKNOWN);
- }
-}
-
-TEST_F(ExceptionTest, RequestStream) {
- ResetStub();
- EchoResponse response;
-
- for (int i = 0; i < 10; i++) {
- ClientContext context;
- auto stream = stub_->RequestStream(&context, &response);
- stream->WritesDone();
- Status s = stream->Finish();
-
- EXPECT_FALSE(s.ok());
- EXPECT_EQ(s.error_code(), StatusCode::UNKNOWN);
- }
-}
-
-#endif // GRPC_ALLOW_EXCEPTIONS
-
-} // namespace testing
-} // namespace grpc
-
-int main(int argc, char** argv) {
- grpc::testing::TestEnvironment env(argc, argv);
- ::testing::InitGoogleTest(&argc, argv);
- return RUN_ALL_TESTS();
-}
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <exception>
+#include <memory>
+
+#include <grpc/impl/codegen/port_platform.h>
+#include <grpcpp/channel.h>
+#include <grpcpp/client_context.h>
+#include <grpcpp/server.h>
+#include <grpcpp/server_builder.h>
+#include <grpcpp/server_context.h>
+
+#include "src/proto/grpc/testing/echo.grpc.pb.h"
+#include "test/core/util/test_config.h"
+
+#include <gtest/gtest.h>
+
+namespace grpc {
+namespace testing {
+
+const char* kErrorMessage = "This service caused an exception";
+
+#if GRPC_ALLOW_EXCEPTIONS
+class ExceptingServiceImpl : public ::grpc::testing::EchoTestService::Service {
+ public:
+ Status Echo(ServerContext* /*server_context*/, const EchoRequest* /*request*/,
+ EchoResponse* /*response*/) override {
+ throw - 1;
+ }
+ Status RequestStream(ServerContext* /*context*/,
+ ServerReader<EchoRequest>* /*reader*/,
+ EchoResponse* /*response*/) override {
+ throw ServiceException();
+ }
+
+ private:
+ class ServiceException final : public std::exception {
+ public:
+ ServiceException() {}
+
+ private:
+ const char* what() const noexcept override { return kErrorMessage; }
+ };
+};
+
+class ExceptionTest : public ::testing::Test {
+ protected:
+ ExceptionTest() {}
+
+ void SetUp() override {
+ ServerBuilder builder;
+ builder.RegisterService(&service_);
+ server_ = builder.BuildAndStart();
+ }
+
+ void TearDown() override { server_->Shutdown(); }
+
+ void ResetStub() {
+ channel_ = server_->InProcessChannel(ChannelArguments());
+ stub_ = grpc::testing::EchoTestService::NewStub(channel_);
+ }
+
+ std::shared_ptr<Channel> channel_;
+ std::unique_ptr<grpc::testing::EchoTestService::Stub> stub_;
+ std::unique_ptr<Server> server_;
+ ExceptingServiceImpl service_;
+};
+
+TEST_F(ExceptionTest, Unary) {
+ ResetStub();
+ EchoRequest request;
+ EchoResponse response;
+ request.set_message("test");
+
+ for (int i = 0; i < 10; i++) {
+ ClientContext context;
+ Status s = stub_->Echo(&context, request, &response);
+ EXPECT_FALSE(s.ok());
+ EXPECT_EQ(s.error_code(), StatusCode::UNKNOWN);
+ }
+}
+
+TEST_F(ExceptionTest, RequestStream) {
+ ResetStub();
+ EchoResponse response;
+
+ for (int i = 0; i < 10; i++) {
+ ClientContext context;
+ auto stream = stub_->RequestStream(&context, &response);
+ stream->WritesDone();
+ Status s = stream->Finish();
+
+ EXPECT_FALSE(s.ok());
+ EXPECT_EQ(s.error_code(), StatusCode::UNKNOWN);
+ }
+}
+
+#endif // GRPC_ALLOW_EXCEPTIONS
+
+} // namespace testing
+} // namespace grpc
+
+int main(int argc, char** argv) {
+ grpc::testing::TestEnvironment env(argc, argv);
+ ::testing::InitGoogleTest(&argc, argv);
+ return RUN_ALL_TESTS();
+}
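
The exception_test.cc diff above demonstrates that, with GRPC_ALLOW_EXCEPTIONS, an exception escaping a synchronous handler surfaces to the client as StatusCode::UNKNOWN. The usual alternative is to catch inside the handler and translate to an explicit status; a short sketch of that pattern follows (the thrown error and status code are illustrative, not from the diff).

// Sketch: translating exceptions into an explicit grpc::Status instead of
// letting them escape the handler.
#include <stdexcept>

#include <grpcpp/grpcpp.h>

#include "src/proto/grpc/testing/echo.grpc.pb.h"

class CatchingServiceImpl : public grpc::testing::EchoTestService::Service {
 public:
  grpc::Status Echo(grpc::ServerContext* /*ctx*/,
                    const grpc::testing::EchoRequest* request,
                    grpc::testing::EchoResponse* response) override {
    try {
      if (request->message().empty()) {
        throw std::runtime_error("empty request");  // illustrative failure
      }
      response->set_message(request->message());
      return grpc::Status::OK;
    } catch (const std::exception& e) {
      // Map application failures to a meaningful status instead of UNKNOWN.
      return grpc::Status(grpc::StatusCode::INTERNAL, e.what());
    }
  }
};
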
diff --git a/contrib/libs/grpc/test/cpp/end2end/filter_end2end_test.cc b/contrib/libs/grpc/test/cpp/end2end/filter_end2end_test.cc
index d4724f4ce7..2f26d0716c 100644
--- a/contrib/libs/grpc/test/cpp/end2end/filter_end2end_test.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/filter_end2end_test.cc
@@ -1,346 +1,346 @@
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include <memory>
-#include <mutex>
+/*
+ *
+ * Copyright 2016 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <memory>
+#include <mutex>
#include <thread>
-
-#include <grpc/grpc.h>
-#include <grpc/support/time.h>
-#include <grpcpp/channel.h>
-#include <grpcpp/client_context.h>
-#include <grpcpp/create_channel.h>
-#include <grpcpp/generic/async_generic_service.h>
-#include <grpcpp/generic/generic_stub.h>
-#include <grpcpp/impl/codegen/proto_utils.h>
-#include <grpcpp/server.h>
-#include <grpcpp/server_builder.h>
-#include <grpcpp/server_context.h>
-#include <grpcpp/support/config.h>
-#include <grpcpp/support/slice.h>
-
-#include "src/cpp/common/channel_filter.h"
-#include "src/proto/grpc/testing/echo.grpc.pb.h"
-#include "test/core/util/port.h"
-#include "test/core/util/test_config.h"
-#include "test/cpp/util/byte_buffer_proto_helper.h"
-
-#include <gtest/gtest.h>
-
-using grpc::testing::EchoRequest;
-using grpc::testing::EchoResponse;
-using std::chrono::system_clock;
-
-namespace grpc {
-namespace testing {
-namespace {
-
-void* tag(int i) { return (void*)static_cast<intptr_t>(i); }
-
-void verify_ok(CompletionQueue* cq, int i, bool expect_ok) {
- bool ok;
- void* got_tag;
- EXPECT_TRUE(cq->Next(&got_tag, &ok));
- EXPECT_EQ(expect_ok, ok);
- EXPECT_EQ(tag(i), got_tag);
-}
-
-namespace {
-
-int global_num_connections = 0;
-int global_num_calls = 0;
-std::mutex global_mu;
-
-void IncrementConnectionCounter() {
- std::unique_lock<std::mutex> lock(global_mu);
- ++global_num_connections;
-}
-
-void ResetConnectionCounter() {
- std::unique_lock<std::mutex> lock(global_mu);
- global_num_connections = 0;
-}
-
-int GetConnectionCounterValue() {
- std::unique_lock<std::mutex> lock(global_mu);
- return global_num_connections;
-}
-
-void IncrementCallCounter() {
- std::unique_lock<std::mutex> lock(global_mu);
- ++global_num_calls;
-}
-
-void ResetCallCounter() {
- std::unique_lock<std::mutex> lock(global_mu);
- global_num_calls = 0;
-}
-
-int GetCallCounterValue() {
- std::unique_lock<std::mutex> lock(global_mu);
- return global_num_calls;
-}
-
-} // namespace
-
-class ChannelDataImpl : public ChannelData {
- public:
- grpc_error* Init(grpc_channel_element* /*elem*/,
- grpc_channel_element_args* /*args*/) {
- IncrementConnectionCounter();
- return GRPC_ERROR_NONE;
- }
-};
-
-class CallDataImpl : public CallData {
- public:
- void StartTransportStreamOpBatch(grpc_call_element* elem,
- TransportStreamOpBatch* op) override {
- // Incrementing the counter could be done from Init(), but we want
- // to test that the individual methods are actually called correctly.
- if (op->recv_initial_metadata() != nullptr) IncrementCallCounter();
- grpc_call_next_op(elem, op->op());
- }
-};
-
-class FilterEnd2endTest : public ::testing::Test {
- protected:
- FilterEnd2endTest() : server_host_("localhost") {}
-
- static void SetUpTestCase() {
- // Workaround for
- // https://github.com/google/google-toolbox-for-mac/issues/242
- static bool setup_done = false;
- if (!setup_done) {
- setup_done = true;
- grpc::RegisterChannelFilter<ChannelDataImpl, CallDataImpl>(
- "test-filter", GRPC_SERVER_CHANNEL, INT_MAX, nullptr);
- }
- }
-
- void SetUp() override {
- int port = grpc_pick_unused_port_or_die();
- server_address_ << server_host_ << ":" << port;
- // Setup server
- ServerBuilder builder;
- builder.AddListeningPort(server_address_.str(),
- InsecureServerCredentials());
- builder.RegisterAsyncGenericService(&generic_service_);
- srv_cq_ = builder.AddCompletionQueue();
- server_ = builder.BuildAndStart();
- }
-
- void TearDown() override {
- server_->Shutdown();
- void* ignored_tag;
- bool ignored_ok;
- cli_cq_.Shutdown();
- srv_cq_->Shutdown();
- while (cli_cq_.Next(&ignored_tag, &ignored_ok))
- ;
- while (srv_cq_->Next(&ignored_tag, &ignored_ok))
- ;
- }
-
- void ResetStub() {
- std::shared_ptr<Channel> channel = grpc::CreateChannel(
- server_address_.str(), InsecureChannelCredentials());
- generic_stub_.reset(new GenericStub(channel));
- ResetConnectionCounter();
- ResetCallCounter();
- }
-
- void server_ok(int i) { verify_ok(srv_cq_.get(), i, true); }
- void client_ok(int i) { verify_ok(&cli_cq_, i, true); }
- void server_fail(int i) { verify_ok(srv_cq_.get(), i, false); }
- void client_fail(int i) { verify_ok(&cli_cq_, i, false); }
-
- void SendRpc(int num_rpcs) {
+
+#include <grpc/grpc.h>
+#include <grpc/support/time.h>
+#include <grpcpp/channel.h>
+#include <grpcpp/client_context.h>
+#include <grpcpp/create_channel.h>
+#include <grpcpp/generic/async_generic_service.h>
+#include <grpcpp/generic/generic_stub.h>
+#include <grpcpp/impl/codegen/proto_utils.h>
+#include <grpcpp/server.h>
+#include <grpcpp/server_builder.h>
+#include <grpcpp/server_context.h>
+#include <grpcpp/support/config.h>
+#include <grpcpp/support/slice.h>
+
+#include "src/cpp/common/channel_filter.h"
+#include "src/proto/grpc/testing/echo.grpc.pb.h"
+#include "test/core/util/port.h"
+#include "test/core/util/test_config.h"
+#include "test/cpp/util/byte_buffer_proto_helper.h"
+
+#include <gtest/gtest.h>
+
+using grpc::testing::EchoRequest;
+using grpc::testing::EchoResponse;
+using std::chrono::system_clock;
+
+namespace grpc {
+namespace testing {
+namespace {
+
+void* tag(int i) { return (void*)static_cast<intptr_t>(i); }
+
+void verify_ok(CompletionQueue* cq, int i, bool expect_ok) {
+ bool ok;
+ void* got_tag;
+ EXPECT_TRUE(cq->Next(&got_tag, &ok));
+ EXPECT_EQ(expect_ok, ok);
+ EXPECT_EQ(tag(i), got_tag);
+}
+
+namespace {
+
+int global_num_connections = 0;
+int global_num_calls = 0;
+std::mutex global_mu;
+
+void IncrementConnectionCounter() {
+ std::unique_lock<std::mutex> lock(global_mu);
+ ++global_num_connections;
+}
+
+void ResetConnectionCounter() {
+ std::unique_lock<std::mutex> lock(global_mu);
+ global_num_connections = 0;
+}
+
+int GetConnectionCounterValue() {
+ std::unique_lock<std::mutex> lock(global_mu);
+ return global_num_connections;
+}
+
+void IncrementCallCounter() {
+ std::unique_lock<std::mutex> lock(global_mu);
+ ++global_num_calls;
+}
+
+void ResetCallCounter() {
+ std::unique_lock<std::mutex> lock(global_mu);
+ global_num_calls = 0;
+}
+
+int GetCallCounterValue() {
+ std::unique_lock<std::mutex> lock(global_mu);
+ return global_num_calls;
+}
+
+} // namespace
+
+class ChannelDataImpl : public ChannelData {
+ public:
+ grpc_error* Init(grpc_channel_element* /*elem*/,
+ grpc_channel_element_args* /*args*/) {
+ IncrementConnectionCounter();
+ return GRPC_ERROR_NONE;
+ }
+};
+
+class CallDataImpl : public CallData {
+ public:
+ void StartTransportStreamOpBatch(grpc_call_element* elem,
+ TransportStreamOpBatch* op) override {
+ // Incrementing the counter could be done from Init(), but we want
+ // to test that the individual methods are actually called correctly.
+ if (op->recv_initial_metadata() != nullptr) IncrementCallCounter();
+ grpc_call_next_op(elem, op->op());
+ }
+};
+
+class FilterEnd2endTest : public ::testing::Test {
+ protected:
+ FilterEnd2endTest() : server_host_("localhost") {}
+
+ static void SetUpTestCase() {
+ // Workaround for
+ // https://github.com/google/google-toolbox-for-mac/issues/242
+ static bool setup_done = false;
+ if (!setup_done) {
+ setup_done = true;
+ grpc::RegisterChannelFilter<ChannelDataImpl, CallDataImpl>(
+ "test-filter", GRPC_SERVER_CHANNEL, INT_MAX, nullptr);
+ }
+ }
+
+ void SetUp() override {
+ int port = grpc_pick_unused_port_or_die();
+ server_address_ << server_host_ << ":" << port;
+ // Setup server
+ ServerBuilder builder;
+ builder.AddListeningPort(server_address_.str(),
+ InsecureServerCredentials());
+ builder.RegisterAsyncGenericService(&generic_service_);
+ srv_cq_ = builder.AddCompletionQueue();
+ server_ = builder.BuildAndStart();
+ }
+
+ void TearDown() override {
+ server_->Shutdown();
+ void* ignored_tag;
+ bool ignored_ok;
+ cli_cq_.Shutdown();
+ srv_cq_->Shutdown();
+ while (cli_cq_.Next(&ignored_tag, &ignored_ok))
+ ;
+ while (srv_cq_->Next(&ignored_tag, &ignored_ok))
+ ;
+ }
+
+ void ResetStub() {
+ std::shared_ptr<Channel> channel = grpc::CreateChannel(
+ server_address_.str(), InsecureChannelCredentials());
+ generic_stub_.reset(new GenericStub(channel));
+ ResetConnectionCounter();
+ ResetCallCounter();
+ }
+
+ void server_ok(int i) { verify_ok(srv_cq_.get(), i, true); }
+ void client_ok(int i) { verify_ok(&cli_cq_, i, true); }
+ void server_fail(int i) { verify_ok(srv_cq_.get(), i, false); }
+ void client_fail(int i) { verify_ok(&cli_cq_, i, false); }
+
+ void SendRpc(int num_rpcs) {
const TString kMethodName("/grpc.cpp.test.util.EchoTestService/Echo");
- for (int i = 0; i < num_rpcs; i++) {
- EchoRequest send_request;
- EchoRequest recv_request;
- EchoResponse send_response;
- EchoResponse recv_response;
- Status recv_status;
-
- ClientContext cli_ctx;
- GenericServerContext srv_ctx;
- GenericServerAsyncReaderWriter stream(&srv_ctx);
-
- // The string needs to be long enough to test heap-based slice.
- send_request.set_message("Hello world. Hello world. Hello world.");
+ for (int i = 0; i < num_rpcs; i++) {
+ EchoRequest send_request;
+ EchoRequest recv_request;
+ EchoResponse send_response;
+ EchoResponse recv_response;
+ Status recv_status;
+
+ ClientContext cli_ctx;
+ GenericServerContext srv_ctx;
+ GenericServerAsyncReaderWriter stream(&srv_ctx);
+
+ // The string needs to be long enough to test heap-based slice.
+ send_request.set_message("Hello world. Hello world. Hello world.");
std::thread request_call([this]() { server_ok(4); });
- std::unique_ptr<GenericClientAsyncReaderWriter> call =
- generic_stub_->PrepareCall(&cli_ctx, kMethodName, &cli_cq_);
- call->StartCall(tag(1));
- client_ok(1);
- std::unique_ptr<ByteBuffer> send_buffer =
- SerializeToByteBuffer(&send_request);
- call->Write(*send_buffer, tag(2));
- // Send ByteBuffer can be destroyed after calling Write.
- send_buffer.reset();
- client_ok(2);
- call->WritesDone(tag(3));
- client_ok(3);
-
- generic_service_.RequestCall(&srv_ctx, &stream, srv_cq_.get(),
- srv_cq_.get(), tag(4));
-
+ std::unique_ptr<GenericClientAsyncReaderWriter> call =
+ generic_stub_->PrepareCall(&cli_ctx, kMethodName, &cli_cq_);
+ call->StartCall(tag(1));
+ client_ok(1);
+ std::unique_ptr<ByteBuffer> send_buffer =
+ SerializeToByteBuffer(&send_request);
+ call->Write(*send_buffer, tag(2));
+ // Send ByteBuffer can be destroyed after calling Write.
+ send_buffer.reset();
+ client_ok(2);
+ call->WritesDone(tag(3));
+ client_ok(3);
+
+ generic_service_.RequestCall(&srv_ctx, &stream, srv_cq_.get(),
+ srv_cq_.get(), tag(4));
+
request_call.join();
- EXPECT_EQ(server_host_, srv_ctx.host().substr(0, server_host_.length()));
- EXPECT_EQ(kMethodName, srv_ctx.method());
- ByteBuffer recv_buffer;
- stream.Read(&recv_buffer, tag(5));
- server_ok(5);
- EXPECT_TRUE(ParseFromByteBuffer(&recv_buffer, &recv_request));
- EXPECT_EQ(send_request.message(), recv_request.message());
-
- send_response.set_message(recv_request.message());
- send_buffer = SerializeToByteBuffer(&send_response);
- stream.Write(*send_buffer, tag(6));
- send_buffer.reset();
- server_ok(6);
-
- stream.Finish(Status::OK, tag(7));
- server_ok(7);
-
- recv_buffer.Clear();
- call->Read(&recv_buffer, tag(8));
- client_ok(8);
- EXPECT_TRUE(ParseFromByteBuffer(&recv_buffer, &recv_response));
-
- call->Finish(&recv_status, tag(9));
- client_ok(9);
-
- EXPECT_EQ(send_response.message(), recv_response.message());
- EXPECT_TRUE(recv_status.ok());
- }
- }
-
- CompletionQueue cli_cq_;
- std::unique_ptr<ServerCompletionQueue> srv_cq_;
- std::unique_ptr<grpc::testing::EchoTestService::Stub> stub_;
- std::unique_ptr<grpc::GenericStub> generic_stub_;
- std::unique_ptr<Server> server_;
- AsyncGenericService generic_service_;
+ EXPECT_EQ(server_host_, srv_ctx.host().substr(0, server_host_.length()));
+ EXPECT_EQ(kMethodName, srv_ctx.method());
+ ByteBuffer recv_buffer;
+ stream.Read(&recv_buffer, tag(5));
+ server_ok(5);
+ EXPECT_TRUE(ParseFromByteBuffer(&recv_buffer, &recv_request));
+ EXPECT_EQ(send_request.message(), recv_request.message());
+
+ send_response.set_message(recv_request.message());
+ send_buffer = SerializeToByteBuffer(&send_response);
+ stream.Write(*send_buffer, tag(6));
+ send_buffer.reset();
+ server_ok(6);
+
+ stream.Finish(Status::OK, tag(7));
+ server_ok(7);
+
+ recv_buffer.Clear();
+ call->Read(&recv_buffer, tag(8));
+ client_ok(8);
+ EXPECT_TRUE(ParseFromByteBuffer(&recv_buffer, &recv_response));
+
+ call->Finish(&recv_status, tag(9));
+ client_ok(9);
+
+ EXPECT_EQ(send_response.message(), recv_response.message());
+ EXPECT_TRUE(recv_status.ok());
+ }
+ }
+
+ CompletionQueue cli_cq_;
+ std::unique_ptr<ServerCompletionQueue> srv_cq_;
+ std::unique_ptr<grpc::testing::EchoTestService::Stub> stub_;
+ std::unique_ptr<grpc::GenericStub> generic_stub_;
+ std::unique_ptr<Server> server_;
+ AsyncGenericService generic_service_;
const TString server_host_;
- std::ostringstream server_address_;
-};
-
-TEST_F(FilterEnd2endTest, SimpleRpc) {
- ResetStub();
- EXPECT_EQ(0, GetConnectionCounterValue());
- EXPECT_EQ(0, GetCallCounterValue());
- SendRpc(1);
- EXPECT_EQ(1, GetConnectionCounterValue());
- EXPECT_EQ(1, GetCallCounterValue());
-}
-
-TEST_F(FilterEnd2endTest, SequentialRpcs) {
- ResetStub();
- EXPECT_EQ(0, GetConnectionCounterValue());
- EXPECT_EQ(0, GetCallCounterValue());
- SendRpc(10);
- EXPECT_EQ(1, GetConnectionCounterValue());
- EXPECT_EQ(10, GetCallCounterValue());
-}
-
-// One ping, one pong.
-TEST_F(FilterEnd2endTest, SimpleBidiStreaming) {
- ResetStub();
- EXPECT_EQ(0, GetConnectionCounterValue());
- EXPECT_EQ(0, GetCallCounterValue());
-
+ std::ostringstream server_address_;
+};
+
+TEST_F(FilterEnd2endTest, SimpleRpc) {
+ ResetStub();
+ EXPECT_EQ(0, GetConnectionCounterValue());
+ EXPECT_EQ(0, GetCallCounterValue());
+ SendRpc(1);
+ EXPECT_EQ(1, GetConnectionCounterValue());
+ EXPECT_EQ(1, GetCallCounterValue());
+}
+
+TEST_F(FilterEnd2endTest, SequentialRpcs) {
+ ResetStub();
+ EXPECT_EQ(0, GetConnectionCounterValue());
+ EXPECT_EQ(0, GetCallCounterValue());
+ SendRpc(10);
+ EXPECT_EQ(1, GetConnectionCounterValue());
+ EXPECT_EQ(10, GetCallCounterValue());
+}
+
+// One ping, one pong.
+TEST_F(FilterEnd2endTest, SimpleBidiStreaming) {
+ ResetStub();
+ EXPECT_EQ(0, GetConnectionCounterValue());
+ EXPECT_EQ(0, GetCallCounterValue());
+
const TString kMethodName(
- "/grpc.cpp.test.util.EchoTestService/BidiStream");
- EchoRequest send_request;
- EchoRequest recv_request;
- EchoResponse send_response;
- EchoResponse recv_response;
- Status recv_status;
- ClientContext cli_ctx;
- GenericServerContext srv_ctx;
- GenericServerAsyncReaderWriter srv_stream(&srv_ctx);
-
- cli_ctx.set_compression_algorithm(GRPC_COMPRESS_GZIP);
- send_request.set_message("Hello");
+ "/grpc.cpp.test.util.EchoTestService/BidiStream");
+ EchoRequest send_request;
+ EchoRequest recv_request;
+ EchoResponse send_response;
+ EchoResponse recv_response;
+ Status recv_status;
+ ClientContext cli_ctx;
+ GenericServerContext srv_ctx;
+ GenericServerAsyncReaderWriter srv_stream(&srv_ctx);
+
+ cli_ctx.set_compression_algorithm(GRPC_COMPRESS_GZIP);
+ send_request.set_message("Hello");
std::thread request_call([this]() { server_ok(2); });
- std::unique_ptr<GenericClientAsyncReaderWriter> cli_stream =
- generic_stub_->PrepareCall(&cli_ctx, kMethodName, &cli_cq_);
- cli_stream->StartCall(tag(1));
- client_ok(1);
-
- generic_service_.RequestCall(&srv_ctx, &srv_stream, srv_cq_.get(),
- srv_cq_.get(), tag(2));
-
+ std::unique_ptr<GenericClientAsyncReaderWriter> cli_stream =
+ generic_stub_->PrepareCall(&cli_ctx, kMethodName, &cli_cq_);
+ cli_stream->StartCall(tag(1));
+ client_ok(1);
+
+ generic_service_.RequestCall(&srv_ctx, &srv_stream, srv_cq_.get(),
+ srv_cq_.get(), tag(2));
+
request_call.join();
- EXPECT_EQ(server_host_, srv_ctx.host().substr(0, server_host_.length()));
- EXPECT_EQ(kMethodName, srv_ctx.method());
-
- std::unique_ptr<ByteBuffer> send_buffer =
- SerializeToByteBuffer(&send_request);
- cli_stream->Write(*send_buffer, tag(3));
- send_buffer.reset();
- client_ok(3);
-
- ByteBuffer recv_buffer;
- srv_stream.Read(&recv_buffer, tag(4));
- server_ok(4);
- EXPECT_TRUE(ParseFromByteBuffer(&recv_buffer, &recv_request));
- EXPECT_EQ(send_request.message(), recv_request.message());
-
- send_response.set_message(recv_request.message());
- send_buffer = SerializeToByteBuffer(&send_response);
- srv_stream.Write(*send_buffer, tag(5));
- send_buffer.reset();
- server_ok(5);
-
- cli_stream->Read(&recv_buffer, tag(6));
- client_ok(6);
- EXPECT_TRUE(ParseFromByteBuffer(&recv_buffer, &recv_response));
- EXPECT_EQ(send_response.message(), recv_response.message());
-
- cli_stream->WritesDone(tag(7));
- client_ok(7);
-
- srv_stream.Read(&recv_buffer, tag(8));
- server_fail(8);
-
- srv_stream.Finish(Status::OK, tag(9));
- server_ok(9);
-
- cli_stream->Finish(&recv_status, tag(10));
- client_ok(10);
-
- EXPECT_EQ(send_response.message(), recv_response.message());
- EXPECT_TRUE(recv_status.ok());
-
- EXPECT_EQ(1, GetCallCounterValue());
- EXPECT_EQ(1, GetConnectionCounterValue());
-}
-
-} // namespace
-} // namespace testing
-} // namespace grpc
-
-int main(int argc, char** argv) {
- grpc::testing::TestEnvironment env(argc, argv);
- ::testing::InitGoogleTest(&argc, argv);
- return RUN_ALL_TESTS();
-}
+ EXPECT_EQ(server_host_, srv_ctx.host().substr(0, server_host_.length()));
+ EXPECT_EQ(kMethodName, srv_ctx.method());
+
+ std::unique_ptr<ByteBuffer> send_buffer =
+ SerializeToByteBuffer(&send_request);
+ cli_stream->Write(*send_buffer, tag(3));
+ send_buffer.reset();
+ client_ok(3);
+
+ ByteBuffer recv_buffer;
+ srv_stream.Read(&recv_buffer, tag(4));
+ server_ok(4);
+ EXPECT_TRUE(ParseFromByteBuffer(&recv_buffer, &recv_request));
+ EXPECT_EQ(send_request.message(), recv_request.message());
+
+ send_response.set_message(recv_request.message());
+ send_buffer = SerializeToByteBuffer(&send_response);
+ srv_stream.Write(*send_buffer, tag(5));
+ send_buffer.reset();
+ server_ok(5);
+
+ cli_stream->Read(&recv_buffer, tag(6));
+ client_ok(6);
+ EXPECT_TRUE(ParseFromByteBuffer(&recv_buffer, &recv_response));
+ EXPECT_EQ(send_response.message(), recv_response.message());
+
+ cli_stream->WritesDone(tag(7));
+ client_ok(7);
+
+ srv_stream.Read(&recv_buffer, tag(8));
+ server_fail(8);
+
+ srv_stream.Finish(Status::OK, tag(9));
+ server_ok(9);
+
+ cli_stream->Finish(&recv_status, tag(10));
+ client_ok(10);
+
+ EXPECT_EQ(send_response.message(), recv_response.message());
+ EXPECT_TRUE(recv_status.ok());
+
+ EXPECT_EQ(1, GetCallCounterValue());
+ EXPECT_EQ(1, GetConnectionCounterValue());
+}
+
+} // namespace
+} // namespace testing
+} // namespace grpc
+
+int main(int argc, char** argv) {
+ grpc::testing::TestEnvironment env(argc, argv);
+ ::testing::InitGoogleTest(&argc, argv);
+ return RUN_ALL_TESTS();
+}
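
filter_end2end_test.cc registers a server channel filter through the internal channel_filter.h helpers and then counts connections and calls from ChannelDataImpl::Init() and CallDataImpl::StartTransportStreamOpBatch(). A stripped-down version of the same registration pattern is sketched below; the filter name is illustrative, and the code depends on gRPC internals exactly as the test does.

// Sketch: a minimal logging filter following the pattern used in
// FilterEnd2endTest::SetUpTestCase().
#include <climits>

#include <grpc/support/log.h>

#include "src/cpp/common/channel_filter.h"

class LoggingChannelData : public grpc::ChannelData {};

class LoggingCallData : public grpc::CallData {
 public:
  void StartTransportStreamOpBatch(grpc_call_element* elem,
                                   grpc::TransportStreamOpBatch* op) override {
    // Log once per call, keyed off the same batch field the test inspects.
    if (op->recv_initial_metadata() != nullptr) {
      gpr_log(GPR_INFO, "logging-filter: incoming call");
    }
    grpc_call_next_op(elem, op->op());
  }
};

void RegisterLoggingFilter() {
  grpc::RegisterChannelFilter<LoggingChannelData, LoggingCallData>(
      "logging-filter", GRPC_SERVER_CHANNEL, INT_MAX, nullptr);
}
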
diff --git a/contrib/libs/grpc/test/cpp/end2end/generic_end2end_test.cc b/contrib/libs/grpc/test/cpp/end2end/generic_end2end_test.cc
index cbe9289302..59eec49fb2 100644
--- a/contrib/libs/grpc/test/cpp/end2end/generic_end2end_test.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/generic_end2end_test.cc
@@ -1,430 +1,430 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include <memory>
-#include <thread>
-
-#include <grpc/grpc.h>
-#include <grpc/support/time.h>
-#include <grpcpp/channel.h>
-#include <grpcpp/client_context.h>
-#include <grpcpp/create_channel.h>
-#include <grpcpp/generic/async_generic_service.h>
-#include <grpcpp/generic/generic_stub.h>
-#include <grpcpp/impl/codegen/proto_utils.h>
-#include <grpcpp/server.h>
-#include <grpcpp/server_builder.h>
-#include <grpcpp/server_context.h>
-#include <grpcpp/support/slice.h>
-
-#include "src/proto/grpc/testing/echo.grpc.pb.h"
-#include "test/core/util/port.h"
-#include "test/core/util/test_config.h"
-#include "test/cpp/util/byte_buffer_proto_helper.h"
-
-#include <gtest/gtest.h>
-
-using grpc::testing::EchoRequest;
-using grpc::testing::EchoResponse;
-using std::chrono::system_clock;
-
-namespace grpc {
-namespace testing {
-namespace {
-
-void* tag(int i) { return (void*)static_cast<intptr_t>(i); }
-
-void verify_ok(CompletionQueue* cq, int i, bool expect_ok) {
- bool ok;
- void* got_tag;
- EXPECT_TRUE(cq->Next(&got_tag, &ok));
- EXPECT_EQ(expect_ok, ok);
- EXPECT_EQ(tag(i), got_tag);
-}
-
-class GenericEnd2endTest : public ::testing::Test {
- protected:
- GenericEnd2endTest() : server_host_("localhost") {}
-
- void SetUp() override {
- shut_down_ = false;
- int port = grpc_pick_unused_port_or_die();
- server_address_ << server_host_ << ":" << port;
- // Setup server
- ServerBuilder builder;
- builder.AddListeningPort(server_address_.str(),
- InsecureServerCredentials());
- builder.RegisterAsyncGenericService(&generic_service_);
- // Include a second call to RegisterAsyncGenericService to make sure that
- // we get an error in the log, since it is not allowed to have 2 async
- // generic services
- builder.RegisterAsyncGenericService(&generic_service_);
- srv_cq_ = builder.AddCompletionQueue();
- server_ = builder.BuildAndStart();
- }
-
- void ShutDownServerAndCQs() {
- if (!shut_down_) {
- server_->Shutdown();
- void* ignored_tag;
- bool ignored_ok;
- cli_cq_.Shutdown();
- srv_cq_->Shutdown();
- while (cli_cq_.Next(&ignored_tag, &ignored_ok))
- ;
- while (srv_cq_->Next(&ignored_tag, &ignored_ok))
- ;
- shut_down_ = true;
- }
- }
- void TearDown() override { ShutDownServerAndCQs(); }
-
- void ResetStub() {
- std::shared_ptr<Channel> channel = grpc::CreateChannel(
- server_address_.str(), InsecureChannelCredentials());
- stub_ = grpc::testing::EchoTestService::NewStub(channel);
- generic_stub_.reset(new GenericStub(channel));
- }
-
- void server_ok(int i) { verify_ok(srv_cq_.get(), i, true); }
- void client_ok(int i) { verify_ok(&cli_cq_, i, true); }
- void server_fail(int i) { verify_ok(srv_cq_.get(), i, false); }
- void client_fail(int i) { verify_ok(&cli_cq_, i, false); }
-
- void SendRpc(int num_rpcs) {
- SendRpc(num_rpcs, false, gpr_inf_future(GPR_CLOCK_MONOTONIC));
- }
-
- void SendRpc(int num_rpcs, bool check_deadline, gpr_timespec deadline) {
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <memory>
+#include <thread>
+
+#include <grpc/grpc.h>
+#include <grpc/support/time.h>
+#include <grpcpp/channel.h>
+#include <grpcpp/client_context.h>
+#include <grpcpp/create_channel.h>
+#include <grpcpp/generic/async_generic_service.h>
+#include <grpcpp/generic/generic_stub.h>
+#include <grpcpp/impl/codegen/proto_utils.h>
+#include <grpcpp/server.h>
+#include <grpcpp/server_builder.h>
+#include <grpcpp/server_context.h>
+#include <grpcpp/support/slice.h>
+
+#include "src/proto/grpc/testing/echo.grpc.pb.h"
+#include "test/core/util/port.h"
+#include "test/core/util/test_config.h"
+#include "test/cpp/util/byte_buffer_proto_helper.h"
+
+#include <gtest/gtest.h>
+
+using grpc::testing::EchoRequest;
+using grpc::testing::EchoResponse;
+using std::chrono::system_clock;
+
+namespace grpc {
+namespace testing {
+namespace {
+
+void* tag(int i) { return (void*)static_cast<intptr_t>(i); }
+
+void verify_ok(CompletionQueue* cq, int i, bool expect_ok) {
+ bool ok;
+ void* got_tag;
+ EXPECT_TRUE(cq->Next(&got_tag, &ok));
+ EXPECT_EQ(expect_ok, ok);
+ EXPECT_EQ(tag(i), got_tag);
+}
+
+class GenericEnd2endTest : public ::testing::Test {
+ protected:
+ GenericEnd2endTest() : server_host_("localhost") {}
+
+ void SetUp() override {
+ shut_down_ = false;
+ int port = grpc_pick_unused_port_or_die();
+ server_address_ << server_host_ << ":" << port;
+ // Setup server
+ ServerBuilder builder;
+ builder.AddListeningPort(server_address_.str(),
+ InsecureServerCredentials());
+ builder.RegisterAsyncGenericService(&generic_service_);
+ // Include a second call to RegisterAsyncGenericService to make sure that
+ // we get an error in the log, since it is not allowed to have 2 async
+ // generic services
+ builder.RegisterAsyncGenericService(&generic_service_);
+ srv_cq_ = builder.AddCompletionQueue();
+ server_ = builder.BuildAndStart();
+ }
+
+ void ShutDownServerAndCQs() {
+ if (!shut_down_) {
+ server_->Shutdown();
+ void* ignored_tag;
+ bool ignored_ok;
+ cli_cq_.Shutdown();
+ srv_cq_->Shutdown();
+ while (cli_cq_.Next(&ignored_tag, &ignored_ok))
+ ;
+ while (srv_cq_->Next(&ignored_tag, &ignored_ok))
+ ;
+ shut_down_ = true;
+ }
+ }
+ void TearDown() override { ShutDownServerAndCQs(); }
+
+ void ResetStub() {
+ std::shared_ptr<Channel> channel = grpc::CreateChannel(
+ server_address_.str(), InsecureChannelCredentials());
+ stub_ = grpc::testing::EchoTestService::NewStub(channel);
+ generic_stub_.reset(new GenericStub(channel));
+ }
+
+ void server_ok(int i) { verify_ok(srv_cq_.get(), i, true); }
+ void client_ok(int i) { verify_ok(&cli_cq_, i, true); }
+ void server_fail(int i) { verify_ok(srv_cq_.get(), i, false); }
+ void client_fail(int i) { verify_ok(&cli_cq_, i, false); }
+
+ void SendRpc(int num_rpcs) {
+ SendRpc(num_rpcs, false, gpr_inf_future(GPR_CLOCK_MONOTONIC));
+ }
+
+ void SendRpc(int num_rpcs, bool check_deadline, gpr_timespec deadline) {
const TString kMethodName("/grpc.cpp.test.util.EchoTestService/Echo");
- for (int i = 0; i < num_rpcs; i++) {
- EchoRequest send_request;
- EchoRequest recv_request;
- EchoResponse send_response;
- EchoResponse recv_response;
- Status recv_status;
-
- ClientContext cli_ctx;
- GenericServerContext srv_ctx;
- GenericServerAsyncReaderWriter stream(&srv_ctx);
-
- // The string needs to be long enough to test heap-based slice.
- send_request.set_message("Hello world. Hello world. Hello world.");
-
- if (check_deadline) {
- cli_ctx.set_deadline(deadline);
- }
-
- // Rather than using the original kMethodName, make a short-lived
- // copy to also confirm that we don't refer to this object beyond
- // the initial call preparation
+ for (int i = 0; i < num_rpcs; i++) {
+ EchoRequest send_request;
+ EchoRequest recv_request;
+ EchoResponse send_response;
+ EchoResponse recv_response;
+ Status recv_status;
+
+ ClientContext cli_ctx;
+ GenericServerContext srv_ctx;
+ GenericServerAsyncReaderWriter stream(&srv_ctx);
+
+ // The string needs to be long enough to test heap-based slice.
+ send_request.set_message("Hello world. Hello world. Hello world.");
+
+ if (check_deadline) {
+ cli_ctx.set_deadline(deadline);
+ }
+
+ // Rather than using the original kMethodName, make a short-lived
+ // copy to also confirm that we don't refer to this object beyond
+ // the initial call preparation
const TString* method_name = new TString(kMethodName);
-
- std::unique_ptr<GenericClientAsyncReaderWriter> call =
- generic_stub_->PrepareCall(&cli_ctx, *method_name, &cli_cq_);
-
- delete method_name; // Make sure that this is not needed after invocation
-
+
+ std::unique_ptr<GenericClientAsyncReaderWriter> call =
+ generic_stub_->PrepareCall(&cli_ctx, *method_name, &cli_cq_);
+
+ delete method_name; // Make sure that this is not needed after invocation
+
std::thread request_call([this]() { server_ok(4); });
- call->StartCall(tag(1));
- client_ok(1);
- std::unique_ptr<ByteBuffer> send_buffer =
- SerializeToByteBuffer(&send_request);
- call->Write(*send_buffer, tag(2));
- // Send ByteBuffer can be destroyed after calling Write.
- send_buffer.reset();
- client_ok(2);
- call->WritesDone(tag(3));
- client_ok(3);
-
- generic_service_.RequestCall(&srv_ctx, &stream, srv_cq_.get(),
- srv_cq_.get(), tag(4));
-
+ call->StartCall(tag(1));
+ client_ok(1);
+ std::unique_ptr<ByteBuffer> send_buffer =
+ SerializeToByteBuffer(&send_request);
+ call->Write(*send_buffer, tag(2));
+ // Send ByteBuffer can be destroyed after calling Write.
+ send_buffer.reset();
+ client_ok(2);
+ call->WritesDone(tag(3));
+ client_ok(3);
+
+ generic_service_.RequestCall(&srv_ctx, &stream, srv_cq_.get(),
+ srv_cq_.get(), tag(4));
+
request_call.join();
- EXPECT_EQ(server_host_, srv_ctx.host().substr(0, server_host_.length()));
- EXPECT_EQ(kMethodName, srv_ctx.method());
-
- if (check_deadline) {
- EXPECT_TRUE(gpr_time_similar(deadline, srv_ctx.raw_deadline(),
- gpr_time_from_millis(1000, GPR_TIMESPAN)));
- }
-
- ByteBuffer recv_buffer;
- stream.Read(&recv_buffer, tag(5));
- server_ok(5);
- EXPECT_TRUE(ParseFromByteBuffer(&recv_buffer, &recv_request));
- EXPECT_EQ(send_request.message(), recv_request.message());
-
- send_response.set_message(recv_request.message());
- send_buffer = SerializeToByteBuffer(&send_response);
- stream.Write(*send_buffer, tag(6));
- send_buffer.reset();
- server_ok(6);
-
- stream.Finish(Status::OK, tag(7));
- server_ok(7);
-
- recv_buffer.Clear();
- call->Read(&recv_buffer, tag(8));
- client_ok(8);
- EXPECT_TRUE(ParseFromByteBuffer(&recv_buffer, &recv_response));
-
- call->Finish(&recv_status, tag(9));
- client_ok(9);
-
- EXPECT_EQ(send_response.message(), recv_response.message());
- EXPECT_TRUE(recv_status.ok());
- }
- }
-
- // Return errors to up to one call that comes in on the supplied completion
- // queue, until the CQ is being shut down (and therefore we can no longer
- // enqueue further events).
- void DriveCompletionQueue() {
- enum class Event : uintptr_t {
- kCallReceived,
- kResponseSent,
- };
- // Request the call, but only if the main thread hasn't beaten us to
- // shutting down the CQ.
- grpc::GenericServerContext server_context;
- grpc::GenericServerAsyncReaderWriter reader_writer(&server_context);
-
- {
- std::lock_guard<std::mutex> lock(shutting_down_mu_);
- if (!shutting_down_) {
- generic_service_.RequestCall(
- &server_context, &reader_writer, srv_cq_.get(), srv_cq_.get(),
- reinterpret_cast<void*>(Event::kCallReceived));
- }
- }
- // Process events.
- {
- Event event;
- bool ok;
- while (srv_cq_->Next(reinterpret_cast<void**>(&event), &ok)) {
- std::lock_guard<std::mutex> lock(shutting_down_mu_);
- if (shutting_down_) {
- // The main thread has started shutting down. Simply continue to drain
- // events.
- continue;
- }
-
- switch (event) {
- case Event::kCallReceived:
- reader_writer.Finish(
- ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "go away"),
- reinterpret_cast<void*>(Event::kResponseSent));
- break;
-
- case Event::kResponseSent:
- // We are done.
- break;
- }
- }
- }
- }
-
- CompletionQueue cli_cq_;
- std::unique_ptr<ServerCompletionQueue> srv_cq_;
- std::unique_ptr<grpc::testing::EchoTestService::Stub> stub_;
- std::unique_ptr<grpc::GenericStub> generic_stub_;
- std::unique_ptr<Server> server_;
- AsyncGenericService generic_service_;
+ EXPECT_EQ(server_host_, srv_ctx.host().substr(0, server_host_.length()));
+ EXPECT_EQ(kMethodName, srv_ctx.method());
+
+ if (check_deadline) {
+ EXPECT_TRUE(gpr_time_similar(deadline, srv_ctx.raw_deadline(),
+ gpr_time_from_millis(1000, GPR_TIMESPAN)));
+ }
+
+ ByteBuffer recv_buffer;
+ stream.Read(&recv_buffer, tag(5));
+ server_ok(5);
+ EXPECT_TRUE(ParseFromByteBuffer(&recv_buffer, &recv_request));
+ EXPECT_EQ(send_request.message(), recv_request.message());
+
+ send_response.set_message(recv_request.message());
+ send_buffer = SerializeToByteBuffer(&send_response);
+ stream.Write(*send_buffer, tag(6));
+ send_buffer.reset();
+ server_ok(6);
+
+ stream.Finish(Status::OK, tag(7));
+ server_ok(7);
+
+ recv_buffer.Clear();
+ call->Read(&recv_buffer, tag(8));
+ client_ok(8);
+ EXPECT_TRUE(ParseFromByteBuffer(&recv_buffer, &recv_response));
+
+ call->Finish(&recv_status, tag(9));
+ client_ok(9);
+
+ EXPECT_EQ(send_response.message(), recv_response.message());
+ EXPECT_TRUE(recv_status.ok());
+ }
+ }
+
+ // Return errors to up to one call that comes in on the supplied completion
+ // queue, until the CQ is being shut down (and therefore we can no longer
+ // enqueue further events).
+ void DriveCompletionQueue() {
+ enum class Event : uintptr_t {
+ kCallReceived,
+ kResponseSent,
+ };
+ // Request the call, but only if the main thread hasn't beaten us to
+ // shutting down the CQ.
+ grpc::GenericServerContext server_context;
+ grpc::GenericServerAsyncReaderWriter reader_writer(&server_context);
+
+ {
+ std::lock_guard<std::mutex> lock(shutting_down_mu_);
+ if (!shutting_down_) {
+ generic_service_.RequestCall(
+ &server_context, &reader_writer, srv_cq_.get(), srv_cq_.get(),
+ reinterpret_cast<void*>(Event::kCallReceived));
+ }
+ }
+ // Process events.
+ {
+ Event event;
+ bool ok;
+ while (srv_cq_->Next(reinterpret_cast<void**>(&event), &ok)) {
+ std::lock_guard<std::mutex> lock(shutting_down_mu_);
+ if (shutting_down_) {
+ // The main thread has started shutting down. Simply continue to drain
+ // events.
+ continue;
+ }
+
+ switch (event) {
+ case Event::kCallReceived:
+ reader_writer.Finish(
+ ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "go away"),
+ reinterpret_cast<void*>(Event::kResponseSent));
+ break;
+
+ case Event::kResponseSent:
+ // We are done.
+ break;
+ }
+ }
+ }
+ }
+
+ CompletionQueue cli_cq_;
+ std::unique_ptr<ServerCompletionQueue> srv_cq_;
+ std::unique_ptr<grpc::testing::EchoTestService::Stub> stub_;
+ std::unique_ptr<grpc::GenericStub> generic_stub_;
+ std::unique_ptr<Server> server_;
+ AsyncGenericService generic_service_;
const TString server_host_;
- std::ostringstream server_address_;
- bool shutting_down_;
- bool shut_down_;
- std::mutex shutting_down_mu_;
-};
-
-TEST_F(GenericEnd2endTest, SimpleRpc) {
- ResetStub();
- SendRpc(1);
-}
-
-TEST_F(GenericEnd2endTest, SequentialRpcs) {
- ResetStub();
- SendRpc(10);
-}
-
-TEST_F(GenericEnd2endTest, SequentialUnaryRpcs) {
- ResetStub();
- const int num_rpcs = 10;
+ std::ostringstream server_address_;
+ bool shutting_down_;
+ bool shut_down_;
+ std::mutex shutting_down_mu_;
+};
+
+TEST_F(GenericEnd2endTest, SimpleRpc) {
+ ResetStub();
+ SendRpc(1);
+}
+
+TEST_F(GenericEnd2endTest, SequentialRpcs) {
+ ResetStub();
+ SendRpc(10);
+}
+
+TEST_F(GenericEnd2endTest, SequentialUnaryRpcs) {
+ ResetStub();
+ const int num_rpcs = 10;
const TString kMethodName("/grpc.cpp.test.util.EchoTestService/Echo");
- for (int i = 0; i < num_rpcs; i++) {
- EchoRequest send_request;
- EchoRequest recv_request;
- EchoResponse send_response;
- EchoResponse recv_response;
- Status recv_status;
-
- ClientContext cli_ctx;
- GenericServerContext srv_ctx;
- GenericServerAsyncReaderWriter stream(&srv_ctx);
-
- // The string needs to be long enough to test heap-based slice.
- send_request.set_message("Hello world. Hello world. Hello world.");
-
- std::unique_ptr<ByteBuffer> cli_send_buffer =
- SerializeToByteBuffer(&send_request);
+ for (int i = 0; i < num_rpcs; i++) {
+ EchoRequest send_request;
+ EchoRequest recv_request;
+ EchoResponse send_response;
+ EchoResponse recv_response;
+ Status recv_status;
+
+ ClientContext cli_ctx;
+ GenericServerContext srv_ctx;
+ GenericServerAsyncReaderWriter stream(&srv_ctx);
+
+ // The string needs to be long enough to test heap-based slice.
+ send_request.set_message("Hello world. Hello world. Hello world.");
+
+ std::unique_ptr<ByteBuffer> cli_send_buffer =
+ SerializeToByteBuffer(&send_request);
std::thread request_call([this]() { server_ok(4); });
- std::unique_ptr<GenericClientAsyncResponseReader> call =
- generic_stub_->PrepareUnaryCall(&cli_ctx, kMethodName,
- *cli_send_buffer.get(), &cli_cq_);
- call->StartCall();
- ByteBuffer cli_recv_buffer;
- call->Finish(&cli_recv_buffer, &recv_status, tag(1));
- std::thread client_check([this] { client_ok(1); });
-
- generic_service_.RequestCall(&srv_ctx, &stream, srv_cq_.get(),
- srv_cq_.get(), tag(4));
+ std::unique_ptr<GenericClientAsyncResponseReader> call =
+ generic_stub_->PrepareUnaryCall(&cli_ctx, kMethodName,
+ *cli_send_buffer.get(), &cli_cq_);
+ call->StartCall();
+ ByteBuffer cli_recv_buffer;
+ call->Finish(&cli_recv_buffer, &recv_status, tag(1));
+ std::thread client_check([this] { client_ok(1); });
+
+ generic_service_.RequestCall(&srv_ctx, &stream, srv_cq_.get(),
+ srv_cq_.get(), tag(4));
request_call.join();
- EXPECT_EQ(server_host_, srv_ctx.host().substr(0, server_host_.length()));
- EXPECT_EQ(kMethodName, srv_ctx.method());
-
- ByteBuffer srv_recv_buffer;
- stream.Read(&srv_recv_buffer, tag(5));
- server_ok(5);
- EXPECT_TRUE(ParseFromByteBuffer(&srv_recv_buffer, &recv_request));
- EXPECT_EQ(send_request.message(), recv_request.message());
-
- send_response.set_message(recv_request.message());
- std::unique_ptr<ByteBuffer> srv_send_buffer =
- SerializeToByteBuffer(&send_response);
- stream.Write(*srv_send_buffer, tag(6));
- server_ok(6);
-
- stream.Finish(Status::OK, tag(7));
- server_ok(7);
-
- client_check.join();
- EXPECT_TRUE(ParseFromByteBuffer(&cli_recv_buffer, &recv_response));
- EXPECT_EQ(send_response.message(), recv_response.message());
- EXPECT_TRUE(recv_status.ok());
- }
-}
-
-// One ping, one pong.
-TEST_F(GenericEnd2endTest, SimpleBidiStreaming) {
- ResetStub();
-
+ EXPECT_EQ(server_host_, srv_ctx.host().substr(0, server_host_.length()));
+ EXPECT_EQ(kMethodName, srv_ctx.method());
+
+ ByteBuffer srv_recv_buffer;
+ stream.Read(&srv_recv_buffer, tag(5));
+ server_ok(5);
+ EXPECT_TRUE(ParseFromByteBuffer(&srv_recv_buffer, &recv_request));
+ EXPECT_EQ(send_request.message(), recv_request.message());
+
+ send_response.set_message(recv_request.message());
+ std::unique_ptr<ByteBuffer> srv_send_buffer =
+ SerializeToByteBuffer(&send_response);
+ stream.Write(*srv_send_buffer, tag(6));
+ server_ok(6);
+
+ stream.Finish(Status::OK, tag(7));
+ server_ok(7);
+
+ client_check.join();
+ EXPECT_TRUE(ParseFromByteBuffer(&cli_recv_buffer, &recv_response));
+ EXPECT_EQ(send_response.message(), recv_response.message());
+ EXPECT_TRUE(recv_status.ok());
+ }
+}
+
+// One ping, one pong.
+TEST_F(GenericEnd2endTest, SimpleBidiStreaming) {
+ ResetStub();
+
const TString kMethodName(
- "/grpc.cpp.test.util.EchoTestService/BidiStream");
- EchoRequest send_request;
- EchoRequest recv_request;
- EchoResponse send_response;
- EchoResponse recv_response;
- Status recv_status;
- ClientContext cli_ctx;
- GenericServerContext srv_ctx;
- GenericServerAsyncReaderWriter srv_stream(&srv_ctx);
-
- cli_ctx.set_compression_algorithm(GRPC_COMPRESS_GZIP);
- send_request.set_message("Hello");
+ "/grpc.cpp.test.util.EchoTestService/BidiStream");
+ EchoRequest send_request;
+ EchoRequest recv_request;
+ EchoResponse send_response;
+ EchoResponse recv_response;
+ Status recv_status;
+ ClientContext cli_ctx;
+ GenericServerContext srv_ctx;
+ GenericServerAsyncReaderWriter srv_stream(&srv_ctx);
+
+ cli_ctx.set_compression_algorithm(GRPC_COMPRESS_GZIP);
+ send_request.set_message("Hello");
std::thread request_call([this]() { server_ok(2); });
- std::unique_ptr<GenericClientAsyncReaderWriter> cli_stream =
- generic_stub_->PrepareCall(&cli_ctx, kMethodName, &cli_cq_);
- cli_stream->StartCall(tag(1));
- client_ok(1);
-
- generic_service_.RequestCall(&srv_ctx, &srv_stream, srv_cq_.get(),
- srv_cq_.get(), tag(2));
+ std::unique_ptr<GenericClientAsyncReaderWriter> cli_stream =
+ generic_stub_->PrepareCall(&cli_ctx, kMethodName, &cli_cq_);
+ cli_stream->StartCall(tag(1));
+ client_ok(1);
+
+ generic_service_.RequestCall(&srv_ctx, &srv_stream, srv_cq_.get(),
+ srv_cq_.get(), tag(2));
request_call.join();
-
- EXPECT_EQ(server_host_, srv_ctx.host().substr(0, server_host_.length()));
- EXPECT_EQ(kMethodName, srv_ctx.method());
-
- std::unique_ptr<ByteBuffer> send_buffer =
- SerializeToByteBuffer(&send_request);
- cli_stream->Write(*send_buffer, tag(3));
- send_buffer.reset();
- client_ok(3);
-
- ByteBuffer recv_buffer;
- srv_stream.Read(&recv_buffer, tag(4));
- server_ok(4);
- EXPECT_TRUE(ParseFromByteBuffer(&recv_buffer, &recv_request));
- EXPECT_EQ(send_request.message(), recv_request.message());
-
- send_response.set_message(recv_request.message());
- send_buffer = SerializeToByteBuffer(&send_response);
- srv_stream.Write(*send_buffer, tag(5));
- send_buffer.reset();
- server_ok(5);
-
- cli_stream->Read(&recv_buffer, tag(6));
- client_ok(6);
- EXPECT_TRUE(ParseFromByteBuffer(&recv_buffer, &recv_response));
- EXPECT_EQ(send_response.message(), recv_response.message());
-
- cli_stream->WritesDone(tag(7));
- client_ok(7);
-
- srv_stream.Read(&recv_buffer, tag(8));
- server_fail(8);
-
- srv_stream.Finish(Status::OK, tag(9));
- server_ok(9);
-
- cli_stream->Finish(&recv_status, tag(10));
- client_ok(10);
-
- EXPECT_EQ(send_response.message(), recv_response.message());
- EXPECT_TRUE(recv_status.ok());
-}
-
-TEST_F(GenericEnd2endTest, Deadline) {
- ResetStub();
- SendRpc(1, true,
- gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
- gpr_time_from_seconds(10, GPR_TIMESPAN)));
-}
-
-TEST_F(GenericEnd2endTest, ShortDeadline) {
- ResetStub();
-
- ClientContext cli_ctx;
- EchoRequest request;
- EchoResponse response;
-
- shutting_down_ = false;
- std::thread driver([this] { DriveCompletionQueue(); });
-
- request.set_message("");
- cli_ctx.set_deadline(gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
- gpr_time_from_micros(500, GPR_TIMESPAN)));
- Status s = stub_->Echo(&cli_ctx, request, &response);
- EXPECT_FALSE(s.ok());
- {
- std::lock_guard<std::mutex> lock(shutting_down_mu_);
- shutting_down_ = true;
- }
- ShutDownServerAndCQs();
- driver.join();
-}
-
-} // namespace
-} // namespace testing
-} // namespace grpc
-
-int main(int argc, char** argv) {
- grpc::testing::TestEnvironment env(argc, argv);
- ::testing::InitGoogleTest(&argc, argv);
- return RUN_ALL_TESTS();
-}
+
+ EXPECT_EQ(server_host_, srv_ctx.host().substr(0, server_host_.length()));
+ EXPECT_EQ(kMethodName, srv_ctx.method());
+
+ std::unique_ptr<ByteBuffer> send_buffer =
+ SerializeToByteBuffer(&send_request);
+ cli_stream->Write(*send_buffer, tag(3));
+ send_buffer.reset();
+ client_ok(3);
+
+ ByteBuffer recv_buffer;
+ srv_stream.Read(&recv_buffer, tag(4));
+ server_ok(4);
+ EXPECT_TRUE(ParseFromByteBuffer(&recv_buffer, &recv_request));
+ EXPECT_EQ(send_request.message(), recv_request.message());
+
+ send_response.set_message(recv_request.message());
+ send_buffer = SerializeToByteBuffer(&send_response);
+ srv_stream.Write(*send_buffer, tag(5));
+ send_buffer.reset();
+ server_ok(5);
+
+ cli_stream->Read(&recv_buffer, tag(6));
+ client_ok(6);
+ EXPECT_TRUE(ParseFromByteBuffer(&recv_buffer, &recv_response));
+ EXPECT_EQ(send_response.message(), recv_response.message());
+
+ cli_stream->WritesDone(tag(7));
+ client_ok(7);
+
+ srv_stream.Read(&recv_buffer, tag(8));
+ server_fail(8);
+
+ srv_stream.Finish(Status::OK, tag(9));
+ server_ok(9);
+
+ cli_stream->Finish(&recv_status, tag(10));
+ client_ok(10);
+
+ EXPECT_EQ(send_response.message(), recv_response.message());
+ EXPECT_TRUE(recv_status.ok());
+}
+
+TEST_F(GenericEnd2endTest, Deadline) {
+ ResetStub();
+ SendRpc(1, true,
+ gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
+ gpr_time_from_seconds(10, GPR_TIMESPAN)));
+}
+
+TEST_F(GenericEnd2endTest, ShortDeadline) {
+ ResetStub();
+
+ ClientContext cli_ctx;
+ EchoRequest request;
+ EchoResponse response;
+
+ shutting_down_ = false;
+ std::thread driver([this] { DriveCompletionQueue(); });
+
+ request.set_message("");
+ cli_ctx.set_deadline(gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
+ gpr_time_from_micros(500, GPR_TIMESPAN)));
+ Status s = stub_->Echo(&cli_ctx, request, &response);
+ EXPECT_FALSE(s.ok());
+ {
+ std::lock_guard<std::mutex> lock(shutting_down_mu_);
+ shutting_down_ = true;
+ }
+ ShutDownServerAndCQs();
+ driver.join();
+}
+
+} // namespace
+} // namespace testing
+} // namespace grpc
+
+int main(int argc, char** argv) {
+ grpc::testing::TestEnvironment env(argc, argv);
+ ::testing::InitGoogleTest(&argc, argv);
+ return RUN_ALL_TESTS();
+}
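
generic_end2end_test.cc drives both ends of a call through ByteBuffer payloads and completion queues. For readers unfamiliar with GenericStub, the client half of a unary exchange reduces to the sketch below, reusing the byte_buffer_proto_helper.h helpers the test already includes; the target address is illustrative and error handling is elided.

// Sketch: a generic (method-name based) unary call from the client side.
#include <memory>

#include <grpcpp/generic/generic_stub.h>
#include <grpcpp/grpcpp.h>

#include "src/proto/grpc/testing/echo.grpc.pb.h"
#include "test/cpp/util/byte_buffer_proto_helper.h"

void GenericEcho() {
  auto channel = grpc::CreateChannel("localhost:50051",
                                     grpc::InsecureChannelCredentials());
  grpc::GenericStub generic_stub(channel);
  grpc::CompletionQueue cq;

  grpc::testing::EchoRequest request;
  request.set_message("hello");
  std::unique_ptr<grpc::ByteBuffer> send_buffer =
      grpc::testing::SerializeToByteBuffer(&request);

  grpc::ClientContext ctx;
  grpc::ByteBuffer recv_buffer;
  grpc::Status status;
  std::unique_ptr<grpc::GenericClientAsyncResponseReader> call =
      generic_stub.PrepareUnaryCall(
          &ctx, "/grpc.cpp.test.util.EchoTestService/Echo", *send_buffer, &cq);
  call->StartCall();
  call->Finish(&recv_buffer, &status, reinterpret_cast<void*>(1));

  void* got_tag;
  bool ok;
  cq.Next(&got_tag, &ok);  // blocks until the Finish tag completes

  grpc::testing::EchoResponse response;
  if (ok && status.ok()) {
    grpc::testing::ParseFromByteBuffer(&recv_buffer, &response);
  }
}
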
diff --git a/contrib/libs/grpc/test/cpp/end2end/grpclb_end2end_test.cc b/contrib/libs/grpc/test/cpp/end2end/grpclb_end2end_test.cc
index 9111899785..6208dc2535 100644
--- a/contrib/libs/grpc/test/cpp/end2end/grpclb_end2end_test.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/grpclb_end2end_test.cc
@@ -1,94 +1,94 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
#include <deque>
-#include <memory>
-#include <mutex>
-#include <set>
-#include <sstream>
+#include <memory>
+#include <mutex>
+#include <set>
+#include <sstream>
#include <util/generic/string.h>
-#include <thread>
-
+#include <thread>
+
#include "y_absl/strings/str_cat.h"
#include "y_absl/strings/str_format.h"
-#include <grpc/grpc.h>
-#include <grpc/support/alloc.h>
-#include <grpc/support/log.h>
-#include <grpc/support/time.h>
-#include <grpcpp/channel.h>
-#include <grpcpp/client_context.h>
-#include <grpcpp/create_channel.h>
-#include <grpcpp/impl/codegen/sync.h>
-#include <grpcpp/server.h>
-#include <grpcpp/server_builder.h>
-
-#include "src/core/ext/filters/client_channel/backup_poller.h"
+#include <grpc/grpc.h>
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/time.h>
+#include <grpcpp/channel.h>
+#include <grpcpp/client_context.h>
+#include <grpcpp/create_channel.h>
+#include <grpcpp/impl/codegen/sync.h>
+#include <grpcpp/server.h>
+#include <grpcpp/server_builder.h>
+
+#include "src/core/ext/filters/client_channel/backup_poller.h"
#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.h"
-#include "src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h"
-#include "src/core/ext/filters/client_channel/server_address.h"
-#include "src/core/ext/filters/client_channel/service_config.h"
-#include "src/core/lib/gpr/env.h"
-#include "src/core/lib/gprpp/ref_counted_ptr.h"
+#include "src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h"
+#include "src/core/ext/filters/client_channel/server_address.h"
+#include "src/core/ext/filters/client_channel/service_config.h"
+#include "src/core/lib/gpr/env.h"
+#include "src/core/lib/gprpp/ref_counted_ptr.h"
#include "src/core/lib/iomgr/parse_address.h"
-#include "src/core/lib/iomgr/sockaddr.h"
-#include "src/core/lib/security/credentials/fake/fake_credentials.h"
+#include "src/core/lib/iomgr/sockaddr.h"
+#include "src/core/lib/security/credentials/fake/fake_credentials.h"
#include "src/core/lib/transport/authority_override.h"
-#include "src/cpp/client/secure_credentials.h"
-#include "src/cpp/server/secure_server_credentials.h"
-
-#include "test/core/util/port.h"
-#include "test/core/util/test_config.h"
-#include "test/cpp/end2end/test_service_impl.h"
-
-#include "src/proto/grpc/lb/v1/load_balancer.grpc.pb.h"
-#include "src/proto/grpc/testing/echo.grpc.pb.h"
-
-#include <gmock/gmock.h>
-#include <gtest/gtest.h>
-
-// TODO(dgq): Other scenarios in need of testing:
-// - Send a serverlist with faulty ip:port addresses (port > 2^16, etc).
-// - Test reception of invalid serverlist
-// - Test against a non-LB server.
-// - Random LB server closing the stream unexpectedly.
-//
-// Findings from end-to-end testing to be covered here:
-// - Handling of LB server restarts, including reconnection after backing-off
-// retries.
-// - Destruction of load balanced channel (and therefore of grpclb instance)
-// while:
-// 1) the internal LB call is still active. This should work by virtue
-// of the weak reference the LB call holds. The call should be terminated as
-// part of the grpclb shutdown process.
-// 2) the retry timer is active. Again, the weak reference it holds should
-// prevent a premature call to \a glb_destroy.
-
-using std::chrono::system_clock;
-
+#include "src/cpp/client/secure_credentials.h"
+#include "src/cpp/server/secure_server_credentials.h"
+
+#include "test/core/util/port.h"
+#include "test/core/util/test_config.h"
+#include "test/cpp/end2end/test_service_impl.h"
+
+#include "src/proto/grpc/lb/v1/load_balancer.grpc.pb.h"
+#include "src/proto/grpc/testing/echo.grpc.pb.h"
+
+#include <gmock/gmock.h>
+#include <gtest/gtest.h>
+
+// TODO(dgq): Other scenarios in need of testing:
+// - Send a serverlist with faulty ip:port addresses (port > 2^16, etc).
+// - Test reception of invalid serverlist
+// - Test against a non-LB server.
+// - Random LB server closing the stream unexpectedly.
+//
+// Findings from end-to-end testing to be covered here:
+// - Handling of LB server restarts, including reconnection after backing-off
+// retries.
+// - Destruction of load balanced channel (and therefore of grpclb instance)
+// while:
+// 1) the internal LB call is still active. This should work by virtue
+// of the weak reference the LB call holds. The call should be terminated as
+// part of the grpclb shutdown process.
+// 2) the retry timer is active. Again, the weak reference it holds should
+// prevent a premature call to \a glb_destroy.
+
+using std::chrono::system_clock;
+
using grpc::lb::v1::LoadBalancer;
-using grpc::lb::v1::LoadBalanceRequest;
-using grpc::lb::v1::LoadBalanceResponse;
-
-namespace grpc {
-namespace testing {
-namespace {
-
+using grpc::lb::v1::LoadBalanceRequest;
+using grpc::lb::v1::LoadBalanceResponse;
+
+namespace grpc {
+namespace testing {
+namespace {
+
constexpr char kDefaultServiceConfig[] =
"{\n"
" \"loadBalancingConfig\":[\n"
@@ -96,255 +96,255 @@ constexpr char kDefaultServiceConfig[] =
" ]\n"
"}";
-template <typename ServiceType>
-class CountedService : public ServiceType {
- public:
- size_t request_count() {
- grpc::internal::MutexLock lock(&mu_);
- return request_count_;
- }
-
- size_t response_count() {
- grpc::internal::MutexLock lock(&mu_);
- return response_count_;
- }
-
- void IncreaseResponseCount() {
- grpc::internal::MutexLock lock(&mu_);
- ++response_count_;
- }
- void IncreaseRequestCount() {
- grpc::internal::MutexLock lock(&mu_);
- ++request_count_;
- }
-
- void ResetCounters() {
- grpc::internal::MutexLock lock(&mu_);
- request_count_ = 0;
- response_count_ = 0;
- }
-
- protected:
- grpc::internal::Mutex mu_;
-
- private:
- size_t request_count_ = 0;
- size_t response_count_ = 0;
-};
-
-using BackendService = CountedService<TestServiceImpl>;
-using BalancerService = CountedService<LoadBalancer::Service>;
-
-const char g_kCallCredsMdKey[] = "Balancer should not ...";
-const char g_kCallCredsMdValue[] = "... receive me";
-
-class BackendServiceImpl : public BackendService {
- public:
- BackendServiceImpl() {}
-
- Status Echo(ServerContext* context, const EchoRequest* request,
- EchoResponse* response) override {
- // Backend should receive the call credentials metadata.
- auto call_credentials_entry =
- context->client_metadata().find(g_kCallCredsMdKey);
- EXPECT_NE(call_credentials_entry, context->client_metadata().end());
- if (call_credentials_entry != context->client_metadata().end()) {
- EXPECT_EQ(call_credentials_entry->second, g_kCallCredsMdValue);
- }
- IncreaseRequestCount();
- const auto status = TestServiceImpl::Echo(context, request, response);
- IncreaseResponseCount();
- AddClient(context->peer().c_str());
- return status;
- }
-
- void Start() {}
-
- void Shutdown() {}
-
+template <typename ServiceType>
+class CountedService : public ServiceType {
+ public:
+ size_t request_count() {
+ grpc::internal::MutexLock lock(&mu_);
+ return request_count_;
+ }
+
+ size_t response_count() {
+ grpc::internal::MutexLock lock(&mu_);
+ return response_count_;
+ }
+
+ void IncreaseResponseCount() {
+ grpc::internal::MutexLock lock(&mu_);
+ ++response_count_;
+ }
+ void IncreaseRequestCount() {
+ grpc::internal::MutexLock lock(&mu_);
+ ++request_count_;
+ }
+
+ void ResetCounters() {
+ grpc::internal::MutexLock lock(&mu_);
+ request_count_ = 0;
+ response_count_ = 0;
+ }
+
+ protected:
+ grpc::internal::Mutex mu_;
+
+ private:
+ size_t request_count_ = 0;
+ size_t response_count_ = 0;
+};
+
+using BackendService = CountedService<TestServiceImpl>;
+using BalancerService = CountedService<LoadBalancer::Service>;
+
+const char g_kCallCredsMdKey[] = "Balancer should not ...";
+const char g_kCallCredsMdValue[] = "... receive me";
+
+class BackendServiceImpl : public BackendService {
+ public:
+ BackendServiceImpl() {}
+
+ Status Echo(ServerContext* context, const EchoRequest* request,
+ EchoResponse* response) override {
+ // Backend should receive the call credentials metadata.
+ auto call_credentials_entry =
+ context->client_metadata().find(g_kCallCredsMdKey);
+ EXPECT_NE(call_credentials_entry, context->client_metadata().end());
+ if (call_credentials_entry != context->client_metadata().end()) {
+ EXPECT_EQ(call_credentials_entry->second, g_kCallCredsMdValue);
+ }
+ IncreaseRequestCount();
+ const auto status = TestServiceImpl::Echo(context, request, response);
+ IncreaseResponseCount();
+ AddClient(context->peer().c_str());
+ return status;
+ }
+
+ void Start() {}
+
+ void Shutdown() {}
+
std::set<TString> clients() {
- grpc::internal::MutexLock lock(&clients_mu_);
- return clients_;
- }
-
- private:
+ grpc::internal::MutexLock lock(&clients_mu_);
+ return clients_;
+ }
+
+ private:
void AddClient(const TString& client) {
- grpc::internal::MutexLock lock(&clients_mu_);
- clients_.insert(client);
- }
-
- grpc::internal::Mutex mu_;
- grpc::internal::Mutex clients_mu_;
+ grpc::internal::MutexLock lock(&clients_mu_);
+ clients_.insert(client);
+ }
+
+ grpc::internal::Mutex mu_;
+ grpc::internal::Mutex clients_mu_;
std::set<TString> clients_;
-};
-
+};
+
TString Ip4ToPackedString(const char* ip_str) {
- struct in_addr ip4;
- GPR_ASSERT(inet_pton(AF_INET, ip_str, &ip4) == 1);
+ struct in_addr ip4;
+ GPR_ASSERT(inet_pton(AF_INET, ip_str, &ip4) == 1);
return TString(reinterpret_cast<const char*>(&ip4), sizeof(ip4));
-}
-
-struct ClientStats {
- size_t num_calls_started = 0;
- size_t num_calls_finished = 0;
- size_t num_calls_finished_with_client_failed_to_send = 0;
- size_t num_calls_finished_known_received = 0;
+}
+
+struct ClientStats {
+ size_t num_calls_started = 0;
+ size_t num_calls_finished = 0;
+ size_t num_calls_finished_with_client_failed_to_send = 0;
+ size_t num_calls_finished_known_received = 0;
std::map<TString, size_t> drop_token_counts;
-
- ClientStats& operator+=(const ClientStats& other) {
- num_calls_started += other.num_calls_started;
- num_calls_finished += other.num_calls_finished;
- num_calls_finished_with_client_failed_to_send +=
- other.num_calls_finished_with_client_failed_to_send;
- num_calls_finished_known_received +=
- other.num_calls_finished_known_received;
- for (const auto& p : other.drop_token_counts) {
- drop_token_counts[p.first] += p.second;
- }
- return *this;
- }
-
- void Reset() {
- num_calls_started = 0;
- num_calls_finished = 0;
- num_calls_finished_with_client_failed_to_send = 0;
- num_calls_finished_known_received = 0;
- drop_token_counts.clear();
- }
-};
-
-class BalancerServiceImpl : public BalancerService {
- public:
- using Stream = ServerReaderWriter<LoadBalanceResponse, LoadBalanceRequest>;
- using ResponseDelayPair = std::pair<LoadBalanceResponse, int>;
-
- explicit BalancerServiceImpl(int client_load_reporting_interval_seconds)
- : client_load_reporting_interval_seconds_(
- client_load_reporting_interval_seconds) {}
-
- Status BalanceLoad(ServerContext* context, Stream* stream) override {
- gpr_log(GPR_INFO, "LB[%p]: BalanceLoad", this);
- {
- grpc::internal::MutexLock lock(&mu_);
- if (serverlist_done_) goto done;
- }
- {
- // Balancer shouldn't receive the call credentials metadata.
- EXPECT_EQ(context->client_metadata().find(g_kCallCredsMdKey),
- context->client_metadata().end());
- LoadBalanceRequest request;
- std::vector<ResponseDelayPair> responses_and_delays;
-
- if (!stream->Read(&request)) {
- goto done;
+
+ ClientStats& operator+=(const ClientStats& other) {
+ num_calls_started += other.num_calls_started;
+ num_calls_finished += other.num_calls_finished;
+ num_calls_finished_with_client_failed_to_send +=
+ other.num_calls_finished_with_client_failed_to_send;
+ num_calls_finished_known_received +=
+ other.num_calls_finished_known_received;
+ for (const auto& p : other.drop_token_counts) {
+ drop_token_counts[p.first] += p.second;
+ }
+ return *this;
+ }
+
+ void Reset() {
+ num_calls_started = 0;
+ num_calls_finished = 0;
+ num_calls_finished_with_client_failed_to_send = 0;
+ num_calls_finished_known_received = 0;
+ drop_token_counts.clear();
+ }
+};
+
+class BalancerServiceImpl : public BalancerService {
+ public:
+ using Stream = ServerReaderWriter<LoadBalanceResponse, LoadBalanceRequest>;
+ using ResponseDelayPair = std::pair<LoadBalanceResponse, int>;
+
+ explicit BalancerServiceImpl(int client_load_reporting_interval_seconds)
+ : client_load_reporting_interval_seconds_(
+ client_load_reporting_interval_seconds) {}
+
+ Status BalanceLoad(ServerContext* context, Stream* stream) override {
+ gpr_log(GPR_INFO, "LB[%p]: BalanceLoad", this);
+ {
+ grpc::internal::MutexLock lock(&mu_);
+ if (serverlist_done_) goto done;
+ }
+ {
+ // Balancer shouldn't receive the call credentials metadata.
+ EXPECT_EQ(context->client_metadata().find(g_kCallCredsMdKey),
+ context->client_metadata().end());
+ LoadBalanceRequest request;
+ std::vector<ResponseDelayPair> responses_and_delays;
+
+ if (!stream->Read(&request)) {
+ goto done;
} else {
if (request.has_initial_request()) {
grpc::internal::MutexLock lock(&mu_);
service_names_.push_back(request.initial_request().name());
}
- }
- IncreaseRequestCount();
- gpr_log(GPR_INFO, "LB[%p]: received initial message '%s'", this,
- request.DebugString().c_str());
-
- // TODO(juanlishen): Initial response should always be the first response.
- if (client_load_reporting_interval_seconds_ > 0) {
- LoadBalanceResponse initial_response;
- initial_response.mutable_initial_response()
- ->mutable_client_stats_report_interval()
- ->set_seconds(client_load_reporting_interval_seconds_);
- stream->Write(initial_response);
- }
-
- {
- grpc::internal::MutexLock lock(&mu_);
- responses_and_delays = responses_and_delays_;
- }
- for (const auto& response_and_delay : responses_and_delays) {
- SendResponse(stream, response_and_delay.first,
- response_and_delay.second);
- }
- {
- grpc::internal::MutexLock lock(&mu_);
- serverlist_cond_.WaitUntil(&mu_, [this] { return serverlist_done_; });
- }
-
- if (client_load_reporting_interval_seconds_ > 0) {
- request.Clear();
+ }
+ IncreaseRequestCount();
+ gpr_log(GPR_INFO, "LB[%p]: received initial message '%s'", this,
+ request.DebugString().c_str());
+
+ // TODO(juanlishen): Initial response should always be the first response.
+ if (client_load_reporting_interval_seconds_ > 0) {
+ LoadBalanceResponse initial_response;
+ initial_response.mutable_initial_response()
+ ->mutable_client_stats_report_interval()
+ ->set_seconds(client_load_reporting_interval_seconds_);
+ stream->Write(initial_response);
+ }
+
+ {
+ grpc::internal::MutexLock lock(&mu_);
+ responses_and_delays = responses_and_delays_;
+ }
+ for (const auto& response_and_delay : responses_and_delays) {
+ SendResponse(stream, response_and_delay.first,
+ response_and_delay.second);
+ }
+ {
+ grpc::internal::MutexLock lock(&mu_);
+ serverlist_cond_.WaitUntil(&mu_, [this] { return serverlist_done_; });
+ }
+
+ if (client_load_reporting_interval_seconds_ > 0) {
+ request.Clear();
while (stream->Read(&request)) {
- gpr_log(GPR_INFO, "LB[%p]: received client load report message '%s'",
- this, request.DebugString().c_str());
- GPR_ASSERT(request.has_client_stats());
+ gpr_log(GPR_INFO, "LB[%p]: received client load report message '%s'",
+ this, request.DebugString().c_str());
+ GPR_ASSERT(request.has_client_stats());
ClientStats load_report;
load_report.num_calls_started =
- request.client_stats().num_calls_started();
+ request.client_stats().num_calls_started();
load_report.num_calls_finished =
- request.client_stats().num_calls_finished();
+ request.client_stats().num_calls_finished();
load_report.num_calls_finished_with_client_failed_to_send =
- request.client_stats()
- .num_calls_finished_with_client_failed_to_send();
+ request.client_stats()
+ .num_calls_finished_with_client_failed_to_send();
load_report.num_calls_finished_known_received =
- request.client_stats().num_calls_finished_known_received();
- for (const auto& drop_token_count :
- request.client_stats().calls_finished_with_drop()) {
+ request.client_stats().num_calls_finished_known_received();
+ for (const auto& drop_token_count :
+ request.client_stats().calls_finished_with_drop()) {
load_report
.drop_token_counts[drop_token_count.load_balance_token()] =
- drop_token_count.num_calls();
- }
+ drop_token_count.num_calls();
+ }
// We need to acquire the lock here in order to prevent the notify_one
// below from firing before its corresponding wait is executed.
grpc::internal::MutexLock lock(&mu_);
load_report_queue_.emplace_back(std::move(load_report));
if (load_report_cond_ != nullptr) load_report_cond_->Signal();
- }
- }
- }
- done:
- gpr_log(GPR_INFO, "LB[%p]: done", this);
- return Status::OK;
- }
-
- void add_response(const LoadBalanceResponse& response, int send_after_ms) {
- grpc::internal::MutexLock lock(&mu_);
- responses_and_delays_.push_back(std::make_pair(response, send_after_ms));
- }
-
- void Start() {
- grpc::internal::MutexLock lock(&mu_);
- serverlist_done_ = false;
- responses_and_delays_.clear();
+ }
+ }
+ }
+ done:
+ gpr_log(GPR_INFO, "LB[%p]: done", this);
+ return Status::OK;
+ }
+
+ void add_response(const LoadBalanceResponse& response, int send_after_ms) {
+ grpc::internal::MutexLock lock(&mu_);
+ responses_and_delays_.push_back(std::make_pair(response, send_after_ms));
+ }
+
+ void Start() {
+ grpc::internal::MutexLock lock(&mu_);
+ serverlist_done_ = false;
+ responses_and_delays_.clear();
load_report_queue_.clear();
- }
-
- void Shutdown() {
- NotifyDoneWithServerlists();
- gpr_log(GPR_INFO, "LB[%p]: shut down", this);
- }
-
- static LoadBalanceResponse BuildResponseForBackends(
- const std::vector<int>& backend_ports,
+ }
+
+ void Shutdown() {
+ NotifyDoneWithServerlists();
+ gpr_log(GPR_INFO, "LB[%p]: shut down", this);
+ }
+
+ static LoadBalanceResponse BuildResponseForBackends(
+ const std::vector<int>& backend_ports,
const std::map<TString, size_t>& drop_token_counts) {
- LoadBalanceResponse response;
- for (const auto& drop_token_count : drop_token_counts) {
- for (size_t i = 0; i < drop_token_count.second; ++i) {
- auto* server = response.mutable_server_list()->add_servers();
- server->set_drop(true);
- server->set_load_balance_token(drop_token_count.first);
- }
- }
- for (const int& backend_port : backend_ports) {
- auto* server = response.mutable_server_list()->add_servers();
- server->set_ip_address(Ip4ToPackedString("127.0.0.1"));
- server->set_port(backend_port);
- static int token_count = 0;
+ LoadBalanceResponse response;
+ for (const auto& drop_token_count : drop_token_counts) {
+ for (size_t i = 0; i < drop_token_count.second; ++i) {
+ auto* server = response.mutable_server_list()->add_servers();
+ server->set_drop(true);
+ server->set_load_balance_token(drop_token_count.first);
+ }
+ }
+ for (const int& backend_port : backend_ports) {
+ auto* server = response.mutable_server_list()->add_servers();
+ server->set_ip_address(Ip4ToPackedString("127.0.0.1"));
+ server->set_port(backend_port);
+ static int token_count = 0;
server->set_load_balance_token(
y_absl::StrFormat("token%03d", ++token_count));
- }
- return response;
- }
-
+ }
+ return response;
+ }
+
ClientStats WaitForLoadReport() {
- grpc::internal::MutexLock lock(&mu_);
+ grpc::internal::MutexLock lock(&mu_);
grpc::internal::CondVar cv;
if (load_report_queue_.empty()) {
load_report_cond_ = &cv;
@@ -355,216 +355,216 @@ class BalancerServiceImpl : public BalancerService {
ClientStats load_report = std::move(load_report_queue_.front());
load_report_queue_.pop_front();
return load_report;
- }
-
- void NotifyDoneWithServerlists() {
- grpc::internal::MutexLock lock(&mu_);
- if (!serverlist_done_) {
- serverlist_done_ = true;
- serverlist_cond_.Broadcast();
- }
- }
-
+ }
+
+ void NotifyDoneWithServerlists() {
+ grpc::internal::MutexLock lock(&mu_);
+ if (!serverlist_done_) {
+ serverlist_done_ = true;
+ serverlist_cond_.Broadcast();
+ }
+ }
+
std::vector<TString> service_names() {
grpc::internal::MutexLock lock(&mu_);
return service_names_;
}
- private:
- void SendResponse(Stream* stream, const LoadBalanceResponse& response,
- int delay_ms) {
- gpr_log(GPR_INFO, "LB[%p]: sleeping for %d ms...", this, delay_ms);
- if (delay_ms > 0) {
- gpr_sleep_until(grpc_timeout_milliseconds_to_deadline(delay_ms));
- }
- gpr_log(GPR_INFO, "LB[%p]: Woke up! Sending response '%s'", this,
- response.DebugString().c_str());
- IncreaseResponseCount();
- stream->Write(response);
- }
-
- const int client_load_reporting_interval_seconds_;
- std::vector<ResponseDelayPair> responses_and_delays_;
+ private:
+ void SendResponse(Stream* stream, const LoadBalanceResponse& response,
+ int delay_ms) {
+ gpr_log(GPR_INFO, "LB[%p]: sleeping for %d ms...", this, delay_ms);
+ if (delay_ms > 0) {
+ gpr_sleep_until(grpc_timeout_milliseconds_to_deadline(delay_ms));
+ }
+ gpr_log(GPR_INFO, "LB[%p]: Woke up! Sending response '%s'", this,
+ response.DebugString().c_str());
+ IncreaseResponseCount();
+ stream->Write(response);
+ }
+
+ const int client_load_reporting_interval_seconds_;
+ std::vector<ResponseDelayPair> responses_and_delays_;
std::vector<TString> service_names_;
- grpc::internal::Mutex mu_;
- grpc::internal::CondVar serverlist_cond_;
- bool serverlist_done_ = false;
+ grpc::internal::Mutex mu_;
+ grpc::internal::CondVar serverlist_cond_;
+ bool serverlist_done_ = false;
grpc::internal::CondVar* load_report_cond_ = nullptr;
std::deque<ClientStats> load_report_queue_;
-};
-
-class GrpclbEnd2endTest : public ::testing::Test {
- protected:
- GrpclbEnd2endTest(size_t num_backends, size_t num_balancers,
- int client_load_reporting_interval_seconds)
- : server_host_("localhost"),
- num_backends_(num_backends),
- num_balancers_(num_balancers),
- client_load_reporting_interval_seconds_(
- client_load_reporting_interval_seconds) {}
-
- static void SetUpTestCase() {
- // Make the backup poller poll very frequently in order to pick up
-  // updates from all the subchannels' FDs.
- GPR_GLOBAL_CONFIG_SET(grpc_client_channel_backup_poll_interval_ms, 1);
-#if TARGET_OS_IPHONE
- // Workaround Apple CFStream bug
- gpr_setenv("grpc_cfstream", "0");
-#endif
- grpc_init();
- }
-
- static void TearDownTestCase() { grpc_shutdown(); }
-
- void SetUp() override {
- response_generator_ =
- grpc_core::MakeRefCounted<grpc_core::FakeResolverResponseGenerator>();
- // Start the backends.
- for (size_t i = 0; i < num_backends_; ++i) {
- backends_.emplace_back(new ServerThread<BackendServiceImpl>("backend"));
- backends_.back()->Start(server_host_);
- }
- // Start the load balancers.
- for (size_t i = 0; i < num_balancers_; ++i) {
- balancers_.emplace_back(new ServerThread<BalancerServiceImpl>(
- "balancer", client_load_reporting_interval_seconds_));
- balancers_.back()->Start(server_host_);
- }
- ResetStub();
- }
-
- void TearDown() override {
- ShutdownAllBackends();
- for (auto& balancer : balancers_) balancer->Shutdown();
- }
-
- void StartAllBackends() {
- for (auto& backend : backends_) backend->Start(server_host_);
- }
-
- void StartBackend(size_t index) { backends_[index]->Start(server_host_); }
-
- void ShutdownAllBackends() {
- for (auto& backend : backends_) backend->Shutdown();
- }
-
- void ShutdownBackend(size_t index) { backends_[index]->Shutdown(); }
-
- void ResetStub(int fallback_timeout = 0,
+};
+
+class GrpclbEnd2endTest : public ::testing::Test {
+ protected:
+ GrpclbEnd2endTest(size_t num_backends, size_t num_balancers,
+ int client_load_reporting_interval_seconds)
+ : server_host_("localhost"),
+ num_backends_(num_backends),
+ num_balancers_(num_balancers),
+ client_load_reporting_interval_seconds_(
+ client_load_reporting_interval_seconds) {}
+
+ static void SetUpTestCase() {
+ // Make the backup poller poll very frequently in order to pick up
+  // updates from all the subchannels' FDs.
+ GPR_GLOBAL_CONFIG_SET(grpc_client_channel_backup_poll_interval_ms, 1);
+#if TARGET_OS_IPHONE
+ // Workaround Apple CFStream bug
+ gpr_setenv("grpc_cfstream", "0");
+#endif
+ grpc_init();
+ }
+
+ static void TearDownTestCase() { grpc_shutdown(); }
+
+ void SetUp() override {
+ response_generator_ =
+ grpc_core::MakeRefCounted<grpc_core::FakeResolverResponseGenerator>();
+ // Start the backends.
+ for (size_t i = 0; i < num_backends_; ++i) {
+ backends_.emplace_back(new ServerThread<BackendServiceImpl>("backend"));
+ backends_.back()->Start(server_host_);
+ }
+ // Start the load balancers.
+ for (size_t i = 0; i < num_balancers_; ++i) {
+ balancers_.emplace_back(new ServerThread<BalancerServiceImpl>(
+ "balancer", client_load_reporting_interval_seconds_));
+ balancers_.back()->Start(server_host_);
+ }
+ ResetStub();
+ }
+
+ void TearDown() override {
+ ShutdownAllBackends();
+ for (auto& balancer : balancers_) balancer->Shutdown();
+ }
+
+ void StartAllBackends() {
+ for (auto& backend : backends_) backend->Start(server_host_);
+ }
+
+ void StartBackend(size_t index) { backends_[index]->Start(server_host_); }
+
+ void ShutdownAllBackends() {
+ for (auto& backend : backends_) backend->Shutdown();
+ }
+
+ void ShutdownBackend(size_t index) { backends_[index]->Shutdown(); }
+
+ void ResetStub(int fallback_timeout = 0,
const TString& expected_targets = "") {
- ChannelArguments args;
- if (fallback_timeout > 0) args.SetGrpclbFallbackTimeout(fallback_timeout);
- args.SetPointer(GRPC_ARG_FAKE_RESOLVER_RESPONSE_GENERATOR,
- response_generator_.get());
- if (!expected_targets.empty()) {
- args.SetString(GRPC_ARG_FAKE_SECURITY_EXPECTED_TARGETS, expected_targets);
- }
- std::ostringstream uri;
- uri << "fake:///" << kApplicationTargetName_;
- // TODO(dgq): templatize tests to run everything using both secure and
- // insecure channel credentials.
- grpc_channel_credentials* channel_creds =
- grpc_fake_transport_security_credentials_create();
- grpc_call_credentials* call_creds = grpc_md_only_test_credentials_create(
- g_kCallCredsMdKey, g_kCallCredsMdValue, false);
- std::shared_ptr<ChannelCredentials> creds(
- new SecureChannelCredentials(grpc_composite_channel_credentials_create(
- channel_creds, call_creds, nullptr)));
- call_creds->Unref();
- channel_creds->Unref();
- channel_ = ::grpc::CreateCustomChannel(uri.str(), creds, args);
- stub_ = grpc::testing::EchoTestService::NewStub(channel_);
- }
-
- void ResetBackendCounters() {
- for (auto& backend : backends_) backend->service_.ResetCounters();
- }
-
- ClientStats WaitForLoadReports() {
- ClientStats client_stats;
- for (auto& balancer : balancers_) {
- client_stats += balancer->service_.WaitForLoadReport();
- }
- return client_stats;
- }
-
- bool SeenAllBackends(size_t start_index = 0, size_t stop_index = 0) {
- if (stop_index == 0) stop_index = backends_.size();
- for (size_t i = start_index; i < stop_index; ++i) {
- if (backends_[i]->service_.request_count() == 0) return false;
- }
- return true;
- }
-
- void SendRpcAndCount(int* num_total, int* num_ok, int* num_failure,
- int* num_drops) {
- const Status status = SendRpc();
- if (status.ok()) {
- ++*num_ok;
- } else {
- if (status.error_message() == "Call dropped by load balancing policy") {
- ++*num_drops;
- } else {
- ++*num_failure;
- }
- }
- ++*num_total;
- }
-
- std::tuple<int, int, int> WaitForAllBackends(int num_requests_multiple_of = 1,
- size_t start_index = 0,
- size_t stop_index = 0) {
- int num_ok = 0;
- int num_failure = 0;
- int num_drops = 0;
- int num_total = 0;
- while (!SeenAllBackends(start_index, stop_index)) {
- SendRpcAndCount(&num_total, &num_ok, &num_failure, &num_drops);
- }
- while (num_total % num_requests_multiple_of != 0) {
- SendRpcAndCount(&num_total, &num_ok, &num_failure, &num_drops);
- }
- ResetBackendCounters();
- gpr_log(GPR_INFO,
- "Performed %d warm up requests (a multiple of %d) against the "
- "backends. %d succeeded, %d failed, %d dropped.",
- num_total, num_requests_multiple_of, num_ok, num_failure,
- num_drops);
- return std::make_tuple(num_ok, num_failure, num_drops);
- }
-
- void WaitForBackend(size_t backend_idx) {
- do {
- (void)SendRpc();
- } while (backends_[backend_idx]->service_.request_count() == 0);
- ResetBackendCounters();
- }
-
- struct AddressData {
- int port;
+ ChannelArguments args;
+ if (fallback_timeout > 0) args.SetGrpclbFallbackTimeout(fallback_timeout);
+ args.SetPointer(GRPC_ARG_FAKE_RESOLVER_RESPONSE_GENERATOR,
+ response_generator_.get());
+ if (!expected_targets.empty()) {
+ args.SetString(GRPC_ARG_FAKE_SECURITY_EXPECTED_TARGETS, expected_targets);
+ }
+ std::ostringstream uri;
+ uri << "fake:///" << kApplicationTargetName_;
+ // TODO(dgq): templatize tests to run everything using both secure and
+ // insecure channel credentials.
+ grpc_channel_credentials* channel_creds =
+ grpc_fake_transport_security_credentials_create();
+ grpc_call_credentials* call_creds = grpc_md_only_test_credentials_create(
+ g_kCallCredsMdKey, g_kCallCredsMdValue, false);
+ std::shared_ptr<ChannelCredentials> creds(
+ new SecureChannelCredentials(grpc_composite_channel_credentials_create(
+ channel_creds, call_creds, nullptr)));
+ call_creds->Unref();
+ channel_creds->Unref();
+ channel_ = ::grpc::CreateCustomChannel(uri.str(), creds, args);
+ stub_ = grpc::testing::EchoTestService::NewStub(channel_);
+ }
+
+ void ResetBackendCounters() {
+ for (auto& backend : backends_) backend->service_.ResetCounters();
+ }
+
+ ClientStats WaitForLoadReports() {
+ ClientStats client_stats;
+ for (auto& balancer : balancers_) {
+ client_stats += balancer->service_.WaitForLoadReport();
+ }
+ return client_stats;
+ }
+
+ bool SeenAllBackends(size_t start_index = 0, size_t stop_index = 0) {
+ if (stop_index == 0) stop_index = backends_.size();
+ for (size_t i = start_index; i < stop_index; ++i) {
+ if (backends_[i]->service_.request_count() == 0) return false;
+ }
+ return true;
+ }
+
+ void SendRpcAndCount(int* num_total, int* num_ok, int* num_failure,
+ int* num_drops) {
+ const Status status = SendRpc();
+ if (status.ok()) {
+ ++*num_ok;
+ } else {
+ if (status.error_message() == "Call dropped by load balancing policy") {
+ ++*num_drops;
+ } else {
+ ++*num_failure;
+ }
+ }
+ ++*num_total;
+ }
+
+ std::tuple<int, int, int> WaitForAllBackends(int num_requests_multiple_of = 1,
+ size_t start_index = 0,
+ size_t stop_index = 0) {
+ int num_ok = 0;
+ int num_failure = 0;
+ int num_drops = 0;
+ int num_total = 0;
+ while (!SeenAllBackends(start_index, stop_index)) {
+ SendRpcAndCount(&num_total, &num_ok, &num_failure, &num_drops);
+ }
+ while (num_total % num_requests_multiple_of != 0) {
+ SendRpcAndCount(&num_total, &num_ok, &num_failure, &num_drops);
+ }
+ ResetBackendCounters();
+ gpr_log(GPR_INFO,
+ "Performed %d warm up requests (a multiple of %d) against the "
+ "backends. %d succeeded, %d failed, %d dropped.",
+ num_total, num_requests_multiple_of, num_ok, num_failure,
+ num_drops);
+ return std::make_tuple(num_ok, num_failure, num_drops);
+ }
+
+ void WaitForBackend(size_t backend_idx) {
+ do {
+ (void)SendRpc();
+ } while (backends_[backend_idx]->service_.request_count() == 0);
+ ResetBackendCounters();
+ }
+
+ struct AddressData {
+ int port;
TString balancer_name;
- };
-
+ };
+
static grpc_core::ServerAddressList CreateLbAddressesFromAddressDataList(
- const std::vector<AddressData>& address_data) {
- grpc_core::ServerAddressList addresses;
- for (const auto& addr : address_data) {
+ const std::vector<AddressData>& address_data) {
+ grpc_core::ServerAddressList addresses;
+ for (const auto& addr : address_data) {
TString lb_uri_str = y_absl::StrCat("ipv4:127.0.0.1:", addr.port);
grpc_uri* lb_uri = grpc_uri_parse(lb_uri_str.c_str(), true);
- GPR_ASSERT(lb_uri != nullptr);
- grpc_resolved_address address;
- GPR_ASSERT(grpc_parse_uri(lb_uri, &address));
+ GPR_ASSERT(lb_uri != nullptr);
+ grpc_resolved_address address;
+ GPR_ASSERT(grpc_parse_uri(lb_uri, &address));
grpc_arg arg = grpc_core::CreateAuthorityOverrideChannelArg(
addr.balancer_name.c_str());
grpc_channel_args* args =
grpc_channel_args_copy_and_add(nullptr, &arg, 1);
- addresses.emplace_back(address.addr, address.len, args);
- grpc_uri_destroy(lb_uri);
- }
- return addresses;
- }
-
+ addresses.emplace_back(address.addr, address.len, args);
+ grpc_uri_destroy(lb_uri);
+ }
+ return addresses;
+ }
+
static grpc_core::Resolver::Result MakeResolverResult(
const std::vector<AddressData>& balancer_address_data,
const std::vector<AddressData>& backend_address_data = {},
@@ -583,191 +583,191 @@ class GrpclbEnd2endTest : public ::testing::Test {
return result;
}
- void SetNextResolutionAllBalancers(
+ void SetNextResolutionAllBalancers(
const char* service_config_json = kDefaultServiceConfig) {
- std::vector<AddressData> addresses;
- for (size_t i = 0; i < balancers_.size(); ++i) {
+ std::vector<AddressData> addresses;
+ for (size_t i = 0; i < balancers_.size(); ++i) {
addresses.emplace_back(AddressData{balancers_[i]->port_, ""});
- }
+ }
SetNextResolution(addresses, {}, service_config_json);
- }
-
+ }
+
void SetNextResolution(
const std::vector<AddressData>& balancer_address_data,
const std::vector<AddressData>& backend_address_data = {},
const char* service_config_json = kDefaultServiceConfig) {
- grpc_core::ExecCtx exec_ctx;
+ grpc_core::ExecCtx exec_ctx;
grpc_core::Resolver::Result result = MakeResolverResult(
balancer_address_data, backend_address_data, service_config_json);
- response_generator_->SetResponse(std::move(result));
- }
-
- void SetNextReresolutionResponse(
+ response_generator_->SetResponse(std::move(result));
+ }
+
+ void SetNextReresolutionResponse(
const std::vector<AddressData>& balancer_address_data,
const std::vector<AddressData>& backend_address_data = {},
const char* service_config_json = kDefaultServiceConfig) {
- grpc_core::ExecCtx exec_ctx;
+ grpc_core::ExecCtx exec_ctx;
grpc_core::Resolver::Result result = MakeResolverResult(
balancer_address_data, backend_address_data, service_config_json);
- response_generator_->SetReresolutionResponse(std::move(result));
- }
-
- const std::vector<int> GetBackendPorts(size_t start_index = 0,
- size_t stop_index = 0) const {
- if (stop_index == 0) stop_index = backends_.size();
- std::vector<int> backend_ports;
- for (size_t i = start_index; i < stop_index; ++i) {
- backend_ports.push_back(backends_[i]->port_);
- }
- return backend_ports;
- }
-
- void ScheduleResponseForBalancer(size_t i,
- const LoadBalanceResponse& response,
- int delay_ms) {
- balancers_[i]->service_.add_response(response, delay_ms);
- }
-
- Status SendRpc(EchoResponse* response = nullptr, int timeout_ms = 1000,
+ response_generator_->SetReresolutionResponse(std::move(result));
+ }
+
+ const std::vector<int> GetBackendPorts(size_t start_index = 0,
+ size_t stop_index = 0) const {
+ if (stop_index == 0) stop_index = backends_.size();
+ std::vector<int> backend_ports;
+ for (size_t i = start_index; i < stop_index; ++i) {
+ backend_ports.push_back(backends_[i]->port_);
+ }
+ return backend_ports;
+ }
+
+ void ScheduleResponseForBalancer(size_t i,
+ const LoadBalanceResponse& response,
+ int delay_ms) {
+ balancers_[i]->service_.add_response(response, delay_ms);
+ }
+
+ Status SendRpc(EchoResponse* response = nullptr, int timeout_ms = 1000,
bool wait_for_ready = false,
const Status& expected_status = Status::OK) {
- const bool local_response = (response == nullptr);
- if (local_response) response = new EchoResponse;
- EchoRequest request;
- request.set_message(kRequestMessage_);
+ const bool local_response = (response == nullptr);
+ if (local_response) response = new EchoResponse;
+ EchoRequest request;
+ request.set_message(kRequestMessage_);
if (!expected_status.ok()) {
auto* error = request.mutable_param()->mutable_expected_error();
error->set_code(expected_status.error_code());
error->set_error_message(expected_status.error_message());
}
- ClientContext context;
- context.set_deadline(grpc_timeout_milliseconds_to_deadline(timeout_ms));
- if (wait_for_ready) context.set_wait_for_ready(true);
- Status status = stub_->Echo(&context, request, response);
- if (local_response) delete response;
- return status;
- }
-
- void CheckRpcSendOk(const size_t times = 1, const int timeout_ms = 1000,
- bool wait_for_ready = false) {
- for (size_t i = 0; i < times; ++i) {
- EchoResponse response;
- const Status status = SendRpc(&response, timeout_ms, wait_for_ready);
- EXPECT_TRUE(status.ok()) << "code=" << status.error_code()
- << " message=" << status.error_message();
- EXPECT_EQ(response.message(), kRequestMessage_);
- }
- }
-
- void CheckRpcSendFailure() {
- const Status status = SendRpc();
- EXPECT_FALSE(status.ok());
- }
-
- template <typename T>
- struct ServerThread {
- template <typename... Args>
+ ClientContext context;
+ context.set_deadline(grpc_timeout_milliseconds_to_deadline(timeout_ms));
+ if (wait_for_ready) context.set_wait_for_ready(true);
+ Status status = stub_->Echo(&context, request, response);
+ if (local_response) delete response;
+ return status;
+ }
+
+ void CheckRpcSendOk(const size_t times = 1, const int timeout_ms = 1000,
+ bool wait_for_ready = false) {
+ for (size_t i = 0; i < times; ++i) {
+ EchoResponse response;
+ const Status status = SendRpc(&response, timeout_ms, wait_for_ready);
+ EXPECT_TRUE(status.ok()) << "code=" << status.error_code()
+ << " message=" << status.error_message();
+ EXPECT_EQ(response.message(), kRequestMessage_);
+ }
+ }
+
+ void CheckRpcSendFailure() {
+ const Status status = SendRpc();
+ EXPECT_FALSE(status.ok());
+ }
+
+ template <typename T>
+ struct ServerThread {
+ template <typename... Args>
explicit ServerThread(const TString& type, Args&&... args)
- : port_(grpc_pick_unused_port_or_die()),
- type_(type),
- service_(std::forward<Args>(args)...) {}
-
+ : port_(grpc_pick_unused_port_or_die()),
+ type_(type),
+ service_(std::forward<Args>(args)...) {}
+
void Start(const TString& server_host) {
- gpr_log(GPR_INFO, "starting %s server on port %d", type_.c_str(), port_);
- GPR_ASSERT(!running_);
- running_ = true;
- service_.Start();
- grpc::internal::Mutex mu;
- // We need to acquire the lock here in order to prevent the notify_one
- // by ServerThread::Serve from firing before the wait below is hit.
- grpc::internal::MutexLock lock(&mu);
- grpc::internal::CondVar cond;
- thread_.reset(new std::thread(
- std::bind(&ServerThread::Serve, this, server_host, &mu, &cond)));
- cond.Wait(&mu);
- gpr_log(GPR_INFO, "%s server startup complete", type_.c_str());
- }
-
+ gpr_log(GPR_INFO, "starting %s server on port %d", type_.c_str(), port_);
+ GPR_ASSERT(!running_);
+ running_ = true;
+ service_.Start();
+ grpc::internal::Mutex mu;
+ // We need to acquire the lock here in order to prevent the notify_one
+ // by ServerThread::Serve from firing before the wait below is hit.
+ grpc::internal::MutexLock lock(&mu);
+ grpc::internal::CondVar cond;
+ thread_.reset(new std::thread(
+ std::bind(&ServerThread::Serve, this, server_host, &mu, &cond)));
+ cond.Wait(&mu);
+ gpr_log(GPR_INFO, "%s server startup complete", type_.c_str());
+ }
+
void Serve(const TString& server_host, grpc::internal::Mutex* mu,
- grpc::internal::CondVar* cond) {
- // We need to acquire the lock here in order to prevent the notify_one
- // below from firing before its corresponding wait is executed.
- grpc::internal::MutexLock lock(mu);
- std::ostringstream server_address;
- server_address << server_host << ":" << port_;
- ServerBuilder builder;
- std::shared_ptr<ServerCredentials> creds(new SecureServerCredentials(
- grpc_fake_transport_security_server_credentials_create()));
- builder.AddListeningPort(server_address.str(), creds);
- builder.RegisterService(&service_);
- server_ = builder.BuildAndStart();
- cond->Signal();
- }
-
- void Shutdown() {
- if (!running_) return;
- gpr_log(GPR_INFO, "%s about to shutdown", type_.c_str());
- service_.Shutdown();
- server_->Shutdown(grpc_timeout_milliseconds_to_deadline(0));
- thread_->join();
- gpr_log(GPR_INFO, "%s shutdown completed", type_.c_str());
- running_ = false;
- }
-
- const int port_;
+ grpc::internal::CondVar* cond) {
+ // We need to acquire the lock here in order to prevent the notify_one
+ // below from firing before its corresponding wait is executed.
+ grpc::internal::MutexLock lock(mu);
+ std::ostringstream server_address;
+ server_address << server_host << ":" << port_;
+ ServerBuilder builder;
+ std::shared_ptr<ServerCredentials> creds(new SecureServerCredentials(
+ grpc_fake_transport_security_server_credentials_create()));
+ builder.AddListeningPort(server_address.str(), creds);
+ builder.RegisterService(&service_);
+ server_ = builder.BuildAndStart();
+ cond->Signal();
+ }
+
+ void Shutdown() {
+ if (!running_) return;
+ gpr_log(GPR_INFO, "%s about to shutdown", type_.c_str());
+ service_.Shutdown();
+ server_->Shutdown(grpc_timeout_milliseconds_to_deadline(0));
+ thread_->join();
+ gpr_log(GPR_INFO, "%s shutdown completed", type_.c_str());
+ running_ = false;
+ }
+
+ const int port_;
TString type_;
- T service_;
- std::unique_ptr<Server> server_;
- std::unique_ptr<std::thread> thread_;
- bool running_ = false;
- };
-
+ T service_;
+ std::unique_ptr<Server> server_;
+ std::unique_ptr<std::thread> thread_;
+ bool running_ = false;
+ };
+
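ServerThread::Start() above deliberately acquires a mutex before spawning the Serve() thread so that the CondVar signal from Serve() cannot fire before Start() is parked in Wait(). A minimal sketch of the same handshake using only standard-library primitives (the names StartAndWaitForServer and started are illustrative, not from this patch):

#include <condition_variable>
#include <mutex>
#include <thread>

// The parent holds the mutex before the worker exists, so the worker's
// notify_one() can only run after the parent has released the mutex inside
// cv.wait(); the notification cannot be lost.
void StartAndWaitForServer() {
  std::mutex mu;
  std::condition_variable cv;
  bool started = false;                     // guarded by mu
  std::unique_lock<std::mutex> lock(mu);    // taken before the thread starts
  std::thread worker([&] {
    // ... build and start the server here ...
    std::lock_guard<std::mutex> inner(mu);  // serializes with the wait below
    started = true;
    cv.notify_one();
  });
  cv.wait(lock, [&] { return started; });   // predicate also covers spurious wakeups
  worker.join();  // the real ServerThread keeps the thread alive and joins it in Shutdown()
}

ServerThread follows the same shape with grpc::internal::Mutex and grpc::internal::CondVar, except that the spawned thread outlives Start() and is only joined later in Shutdown().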
const TString server_host_;
- const size_t num_backends_;
- const size_t num_balancers_;
- const int client_load_reporting_interval_seconds_;
- std::shared_ptr<Channel> channel_;
- std::unique_ptr<grpc::testing::EchoTestService::Stub> stub_;
- std::vector<std::unique_ptr<ServerThread<BackendServiceImpl>>> backends_;
- std::vector<std::unique_ptr<ServerThread<BalancerServiceImpl>>> balancers_;
- grpc_core::RefCountedPtr<grpc_core::FakeResolverResponseGenerator>
- response_generator_;
+ const size_t num_backends_;
+ const size_t num_balancers_;
+ const int client_load_reporting_interval_seconds_;
+ std::shared_ptr<Channel> channel_;
+ std::unique_ptr<grpc::testing::EchoTestService::Stub> stub_;
+ std::vector<std::unique_ptr<ServerThread<BackendServiceImpl>>> backends_;
+ std::vector<std::unique_ptr<ServerThread<BalancerServiceImpl>>> balancers_;
+ grpc_core::RefCountedPtr<grpc_core::FakeResolverResponseGenerator>
+ response_generator_;
const TString kRequestMessage_ = "Live long and prosper.";
const TString kApplicationTargetName_ = "application_target_name";
-};
-
-class SingleBalancerTest : public GrpclbEnd2endTest {
- public:
- SingleBalancerTest() : GrpclbEnd2endTest(4, 1, 0) {}
-};
-
-TEST_F(SingleBalancerTest, Vanilla) {
- SetNextResolutionAllBalancers();
- const size_t kNumRpcsPerAddress = 100;
- ScheduleResponseForBalancer(
- 0, BalancerServiceImpl::BuildResponseForBackends(GetBackendPorts(), {}),
- 0);
- // Make sure that trying to connect works without a call.
- channel_->GetState(true /* try_to_connect */);
- // We need to wait for all backends to come online.
- WaitForAllBackends();
- // Send kNumRpcsPerAddress RPCs per server.
- CheckRpcSendOk(kNumRpcsPerAddress * num_backends_);
-
- // Each backend should have gotten 100 requests.
- for (size_t i = 0; i < backends_.size(); ++i) {
- EXPECT_EQ(kNumRpcsPerAddress, backends_[i]->service_.request_count());
- }
- balancers_[0]->service_.NotifyDoneWithServerlists();
- // The balancer got a single request.
- EXPECT_EQ(1U, balancers_[0]->service_.request_count());
- // and sent a single response.
- EXPECT_EQ(1U, balancers_[0]->service_.response_count());
-
- // Check LB policy name for the channel.
- EXPECT_EQ("grpclb", channel_->GetLoadBalancingPolicyName());
-}
-
+};
+
+class SingleBalancerTest : public GrpclbEnd2endTest {
+ public:
+ SingleBalancerTest() : GrpclbEnd2endTest(4, 1, 0) {}
+};
+
+TEST_F(SingleBalancerTest, Vanilla) {
+ SetNextResolutionAllBalancers();
+ const size_t kNumRpcsPerAddress = 100;
+ ScheduleResponseForBalancer(
+ 0, BalancerServiceImpl::BuildResponseForBackends(GetBackendPorts(), {}),
+ 0);
+ // Make sure that trying to connect works without a call.
+ channel_->GetState(true /* try_to_connect */);
+ // We need to wait for all backends to come online.
+ WaitForAllBackends();
+ // Send kNumRpcsPerAddress RPCs per server.
+ CheckRpcSendOk(kNumRpcsPerAddress * num_backends_);
+
+ // Each backend should have gotten 100 requests.
+ for (size_t i = 0; i < backends_.size(); ++i) {
+ EXPECT_EQ(kNumRpcsPerAddress, backends_[i]->service_.request_count());
+ }
+ balancers_[0]->service_.NotifyDoneWithServerlists();
+ // The balancer got a single request.
+ EXPECT_EQ(1U, balancers_[0]->service_.request_count());
+ // and sent a single response.
+ EXPECT_EQ(1U, balancers_[0]->service_.response_count());
+
+ // Check LB policy name for the channel.
+ EXPECT_EQ("grpclb", channel_->GetLoadBalancingPolicyName());
+}
+
TEST_F(SingleBalancerTest, ReturnServerStatus) {
SetNextResolutionAllBalancers();
ScheduleResponseForBalancer(
@@ -784,550 +784,550 @@ TEST_F(SingleBalancerTest, ReturnServerStatus) {
EXPECT_EQ(actual.error_message(), expected.error_message());
}
-TEST_F(SingleBalancerTest, SelectGrpclbWithMigrationServiceConfig) {
- SetNextResolutionAllBalancers(
- "{\n"
- " \"loadBalancingConfig\":[\n"
- " { \"does_not_exist\":{} },\n"
- " { \"grpclb\":{} }\n"
- " ]\n"
- "}");
- ScheduleResponseForBalancer(
- 0, BalancerServiceImpl::BuildResponseForBackends(GetBackendPorts(), {}),
- 0);
- CheckRpcSendOk(1, 1000 /* timeout_ms */, true /* wait_for_ready */);
- balancers_[0]->service_.NotifyDoneWithServerlists();
- // The balancer got a single request.
- EXPECT_EQ(1U, balancers_[0]->service_.request_count());
- // and sent a single response.
- EXPECT_EQ(1U, balancers_[0]->service_.response_count());
- // Check LB policy name for the channel.
- EXPECT_EQ("grpclb", channel_->GetLoadBalancingPolicyName());
-}
-
-TEST_F(SingleBalancerTest,
- SelectGrpclbWithMigrationServiceConfigAndNoAddresses) {
- const int kFallbackTimeoutMs = 200 * grpc_test_slowdown_factor();
- ResetStub(kFallbackTimeoutMs);
+TEST_F(SingleBalancerTest, SelectGrpclbWithMigrationServiceConfig) {
+ SetNextResolutionAllBalancers(
+ "{\n"
+ " \"loadBalancingConfig\":[\n"
+ " { \"does_not_exist\":{} },\n"
+ " { \"grpclb\":{} }\n"
+ " ]\n"
+ "}");
+ ScheduleResponseForBalancer(
+ 0, BalancerServiceImpl::BuildResponseForBackends(GetBackendPorts(), {}),
+ 0);
+ CheckRpcSendOk(1, 1000 /* timeout_ms */, true /* wait_for_ready */);
+ balancers_[0]->service_.NotifyDoneWithServerlists();
+ // The balancer got a single request.
+ EXPECT_EQ(1U, balancers_[0]->service_.request_count());
+ // and sent a single response.
+ EXPECT_EQ(1U, balancers_[0]->service_.response_count());
+ // Check LB policy name for the channel.
+ EXPECT_EQ("grpclb", channel_->GetLoadBalancingPolicyName());
+}
+
+TEST_F(SingleBalancerTest,
+ SelectGrpclbWithMigrationServiceConfigAndNoAddresses) {
+ const int kFallbackTimeoutMs = 200 * grpc_test_slowdown_factor();
+ ResetStub(kFallbackTimeoutMs);
SetNextResolution({}, {},
- "{\n"
- " \"loadBalancingConfig\":[\n"
- " { \"does_not_exist\":{} },\n"
- " { \"grpclb\":{} }\n"
- " ]\n"
- "}");
- // Try to connect.
- EXPECT_EQ(GRPC_CHANNEL_IDLE, channel_->GetState(true));
- // Should go into state TRANSIENT_FAILURE when we enter fallback mode.
- const gpr_timespec deadline = grpc_timeout_seconds_to_deadline(1);
- grpc_connectivity_state state;
- while ((state = channel_->GetState(false)) !=
- GRPC_CHANNEL_TRANSIENT_FAILURE) {
- ASSERT_TRUE(channel_->WaitForStateChange(state, deadline));
- }
- // Check LB policy name for the channel.
- EXPECT_EQ("grpclb", channel_->GetLoadBalancingPolicyName());
-}
-
-TEST_F(SingleBalancerTest, UsePickFirstChildPolicy) {
- SetNextResolutionAllBalancers(
- "{\n"
- " \"loadBalancingConfig\":[\n"
- " { \"grpclb\":{\n"
- " \"childPolicy\":[\n"
- " { \"pick_first\":{} }\n"
- " ]\n"
- " } }\n"
- " ]\n"
- "}");
- ScheduleResponseForBalancer(
- 0, BalancerServiceImpl::BuildResponseForBackends(GetBackendPorts(), {}),
- 0);
- const size_t kNumRpcs = num_backends_ * 2;
- CheckRpcSendOk(kNumRpcs, 1000 /* timeout_ms */, true /* wait_for_ready */);
- balancers_[0]->service_.NotifyDoneWithServerlists();
- // Check that all requests went to the first backend. This verifies
- // that we used pick_first instead of round_robin as the child policy.
- EXPECT_EQ(backends_[0]->service_.request_count(), kNumRpcs);
- for (size_t i = 1; i < backends_.size(); ++i) {
- EXPECT_EQ(backends_[i]->service_.request_count(), 0UL);
- }
- // The balancer got a single request.
- EXPECT_EQ(1U, balancers_[0]->service_.request_count());
- // and sent a single response.
- EXPECT_EQ(1U, balancers_[0]->service_.response_count());
- // Check LB policy name for the channel.
- EXPECT_EQ("grpclb", channel_->GetLoadBalancingPolicyName());
-}
-
-TEST_F(SingleBalancerTest, SwapChildPolicy) {
- SetNextResolutionAllBalancers(
- "{\n"
- " \"loadBalancingConfig\":[\n"
- " { \"grpclb\":{\n"
- " \"childPolicy\":[\n"
- " { \"pick_first\":{} }\n"
- " ]\n"
- " } }\n"
- " ]\n"
- "}");
- ScheduleResponseForBalancer(
- 0, BalancerServiceImpl::BuildResponseForBackends(GetBackendPorts(), {}),
- 0);
- const size_t kNumRpcs = num_backends_ * 2;
- CheckRpcSendOk(kNumRpcs, 1000 /* timeout_ms */, true /* wait_for_ready */);
- // Check that all requests went to the first backend. This verifies
- // that we used pick_first instead of round_robin as the child policy.
- EXPECT_EQ(backends_[0]->service_.request_count(), kNumRpcs);
- for (size_t i = 1; i < backends_.size(); ++i) {
- EXPECT_EQ(backends_[i]->service_.request_count(), 0UL);
- }
- // Send new resolution that removes child policy from service config.
+ "{\n"
+ " \"loadBalancingConfig\":[\n"
+ " { \"does_not_exist\":{} },\n"
+ " { \"grpclb\":{} }\n"
+ " ]\n"
+ "}");
+ // Try to connect.
+ EXPECT_EQ(GRPC_CHANNEL_IDLE, channel_->GetState(true));
+ // Should go into state TRANSIENT_FAILURE when we enter fallback mode.
+ const gpr_timespec deadline = grpc_timeout_seconds_to_deadline(1);
+ grpc_connectivity_state state;
+ while ((state = channel_->GetState(false)) !=
+ GRPC_CHANNEL_TRANSIENT_FAILURE) {
+ ASSERT_TRUE(channel_->WaitForStateChange(state, deadline));
+ }
+ // Check LB policy name for the channel.
+ EXPECT_EQ("grpclb", channel_->GetLoadBalancingPolicyName());
+}
+
+TEST_F(SingleBalancerTest, UsePickFirstChildPolicy) {
+ SetNextResolutionAllBalancers(
+ "{\n"
+ " \"loadBalancingConfig\":[\n"
+ " { \"grpclb\":{\n"
+ " \"childPolicy\":[\n"
+ " { \"pick_first\":{} }\n"
+ " ]\n"
+ " } }\n"
+ " ]\n"
+ "}");
+ ScheduleResponseForBalancer(
+ 0, BalancerServiceImpl::BuildResponseForBackends(GetBackendPorts(), {}),
+ 0);
+ const size_t kNumRpcs = num_backends_ * 2;
+ CheckRpcSendOk(kNumRpcs, 1000 /* timeout_ms */, true /* wait_for_ready */);
+ balancers_[0]->service_.NotifyDoneWithServerlists();
+ // Check that all requests went to the first backend. This verifies
+ // that we used pick_first instead of round_robin as the child policy.
+ EXPECT_EQ(backends_[0]->service_.request_count(), kNumRpcs);
+ for (size_t i = 1; i < backends_.size(); ++i) {
+ EXPECT_EQ(backends_[i]->service_.request_count(), 0UL);
+ }
+ // The balancer got a single request.
+ EXPECT_EQ(1U, balancers_[0]->service_.request_count());
+ // and sent a single response.
+ EXPECT_EQ(1U, balancers_[0]->service_.response_count());
+ // Check LB policy name for the channel.
+ EXPECT_EQ("grpclb", channel_->GetLoadBalancingPolicyName());
+}
+
+TEST_F(SingleBalancerTest, SwapChildPolicy) {
+ SetNextResolutionAllBalancers(
+ "{\n"
+ " \"loadBalancingConfig\":[\n"
+ " { \"grpclb\":{\n"
+ " \"childPolicy\":[\n"
+ " { \"pick_first\":{} }\n"
+ " ]\n"
+ " } }\n"
+ " ]\n"
+ "}");
+ ScheduleResponseForBalancer(
+ 0, BalancerServiceImpl::BuildResponseForBackends(GetBackendPorts(), {}),
+ 0);
+ const size_t kNumRpcs = num_backends_ * 2;
+ CheckRpcSendOk(kNumRpcs, 1000 /* timeout_ms */, true /* wait_for_ready */);
+ // Check that all requests went to the first backend. This verifies
+ // that we used pick_first instead of round_robin as the child policy.
+ EXPECT_EQ(backends_[0]->service_.request_count(), kNumRpcs);
+ for (size_t i = 1; i < backends_.size(); ++i) {
+ EXPECT_EQ(backends_[i]->service_.request_count(), 0UL);
+ }
+ // Send new resolution that removes child policy from service config.
SetNextResolutionAllBalancers();
- WaitForAllBackends();
- CheckRpcSendOk(kNumRpcs, 1000 /* timeout_ms */, true /* wait_for_ready */);
- // Check that every backend saw the same number of requests. This verifies
- // that we used round_robin.
- for (size_t i = 0; i < backends_.size(); ++i) {
- EXPECT_EQ(backends_[i]->service_.request_count(), 2UL);
- }
- // Done.
- balancers_[0]->service_.NotifyDoneWithServerlists();
- // The balancer got a single request.
- EXPECT_EQ(1U, balancers_[0]->service_.request_count());
- // and sent a single response.
- EXPECT_EQ(1U, balancers_[0]->service_.response_count());
- // Check LB policy name for the channel.
- EXPECT_EQ("grpclb", channel_->GetLoadBalancingPolicyName());
-}
-
-TEST_F(SingleBalancerTest, SameBackendListedMultipleTimes) {
- SetNextResolutionAllBalancers();
- // Same backend listed twice.
- std::vector<int> ports;
- ports.push_back(backends_[0]->port_);
- ports.push_back(backends_[0]->port_);
- const size_t kNumRpcsPerAddress = 10;
- ScheduleResponseForBalancer(
- 0, BalancerServiceImpl::BuildResponseForBackends(ports, {}), 0);
- // We need to wait for the backend to come online.
- WaitForBackend(0);
- // Send kNumRpcsPerAddress RPCs per server.
- CheckRpcSendOk(kNumRpcsPerAddress * ports.size());
- // Backend should have gotten 20 requests.
- EXPECT_EQ(kNumRpcsPerAddress * 2, backends_[0]->service_.request_count());
- // And they should have come from a single client port, because of
- // subchannel sharing.
- EXPECT_EQ(1UL, backends_[0]->service_.clients().size());
- balancers_[0]->service_.NotifyDoneWithServerlists();
-}
-
-TEST_F(SingleBalancerTest, SecureNaming) {
- ResetStub(0, kApplicationTargetName_ + ";lb");
+ WaitForAllBackends();
+ CheckRpcSendOk(kNumRpcs, 1000 /* timeout_ms */, true /* wait_for_ready */);
+ // Check that every backend saw the same number of requests. This verifies
+ // that we used round_robin.
+ for (size_t i = 0; i < backends_.size(); ++i) {
+ EXPECT_EQ(backends_[i]->service_.request_count(), 2UL);
+ }
+ // Done.
+ balancers_[0]->service_.NotifyDoneWithServerlists();
+ // The balancer got a single request.
+ EXPECT_EQ(1U, balancers_[0]->service_.request_count());
+ // and sent a single response.
+ EXPECT_EQ(1U, balancers_[0]->service_.response_count());
+ // Check LB policy name for the channel.
+ EXPECT_EQ("grpclb", channel_->GetLoadBalancingPolicyName());
+}
+
+TEST_F(SingleBalancerTest, SameBackendListedMultipleTimes) {
+ SetNextResolutionAllBalancers();
+ // Same backend listed twice.
+ std::vector<int> ports;
+ ports.push_back(backends_[0]->port_);
+ ports.push_back(backends_[0]->port_);
+ const size_t kNumRpcsPerAddress = 10;
+ ScheduleResponseForBalancer(
+ 0, BalancerServiceImpl::BuildResponseForBackends(ports, {}), 0);
+ // We need to wait for the backend to come online.
+ WaitForBackend(0);
+ // Send kNumRpcsPerAddress RPCs per server.
+ CheckRpcSendOk(kNumRpcsPerAddress * ports.size());
+ // Backend should have gotten 20 requests.
+ EXPECT_EQ(kNumRpcsPerAddress * 2, backends_[0]->service_.request_count());
+ // And they should have come from a single client port, because of
+ // subchannel sharing.
+ EXPECT_EQ(1UL, backends_[0]->service_.clients().size());
+ balancers_[0]->service_.NotifyDoneWithServerlists();
+}
+
+TEST_F(SingleBalancerTest, SecureNaming) {
+ ResetStub(0, kApplicationTargetName_ + ";lb");
SetNextResolution({AddressData{balancers_[0]->port_, "lb"}});
- const size_t kNumRpcsPerAddress = 100;
- ScheduleResponseForBalancer(
- 0, BalancerServiceImpl::BuildResponseForBackends(GetBackendPorts(), {}),
- 0);
- // Make sure that trying to connect works without a call.
- channel_->GetState(true /* try_to_connect */);
- // We need to wait for all backends to come online.
- WaitForAllBackends();
- // Send kNumRpcsPerAddress RPCs per server.
- CheckRpcSendOk(kNumRpcsPerAddress * num_backends_);
-
- // Each backend should have gotten 100 requests.
- for (size_t i = 0; i < backends_.size(); ++i) {
- EXPECT_EQ(kNumRpcsPerAddress, backends_[i]->service_.request_count());
- }
- balancers_[0]->service_.NotifyDoneWithServerlists();
- // The balancer got a single request.
- EXPECT_EQ(1U, balancers_[0]->service_.request_count());
- // and sent a single response.
- EXPECT_EQ(1U, balancers_[0]->service_.response_count());
- // Check LB policy name for the channel.
- EXPECT_EQ("grpclb", channel_->GetLoadBalancingPolicyName());
-}
-
-TEST_F(SingleBalancerTest, SecureNamingDeathTest) {
- ::testing::FLAGS_gtest_death_test_style = "threadsafe";
- // Make sure that we blow up (via abort() from the security connector) when
- // the name from the balancer doesn't match expectations.
- ASSERT_DEATH_IF_SUPPORTED(
- {
- ResetStub(0, kApplicationTargetName_ + ";lb");
+ const size_t kNumRpcsPerAddress = 100;
+ ScheduleResponseForBalancer(
+ 0, BalancerServiceImpl::BuildResponseForBackends(GetBackendPorts(), {}),
+ 0);
+ // Make sure that trying to connect works without a call.
+ channel_->GetState(true /* try_to_connect */);
+ // We need to wait for all backends to come online.
+ WaitForAllBackends();
+ // Send kNumRpcsPerAddress RPCs per server.
+ CheckRpcSendOk(kNumRpcsPerAddress * num_backends_);
+
+ // Each backend should have gotten 100 requests.
+ for (size_t i = 0; i < backends_.size(); ++i) {
+ EXPECT_EQ(kNumRpcsPerAddress, backends_[i]->service_.request_count());
+ }
+ balancers_[0]->service_.NotifyDoneWithServerlists();
+ // The balancer got a single request.
+ EXPECT_EQ(1U, balancers_[0]->service_.request_count());
+ // and sent a single response.
+ EXPECT_EQ(1U, balancers_[0]->service_.response_count());
+ // Check LB policy name for the channel.
+ EXPECT_EQ("grpclb", channel_->GetLoadBalancingPolicyName());
+}
+
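SecureNaming checks that the name reported for the balancer matches what the channel's security layer expects; the test drives this through its fake-credentials helper (the second argument to ResetStub). For a real TLS channel the closest public knob is the SSL target-name override shown below. This is an analogous sketch, not the mechanism the test itself uses, and the host names are placeholders.

#include <memory>

#include <grpcpp/grpcpp.h>

std::shared_ptr<grpc::Channel> MakeChannelWithNameOverride() {
  grpc::ChannelArguments args;
  // Validate the server certificate against "lb.example.com" even though the
  // dialed address is different (both values are hypothetical).
  args.SetSslTargetNameOverride("lb.example.com");
  return grpc::CreateCustomChannel(
      "203.0.113.10:443", grpc::SslCredentials(grpc::SslCredentialsOptions()),
      args);
}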
+TEST_F(SingleBalancerTest, SecureNamingDeathTest) {
+ ::testing::FLAGS_gtest_death_test_style = "threadsafe";
+ // Make sure that we blow up (via abort() from the security connector) when
+ // the name from the balancer doesn't match expectations.
+ ASSERT_DEATH_IF_SUPPORTED(
+ {
+ ResetStub(0, kApplicationTargetName_ + ";lb");
SetNextResolution({AddressData{balancers_[0]->port_, "woops"}});
- channel_->WaitForConnected(grpc_timeout_seconds_to_deadline(1));
- },
- "");
-}
-
-TEST_F(SingleBalancerTest, InitiallyEmptyServerlist) {
- SetNextResolutionAllBalancers();
- const int kServerlistDelayMs = 500 * grpc_test_slowdown_factor();
- const int kCallDeadlineMs = kServerlistDelayMs * 2;
- // First response is an empty serverlist, sent right away.
- ScheduleResponseForBalancer(0, LoadBalanceResponse(), 0);
- // Send non-empty serverlist only after kServerlistDelayMs
- ScheduleResponseForBalancer(
- 0, BalancerServiceImpl::BuildResponseForBackends(GetBackendPorts(), {}),
- kServerlistDelayMs);
- const auto t0 = system_clock::now();
- // Client will block: LB will initially send empty serverlist.
- CheckRpcSendOk(1, kCallDeadlineMs, true /* wait_for_ready */);
-  const auto elapsed_ms =
-      std::chrono::duration_cast<std::chrono::milliseconds>(
-          system_clock::now() - t0);
-  // but eventually the LB sends a serverlist update that allows the call to
-  // proceed. The observed delay must therefore exceed the delay before the
-  // populated serverlist was sent, while still completing within the call's
-  // deadline, kCallDeadlineMs, which the RPC itself enforces.
-  EXPECT_GT(elapsed_ms.count(), kServerlistDelayMs);
- balancers_[0]->service_.NotifyDoneWithServerlists();
- // The balancer got a single request.
- EXPECT_EQ(1U, balancers_[0]->service_.request_count());
- // and sent two responses.
- EXPECT_EQ(2U, balancers_[0]->service_.response_count());
-}
-
-TEST_F(SingleBalancerTest, AllServersUnreachableFailFast) {
- SetNextResolutionAllBalancers();
- const size_t kNumUnreachableServers = 5;
- std::vector<int> ports;
- for (size_t i = 0; i < kNumUnreachableServers; ++i) {
- ports.push_back(grpc_pick_unused_port_or_die());
- }
- ScheduleResponseForBalancer(
- 0, BalancerServiceImpl::BuildResponseForBackends(ports, {}), 0);
- const Status status = SendRpc();
- // The error shouldn't be DEADLINE_EXCEEDED.
- EXPECT_EQ(StatusCode::UNAVAILABLE, status.error_code());
- balancers_[0]->service_.NotifyDoneWithServerlists();
- // The balancer got a single request.
- EXPECT_EQ(1U, balancers_[0]->service_.request_count());
- // and sent a single response.
- EXPECT_EQ(1U, balancers_[0]->service_.response_count());
-}
-
-TEST_F(SingleBalancerTest, Fallback) {
- SetNextResolutionAllBalancers();
- const int kFallbackTimeoutMs = 200 * grpc_test_slowdown_factor();
- const int kServerlistDelayMs = 500 * grpc_test_slowdown_factor();
- const size_t kNumBackendsInResolution = backends_.size() / 2;
-
- ResetStub(kFallbackTimeoutMs);
+ channel_->WaitForConnected(grpc_timeout_seconds_to_deadline(1));
+ },
+ "");
+}
+
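The death test above leans entirely on googletest's death-test machinery: the "threadsafe" style re-executes the test binary so the abort() happens in a clean child process. A self-contained example of the same pattern, with a trivial stand-in for the security connector's abort():

#include <cstdlib>

#include <gtest/gtest.h>

// Stand-in for the code path that aborts when the balancer name is wrong.
static void CrashIfNameMismatch(bool mismatch) {
  if (mismatch) std::abort();
}

TEST(SecureNamingDeathSketch, AbortsOnMismatch) {
  ::testing::FLAGS_gtest_death_test_style = "threadsafe";
  ASSERT_DEATH_IF_SUPPORTED(CrashIfNameMismatch(true), "");
}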
+TEST_F(SingleBalancerTest, InitiallyEmptyServerlist) {
+ SetNextResolutionAllBalancers();
+ const int kServerlistDelayMs = 500 * grpc_test_slowdown_factor();
+ const int kCallDeadlineMs = kServerlistDelayMs * 2;
+ // First response is an empty serverlist, sent right away.
+ ScheduleResponseForBalancer(0, LoadBalanceResponse(), 0);
+ // Send non-empty serverlist only after kServerlistDelayMs
+ ScheduleResponseForBalancer(
+ 0, BalancerServiceImpl::BuildResponseForBackends(GetBackendPorts(), {}),
+ kServerlistDelayMs);
+ const auto t0 = system_clock::now();
+ // Client will block: LB will initially send empty serverlist.
+ CheckRpcSendOk(1, kCallDeadlineMs, true /* wait_for_ready */);
+  const auto elapsed_ms =
+      std::chrono::duration_cast<std::chrono::milliseconds>(
+          system_clock::now() - t0);
+  // but eventually the LB sends a serverlist update that allows the call to
+  // proceed. The observed delay must therefore exceed the delay before the
+  // populated serverlist was sent, while still completing within the call's
+  // deadline, kCallDeadlineMs, which the RPC itself enforces.
+  EXPECT_GT(elapsed_ms.count(), kServerlistDelayMs);
+ balancers_[0]->service_.NotifyDoneWithServerlists();
+ // The balancer got a single request.
+ EXPECT_EQ(1U, balancers_[0]->service_.request_count());
+ // and sent two responses.
+ EXPECT_EQ(2U, balancers_[0]->service_.response_count());
+}
+
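The timing check in InitiallyEmptyServerlist is just an elapsed-time measurement around a blocking call. The snippet below reproduces that pattern in isolation; it uses steady_clock (the test uses system_clock) and a sleep as a stand-in for the RPC that blocks until the populated serverlist arrives.

#include <cassert>
#include <chrono>
#include <thread>

int main() {
  const auto delay = std::chrono::milliseconds(50);  // stand-in for kServerlistDelayMs
  const auto t0 = std::chrono::steady_clock::now();
  std::this_thread::sleep_for(delay);  // stand-in for the blocked RPC
  const auto elapsed = std::chrono::duration_cast<std::chrono::milliseconds>(
      std::chrono::steady_clock::now() - t0);
  // Mirrors EXPECT_GT(elapsed_ms.count(), kServerlistDelayMs): the call could
  // not have finished before the delayed serverlist was sent.
  assert(elapsed.count() >= delay.count());
}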
+TEST_F(SingleBalancerTest, AllServersUnreachableFailFast) {
+ SetNextResolutionAllBalancers();
+ const size_t kNumUnreachableServers = 5;
+ std::vector<int> ports;
+ for (size_t i = 0; i < kNumUnreachableServers; ++i) {
+ ports.push_back(grpc_pick_unused_port_or_die());
+ }
+ ScheduleResponseForBalancer(
+ 0, BalancerServiceImpl::BuildResponseForBackends(ports, {}), 0);
+ const Status status = SendRpc();
+ // The error shouldn't be DEADLINE_EXCEEDED.
+ EXPECT_EQ(StatusCode::UNAVAILABLE, status.error_code());
+ balancers_[0]->service_.NotifyDoneWithServerlists();
+ // The balancer got a single request.
+ EXPECT_EQ(1U, balancers_[0]->service_.request_count());
+ // and sent a single response.
+ EXPECT_EQ(1U, balancers_[0]->service_.response_count());
+}
+
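The point of AllServersUnreachableFailFast is the status code: when every address in the serverlist refuses connections, the RPC should fail promptly with UNAVAILABLE rather than hanging until DEADLINE_EXCEEDED. A tiny self-contained illustration of how such a status is represented and inspected (the message text is made up):

#include <cassert>

#include <grpcpp/grpcpp.h>

int main() {
  grpc::Status status(grpc::StatusCode::UNAVAILABLE,
                      "connections to all backends failing");  // illustrative
  assert(!status.ok());
  assert(status.error_code() == grpc::StatusCode::UNAVAILABLE);
  assert(status.error_code() != grpc::StatusCode::DEADLINE_EXCEEDED);
}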
+TEST_F(SingleBalancerTest, Fallback) {
+ SetNextResolutionAllBalancers();
+ const int kFallbackTimeoutMs = 200 * grpc_test_slowdown_factor();
+ const int kServerlistDelayMs = 500 * grpc_test_slowdown_factor();
+ const size_t kNumBackendsInResolution = backends_.size() / 2;
+
+ ResetStub(kFallbackTimeoutMs);
std::vector<AddressData> balancer_addresses;
balancer_addresses.emplace_back(AddressData{balancers_[0]->port_, ""});
std::vector<AddressData> backend_addresses;
- for (size_t i = 0; i < kNumBackendsInResolution; ++i) {
+ for (size_t i = 0; i < kNumBackendsInResolution; ++i) {
backend_addresses.emplace_back(AddressData{backends_[i]->port_, ""});
- }
+ }
SetNextResolution(balancer_addresses, backend_addresses);
-
- // Send non-empty serverlist only after kServerlistDelayMs.
- ScheduleResponseForBalancer(
- 0,
- BalancerServiceImpl::BuildResponseForBackends(
- GetBackendPorts(kNumBackendsInResolution /* start_index */), {}),
- kServerlistDelayMs);
-
- // Wait until all the fallback backends are reachable.
- for (size_t i = 0; i < kNumBackendsInResolution; ++i) {
- WaitForBackend(i);
- }
-
- // The first request.
- gpr_log(GPR_INFO, "========= BEFORE FIRST BATCH ==========");
- CheckRpcSendOk(kNumBackendsInResolution);
- gpr_log(GPR_INFO, "========= DONE WITH FIRST BATCH ==========");
-
- // Fallback is used: each backend returned by the resolver should have
- // gotten one request.
- for (size_t i = 0; i < kNumBackendsInResolution; ++i) {
- EXPECT_EQ(1U, backends_[i]->service_.request_count());
- }
- for (size_t i = kNumBackendsInResolution; i < backends_.size(); ++i) {
- EXPECT_EQ(0U, backends_[i]->service_.request_count());
- }
-
- // Wait until the serverlist reception has been processed and all backends
- // in the serverlist are reachable.
- for (size_t i = kNumBackendsInResolution; i < backends_.size(); ++i) {
- WaitForBackend(i);
- }
-
- // Send out the second request.
- gpr_log(GPR_INFO, "========= BEFORE SECOND BATCH ==========");
- CheckRpcSendOk(backends_.size() - kNumBackendsInResolution);
- gpr_log(GPR_INFO, "========= DONE WITH SECOND BATCH ==========");
-
- // Serverlist is used: each backend returned by the balancer should
- // have gotten one request.
- for (size_t i = 0; i < kNumBackendsInResolution; ++i) {
- EXPECT_EQ(0U, backends_[i]->service_.request_count());
- }
- for (size_t i = kNumBackendsInResolution; i < backends_.size(); ++i) {
- EXPECT_EQ(1U, backends_[i]->service_.request_count());
- }
-
- balancers_[0]->service_.NotifyDoneWithServerlists();
- // The balancer got a single request.
- EXPECT_EQ(1U, balancers_[0]->service_.request_count());
- // and sent a single response.
- EXPECT_EQ(1U, balancers_[0]->service_.response_count());
-}
-
-TEST_F(SingleBalancerTest, FallbackUpdate) {
- SetNextResolutionAllBalancers();
- const int kFallbackTimeoutMs = 200 * grpc_test_slowdown_factor();
- const int kServerlistDelayMs = 500 * grpc_test_slowdown_factor();
- const size_t kNumBackendsInResolution = backends_.size() / 3;
- const size_t kNumBackendsInResolutionUpdate = backends_.size() / 3;
-
- ResetStub(kFallbackTimeoutMs);
+
+ // Send non-empty serverlist only after kServerlistDelayMs.
+ ScheduleResponseForBalancer(
+ 0,
+ BalancerServiceImpl::BuildResponseForBackends(
+ GetBackendPorts(kNumBackendsInResolution /* start_index */), {}),
+ kServerlistDelayMs);
+
+ // Wait until all the fallback backends are reachable.
+ for (size_t i = 0; i < kNumBackendsInResolution; ++i) {
+ WaitForBackend(i);
+ }
+
+ // The first request.
+ gpr_log(GPR_INFO, "========= BEFORE FIRST BATCH ==========");
+ CheckRpcSendOk(kNumBackendsInResolution);
+ gpr_log(GPR_INFO, "========= DONE WITH FIRST BATCH ==========");
+
+ // Fallback is used: each backend returned by the resolver should have
+ // gotten one request.
+ for (size_t i = 0; i < kNumBackendsInResolution; ++i) {
+ EXPECT_EQ(1U, backends_[i]->service_.request_count());
+ }
+ for (size_t i = kNumBackendsInResolution; i < backends_.size(); ++i) {
+ EXPECT_EQ(0U, backends_[i]->service_.request_count());
+ }
+
+ // Wait until the serverlist reception has been processed and all backends
+ // in the serverlist are reachable.
+ for (size_t i = kNumBackendsInResolution; i < backends_.size(); ++i) {
+ WaitForBackend(i);
+ }
+
+ // Send out the second request.
+ gpr_log(GPR_INFO, "========= BEFORE SECOND BATCH ==========");
+ CheckRpcSendOk(backends_.size() - kNumBackendsInResolution);
+ gpr_log(GPR_INFO, "========= DONE WITH SECOND BATCH ==========");
+
+ // Serverlist is used: each backend returned by the balancer should
+ // have gotten one request.
+ for (size_t i = 0; i < kNumBackendsInResolution; ++i) {
+ EXPECT_EQ(0U, backends_[i]->service_.request_count());
+ }
+ for (size_t i = kNumBackendsInResolution; i < backends_.size(); ++i) {
+ EXPECT_EQ(1U, backends_[i]->service_.request_count());
+ }
+
+ balancers_[0]->service_.NotifyDoneWithServerlists();
+ // The balancer got a single request.
+ EXPECT_EQ(1U, balancers_[0]->service_.request_count());
+ // and sent a single response.
+ EXPECT_EQ(1U, balancers_[0]->service_.response_count());
+}
+
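The Fallback test's kFallbackTimeoutMs is presumably plumbed into the channel by ResetStub; the usual way to bound how long grpclb waits for a serverlist before using the resolver-provided backends is the fallback-timeout channel argument. A hedged sketch of setting it on a plain insecure channel (the target string is a placeholder):

#include <memory>

#include <grpc/grpc.h>
#include <grpcpp/grpcpp.h>

std::shared_ptr<grpc::Channel> MakeChannelWithFallbackTimeout(
    int fallback_timeout_ms) {
  grpc::ChannelArguments args;
  // If no serverlist arrives within this budget, grpclb falls back to the
  // backend addresses that came directly from the resolver.
  args.SetInt(GRPC_ARG_GRPCLB_FALLBACK_TIMEOUT_MS, fallback_timeout_ms);
  return grpc::CreateCustomChannel("dns:///lb.example.test:443",
                                   grpc::InsecureChannelCredentials(), args);
}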
+TEST_F(SingleBalancerTest, FallbackUpdate) {
+ SetNextResolutionAllBalancers();
+ const int kFallbackTimeoutMs = 200 * grpc_test_slowdown_factor();
+ const int kServerlistDelayMs = 500 * grpc_test_slowdown_factor();
+ const size_t kNumBackendsInResolution = backends_.size() / 3;
+ const size_t kNumBackendsInResolutionUpdate = backends_.size() / 3;
+
+ ResetStub(kFallbackTimeoutMs);
std::vector<AddressData> balancer_addresses;
balancer_addresses.emplace_back(AddressData{balancers_[0]->port_, ""});
std::vector<AddressData> backend_addresses;
- for (size_t i = 0; i < kNumBackendsInResolution; ++i) {
+ for (size_t i = 0; i < kNumBackendsInResolution; ++i) {
backend_addresses.emplace_back(AddressData{backends_[i]->port_, ""});
- }
+ }
SetNextResolution(balancer_addresses, backend_addresses);
-
- // Send non-empty serverlist only after kServerlistDelayMs.
- ScheduleResponseForBalancer(
- 0,
- BalancerServiceImpl::BuildResponseForBackends(
- GetBackendPorts(kNumBackendsInResolution +
- kNumBackendsInResolutionUpdate /* start_index */),
- {}),
- kServerlistDelayMs);
-
- // Wait until all the fallback backends are reachable.
- for (size_t i = 0; i < kNumBackendsInResolution; ++i) {
- WaitForBackend(i);
- }
-
- // The first request.
- gpr_log(GPR_INFO, "========= BEFORE FIRST BATCH ==========");
- CheckRpcSendOk(kNumBackendsInResolution);
- gpr_log(GPR_INFO, "========= DONE WITH FIRST BATCH ==========");
-
- // Fallback is used: each backend returned by the resolver should have
- // gotten one request.
- for (size_t i = 0; i < kNumBackendsInResolution; ++i) {
- EXPECT_EQ(1U, backends_[i]->service_.request_count());
- }
- for (size_t i = kNumBackendsInResolution; i < backends_.size(); ++i) {
- EXPECT_EQ(0U, backends_[i]->service_.request_count());
- }
-
+
+ // Send non-empty serverlist only after kServerlistDelayMs.
+ ScheduleResponseForBalancer(
+ 0,
+ BalancerServiceImpl::BuildResponseForBackends(
+ GetBackendPorts(kNumBackendsInResolution +
+ kNumBackendsInResolutionUpdate /* start_index */),
+ {}),
+ kServerlistDelayMs);
+
+ // Wait until all the fallback backends are reachable.
+ for (size_t i = 0; i < kNumBackendsInResolution; ++i) {
+ WaitForBackend(i);
+ }
+
+ // The first request.
+ gpr_log(GPR_INFO, "========= BEFORE FIRST BATCH ==========");
+ CheckRpcSendOk(kNumBackendsInResolution);
+ gpr_log(GPR_INFO, "========= DONE WITH FIRST BATCH ==========");
+
+ // Fallback is used: each backend returned by the resolver should have
+ // gotten one request.
+ for (size_t i = 0; i < kNumBackendsInResolution; ++i) {
+ EXPECT_EQ(1U, backends_[i]->service_.request_count());
+ }
+ for (size_t i = kNumBackendsInResolution; i < backends_.size(); ++i) {
+ EXPECT_EQ(0U, backends_[i]->service_.request_count());
+ }
+
balancer_addresses.clear();
balancer_addresses.emplace_back(AddressData{balancers_[0]->port_, ""});
backend_addresses.clear();
- for (size_t i = kNumBackendsInResolution;
- i < kNumBackendsInResolution + kNumBackendsInResolutionUpdate; ++i) {
+ for (size_t i = kNumBackendsInResolution;
+ i < kNumBackendsInResolution + kNumBackendsInResolutionUpdate; ++i) {
backend_addresses.emplace_back(AddressData{backends_[i]->port_, ""});
- }
+ }
SetNextResolution(balancer_addresses, backend_addresses);
-
- // Wait until the resolution update has been processed and all the new
- // fallback backends are reachable.
- for (size_t i = kNumBackendsInResolution;
- i < kNumBackendsInResolution + kNumBackendsInResolutionUpdate; ++i) {
- WaitForBackend(i);
- }
-
- // Send out the second request.
- gpr_log(GPR_INFO, "========= BEFORE SECOND BATCH ==========");
- CheckRpcSendOk(kNumBackendsInResolutionUpdate);
- gpr_log(GPR_INFO, "========= DONE WITH SECOND BATCH ==========");
-
- // The resolution update is used: each backend in the resolution update should
- // have gotten one request.
- for (size_t i = 0; i < kNumBackendsInResolution; ++i) {
- EXPECT_EQ(0U, backends_[i]->service_.request_count());
- }
- for (size_t i = kNumBackendsInResolution;
- i < kNumBackendsInResolution + kNumBackendsInResolutionUpdate; ++i) {
- EXPECT_EQ(1U, backends_[i]->service_.request_count());
- }
- for (size_t i = kNumBackendsInResolution + kNumBackendsInResolutionUpdate;
- i < backends_.size(); ++i) {
- EXPECT_EQ(0U, backends_[i]->service_.request_count());
- }
-
- // Wait until the serverlist reception has been processed and all backends
- // in the serverlist are reachable.
- for (size_t i = kNumBackendsInResolution + kNumBackendsInResolutionUpdate;
- i < backends_.size(); ++i) {
- WaitForBackend(i);
- }
-
- // Send out the third request.
- gpr_log(GPR_INFO, "========= BEFORE THIRD BATCH ==========");
- CheckRpcSendOk(backends_.size() - kNumBackendsInResolution -
- kNumBackendsInResolutionUpdate);
- gpr_log(GPR_INFO, "========= DONE WITH THIRD BATCH ==========");
-
- // Serverlist is used: each backend returned by the balancer should
- // have gotten one request.
- for (size_t i = 0;
- i < kNumBackendsInResolution + kNumBackendsInResolutionUpdate; ++i) {
- EXPECT_EQ(0U, backends_[i]->service_.request_count());
- }
- for (size_t i = kNumBackendsInResolution + kNumBackendsInResolutionUpdate;
- i < backends_.size(); ++i) {
- EXPECT_EQ(1U, backends_[i]->service_.request_count());
- }
-
- balancers_[0]->service_.NotifyDoneWithServerlists();
- // The balancer got a single request.
- EXPECT_EQ(1U, balancers_[0]->service_.request_count());
- // and sent a single response.
- EXPECT_EQ(1U, balancers_[0]->service_.response_count());
-}
-
-TEST_F(SingleBalancerTest,
- FallbackAfterStartup_LoseContactWithBalancerThenBackends) {
- // First two backends are fallback, last two are pointed to by balancer.
- const size_t kNumFallbackBackends = 2;
- const size_t kNumBalancerBackends = backends_.size() - kNumFallbackBackends;
+
+ // Wait until the resolution update has been processed and all the new
+ // fallback backends are reachable.
+ for (size_t i = kNumBackendsInResolution;
+ i < kNumBackendsInResolution + kNumBackendsInResolutionUpdate; ++i) {
+ WaitForBackend(i);
+ }
+
+ // Send out the second request.
+ gpr_log(GPR_INFO, "========= BEFORE SECOND BATCH ==========");
+ CheckRpcSendOk(kNumBackendsInResolutionUpdate);
+ gpr_log(GPR_INFO, "========= DONE WITH SECOND BATCH ==========");
+
+ // The resolution update is used: each backend in the resolution update should
+ // have gotten one request.
+ for (size_t i = 0; i < kNumBackendsInResolution; ++i) {
+ EXPECT_EQ(0U, backends_[i]->service_.request_count());
+ }
+ for (size_t i = kNumBackendsInResolution;
+ i < kNumBackendsInResolution + kNumBackendsInResolutionUpdate; ++i) {
+ EXPECT_EQ(1U, backends_[i]->service_.request_count());
+ }
+ for (size_t i = kNumBackendsInResolution + kNumBackendsInResolutionUpdate;
+ i < backends_.size(); ++i) {
+ EXPECT_EQ(0U, backends_[i]->service_.request_count());
+ }
+
+ // Wait until the serverlist reception has been processed and all backends
+ // in the serverlist are reachable.
+ for (size_t i = kNumBackendsInResolution + kNumBackendsInResolutionUpdate;
+ i < backends_.size(); ++i) {
+ WaitForBackend(i);
+ }
+
+ // Send out the third request.
+ gpr_log(GPR_INFO, "========= BEFORE THIRD BATCH ==========");
+ CheckRpcSendOk(backends_.size() - kNumBackendsInResolution -
+ kNumBackendsInResolutionUpdate);
+ gpr_log(GPR_INFO, "========= DONE WITH THIRD BATCH ==========");
+
+ // Serverlist is used: each backend returned by the balancer should
+ // have gotten one request.
+ for (size_t i = 0;
+ i < kNumBackendsInResolution + kNumBackendsInResolutionUpdate; ++i) {
+ EXPECT_EQ(0U, backends_[i]->service_.request_count());
+ }
+ for (size_t i = kNumBackendsInResolution + kNumBackendsInResolutionUpdate;
+ i < backends_.size(); ++i) {
+ EXPECT_EQ(1U, backends_[i]->service_.request_count());
+ }
+
+ balancers_[0]->service_.NotifyDoneWithServerlists();
+ // The balancer got a single request.
+ EXPECT_EQ(1U, balancers_[0]->service_.request_count());
+ // and sent a single response.
+ EXPECT_EQ(1U, balancers_[0]->service_.response_count());
+}
+
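FallbackUpdate partitions the backend indices into three disjoint ranges: the original resolver backends, the backends added by the resolution update, and the backends that only appear in the balancer's serverlist. The loop structure is easy to lose among the assertions, so here is the same partition as a standalone snippet with an assumed backend count of 6 (the fixture's real count is not visible in this hunk):

#include <cstdio>
#include <vector>

int main() {
  const size_t num_backends = 6;                         // assumed for illustration
  const size_t in_resolution = num_backends / 3;         // first fallback set
  const size_t in_resolution_update = num_backends / 3;  // second fallback set
  std::vector<const char*> role(num_backends, "serverlist-only");
  for (size_t i = 0; i < in_resolution; ++i) role[i] = "resolution";
  for (size_t i = in_resolution; i < in_resolution + in_resolution_update; ++i) {
    role[i] = "resolution-update";
  }
  for (size_t i = 0; i < num_backends; ++i) {
    std::printf("backend %zu -> %s\n", i, role[i]);
  }
}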
+TEST_F(SingleBalancerTest,
+ FallbackAfterStartup_LoseContactWithBalancerThenBackends) {
+ // First two backends are fallback, last two are pointed to by balancer.
+ const size_t kNumFallbackBackends = 2;
+ const size_t kNumBalancerBackends = backends_.size() - kNumFallbackBackends;
std::vector<AddressData> backend_addresses;
- for (size_t i = 0; i < kNumFallbackBackends; ++i) {
+ for (size_t i = 0; i < kNumFallbackBackends; ++i) {
backend_addresses.emplace_back(AddressData{backends_[i]->port_, ""});
- }
+ }
std::vector<AddressData> balancer_addresses;
- for (size_t i = 0; i < balancers_.size(); ++i) {
+ for (size_t i = 0; i < balancers_.size(); ++i) {
balancer_addresses.emplace_back(AddressData{balancers_[i]->port_, ""});
- }
+ }
SetNextResolution(balancer_addresses, backend_addresses);
- ScheduleResponseForBalancer(0,
- BalancerServiceImpl::BuildResponseForBackends(
- GetBackendPorts(kNumFallbackBackends), {}),
- 0);
- // Try to connect.
- channel_->GetState(true /* try_to_connect */);
- WaitForAllBackends(1 /* num_requests_multiple_of */,
- kNumFallbackBackends /* start_index */);
- // Stop balancer. RPCs should continue going to backends from balancer.
- balancers_[0]->Shutdown();
- CheckRpcSendOk(100 * kNumBalancerBackends);
- for (size_t i = kNumFallbackBackends; i < backends_.size(); ++i) {
- EXPECT_EQ(100UL, backends_[i]->service_.request_count());
- }
- // Stop backends from balancer. This should put us in fallback mode.
- for (size_t i = kNumFallbackBackends; i < backends_.size(); ++i) {
- ShutdownBackend(i);
- }
- WaitForAllBackends(1 /* num_requests_multiple_of */, 0 /* start_index */,
- kNumFallbackBackends /* stop_index */);
- // Restart the backends from the balancer. We should *not* start
- // sending traffic back to them at this point (although the behavior
- // in xds may be different).
- for (size_t i = kNumFallbackBackends; i < backends_.size(); ++i) {
- StartBackend(i);
- }
- CheckRpcSendOk(100 * kNumBalancerBackends);
- for (size_t i = 0; i < kNumFallbackBackends; ++i) {
- EXPECT_EQ(100UL, backends_[i]->service_.request_count());
- }
- // Now start the balancer again. This should cause us to exit
- // fallback mode.
- balancers_[0]->Start(server_host_);
- ScheduleResponseForBalancer(0,
- BalancerServiceImpl::BuildResponseForBackends(
- GetBackendPorts(kNumFallbackBackends), {}),
- 0);
- WaitForAllBackends(1 /* num_requests_multiple_of */,
- kNumFallbackBackends /* start_index */);
-}
-
-TEST_F(SingleBalancerTest,
- FallbackAfterStartup_LoseContactWithBackendsThenBalancer) {
- // First two backends are fallback, last two are pointed to by balancer.
- const size_t kNumFallbackBackends = 2;
- const size_t kNumBalancerBackends = backends_.size() - kNumFallbackBackends;
+ ScheduleResponseForBalancer(0,
+ BalancerServiceImpl::BuildResponseForBackends(
+ GetBackendPorts(kNumFallbackBackends), {}),
+ 0);
+ // Try to connect.
+ channel_->GetState(true /* try_to_connect */);
+ WaitForAllBackends(1 /* num_requests_multiple_of */,
+ kNumFallbackBackends /* start_index */);
+ // Stop balancer. RPCs should continue going to backends from balancer.
+ balancers_[0]->Shutdown();
+ CheckRpcSendOk(100 * kNumBalancerBackends);
+ for (size_t i = kNumFallbackBackends; i < backends_.size(); ++i) {
+ EXPECT_EQ(100UL, backends_[i]->service_.request_count());
+ }
+ // Stop backends from balancer. This should put us in fallback mode.
+ for (size_t i = kNumFallbackBackends; i < backends_.size(); ++i) {
+ ShutdownBackend(i);
+ }
+ WaitForAllBackends(1 /* num_requests_multiple_of */, 0 /* start_index */,
+ kNumFallbackBackends /* stop_index */);
+ // Restart the backends from the balancer. We should *not* start
+ // sending traffic back to them at this point (although the behavior
+ // in xds may be different).
+ for (size_t i = kNumFallbackBackends; i < backends_.size(); ++i) {
+ StartBackend(i);
+ }
+ CheckRpcSendOk(100 * kNumBalancerBackends);
+ for (size_t i = 0; i < kNumFallbackBackends; ++i) {
+ EXPECT_EQ(100UL, backends_[i]->service_.request_count());
+ }
+ // Now start the balancer again. This should cause us to exit
+ // fallback mode.
+ balancers_[0]->Start(server_host_);
+ ScheduleResponseForBalancer(0,
+ BalancerServiceImpl::BuildResponseForBackends(
+ GetBackendPorts(kNumFallbackBackends), {}),
+ 0);
+ WaitForAllBackends(1 /* num_requests_multiple_of */,
+ kNumFallbackBackends /* start_index */);
+}
+
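Both fallback-after-startup tests kick off connection attempts with channel_->GetState(true) before waiting on backends. The helper below sketches the same idea with the public connectivity API: ask the channel to connect, then block on state changes until it reports READY. It is a simplified illustration; a real caller would bound the overall wait.

#include <chrono>
#include <memory>

#include <grpcpp/grpcpp.h>

void BlockUntilReady(const std::shared_ptr<grpc::Channel>& channel) {
  // try_to_connect=true starts connecting without issuing an RPC.
  grpc_connectivity_state state = channel->GetState(/*try_to_connect=*/true);
  while (state != GRPC_CHANNEL_READY) {
    // Wait up to one second for the state to move on from `state`.
    channel->WaitForStateChange(
        state, std::chrono::system_clock::now() + std::chrono::seconds(1));
    state = channel->GetState(/*try_to_connect=*/true);
  }
}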
+TEST_F(SingleBalancerTest,
+ FallbackAfterStartup_LoseContactWithBackendsThenBalancer) {
+ // First two backends are fallback, last two are pointed to by balancer.
+ const size_t kNumFallbackBackends = 2;
+ const size_t kNumBalancerBackends = backends_.size() - kNumFallbackBackends;
std::vector<AddressData> backend_addresses;
- for (size_t i = 0; i < kNumFallbackBackends; ++i) {
+ for (size_t i = 0; i < kNumFallbackBackends; ++i) {
backend_addresses.emplace_back(AddressData{backends_[i]->port_, ""});
- }
+ }
std::vector<AddressData> balancer_addresses;
- for (size_t i = 0; i < balancers_.size(); ++i) {
+ for (size_t i = 0; i < balancers_.size(); ++i) {
balancer_addresses.emplace_back(AddressData{balancers_[i]->port_, ""});
- }
+ }
SetNextResolution(balancer_addresses, backend_addresses);
- ScheduleResponseForBalancer(0,
- BalancerServiceImpl::BuildResponseForBackends(
- GetBackendPorts(kNumFallbackBackends), {}),
- 0);
- // Try to connect.
- channel_->GetState(true /* try_to_connect */);
- WaitForAllBackends(1 /* num_requests_multiple_of */,
- kNumFallbackBackends /* start_index */);
- // Stop backends from balancer. Since we are still in contact with
- // the balancer at this point, RPCs should be failing.
- for (size_t i = kNumFallbackBackends; i < backends_.size(); ++i) {
- ShutdownBackend(i);
- }
- CheckRpcSendFailure();
- // Stop balancer. This should put us in fallback mode.
- balancers_[0]->Shutdown();
- WaitForAllBackends(1 /* num_requests_multiple_of */, 0 /* start_index */,
- kNumFallbackBackends /* stop_index */);
- // Restart the backends from the balancer. We should *not* start
- // sending traffic back to them at this point (although the behavior
- // in xds may be different).
- for (size_t i = kNumFallbackBackends; i < backends_.size(); ++i) {
- StartBackend(i);
- }
- CheckRpcSendOk(100 * kNumBalancerBackends);
- for (size_t i = 0; i < kNumFallbackBackends; ++i) {
- EXPECT_EQ(100UL, backends_[i]->service_.request_count());
- }
- // Now start the balancer again. This should cause us to exit
- // fallback mode.
- balancers_[0]->Start(server_host_);
- ScheduleResponseForBalancer(0,
- BalancerServiceImpl::BuildResponseForBackends(
- GetBackendPorts(kNumFallbackBackends), {}),
- 0);
- WaitForAllBackends(1 /* num_requests_multiple_of */,
- kNumFallbackBackends /* start_index */);
-}
-
-TEST_F(SingleBalancerTest, FallbackEarlyWhenBalancerChannelFails) {
- const int kFallbackTimeoutMs = 10000 * grpc_test_slowdown_factor();
- ResetStub(kFallbackTimeoutMs);
- // Return an unreachable balancer and one fallback backend.
+ ScheduleResponseForBalancer(0,
+ BalancerServiceImpl::BuildResponseForBackends(
+ GetBackendPorts(kNumFallbackBackends), {}),
+ 0);
+ // Try to connect.
+ channel_->GetState(true /* try_to_connect */);
+ WaitForAllBackends(1 /* num_requests_multiple_of */,
+ kNumFallbackBackends /* start_index */);
+ // Stop backends from balancer. Since we are still in contact with
+ // the balancer at this point, RPCs should be failing.
+ for (size_t i = kNumFallbackBackends; i < backends_.size(); ++i) {
+ ShutdownBackend(i);
+ }
+ CheckRpcSendFailure();
+ // Stop balancer. This should put us in fallback mode.
+ balancers_[0]->Shutdown();
+ WaitForAllBackends(1 /* num_requests_multiple_of */, 0 /* start_index */,
+ kNumFallbackBackends /* stop_index */);
+ // Restart the backends from the balancer. We should *not* start
+ // sending traffic back to them at this point (although the behavior
+ // in xds may be different).
+ for (size_t i = kNumFallbackBackends; i < backends_.size(); ++i) {
+ StartBackend(i);
+ }
+ CheckRpcSendOk(100 * kNumBalancerBackends);
+ for (size_t i = 0; i < kNumFallbackBackends; ++i) {
+ EXPECT_EQ(100UL, backends_[i]->service_.request_count());
+ }
+ // Now start the balancer again. This should cause us to exit
+ // fallback mode.
+ balancers_[0]->Start(server_host_);
+ ScheduleResponseForBalancer(0,
+ BalancerServiceImpl::BuildResponseForBackends(
+ GetBackendPorts(kNumFallbackBackends), {}),
+ 0);
+ WaitForAllBackends(1 /* num_requests_multiple_of */,
+ kNumFallbackBackends /* start_index */);
+}
+
+TEST_F(SingleBalancerTest, FallbackEarlyWhenBalancerChannelFails) {
+ const int kFallbackTimeoutMs = 10000 * grpc_test_slowdown_factor();
+ ResetStub(kFallbackTimeoutMs);
+ // Return an unreachable balancer and one fallback backend.
std::vector<AddressData> balancer_addresses;
balancer_addresses.emplace_back(
AddressData{grpc_pick_unused_port_or_die(), ""});
std::vector<AddressData> backend_addresses;
backend_addresses.emplace_back(AddressData{backends_[0]->port_, ""});
SetNextResolution(balancer_addresses, backend_addresses);
- // Send RPC with deadline less than the fallback timeout and make sure it
- // succeeds.
- CheckRpcSendOk(/* times */ 1, /* timeout_ms */ 1000,
- /* wait_for_ready */ false);
-}
-
-TEST_F(SingleBalancerTest, FallbackEarlyWhenBalancerCallFails) {
- const int kFallbackTimeoutMs = 10000 * grpc_test_slowdown_factor();
- ResetStub(kFallbackTimeoutMs);
+ // Send RPC with deadline less than the fallback timeout and make sure it
+ // succeeds.
+ CheckRpcSendOk(/* times */ 1, /* timeout_ms */ 1000,
+ /* wait_for_ready */ false);
+}
+
+TEST_F(SingleBalancerTest, FallbackEarlyWhenBalancerCallFails) {
+ const int kFallbackTimeoutMs = 10000 * grpc_test_slowdown_factor();
+ ResetStub(kFallbackTimeoutMs);
// Return one balancer and one fallback backend.
std::vector<AddressData> balancer_addresses;
balancer_addresses.emplace_back(AddressData{balancers_[0]->port_, ""});
std::vector<AddressData> backend_addresses;
backend_addresses.emplace_back(AddressData{backends_[0]->port_, ""});
SetNextResolution(balancer_addresses, backend_addresses);
- // Balancer drops call without sending a serverlist.
- balancers_[0]->service_.NotifyDoneWithServerlists();
- // Send RPC with deadline less than the fallback timeout and make sure it
- // succeeds.
- CheckRpcSendOk(/* times */ 1, /* timeout_ms */ 1000,
- /* wait_for_ready */ false);
-}
-
+ // Balancer drops call without sending a serverlist.
+ balancers_[0]->service_.NotifyDoneWithServerlists();
+ // Send RPC with deadline less than the fallback timeout and make sure it
+ // succeeds.
+ CheckRpcSendOk(/* times */ 1, /* timeout_ms */ 1000,
+ /* wait_for_ready */ false);
+}
+
TEST_F(SingleBalancerTest, FallbackControlledByBalancer_BeforeFirstServerlist) {
const int kFallbackTimeoutMs = 10000 * grpc_test_slowdown_factor();
ResetStub(kFallbackTimeoutMs);
@@ -1371,29 +1371,29 @@ TEST_F(SingleBalancerTest, FallbackControlledByBalancer_AfterFirstServerlist) {
WaitForBackend(1);
}
-TEST_F(SingleBalancerTest, BackendsRestart) {
- SetNextResolutionAllBalancers();
- const size_t kNumRpcsPerAddress = 100;
- ScheduleResponseForBalancer(
- 0, BalancerServiceImpl::BuildResponseForBackends(GetBackendPorts(), {}),
- 0);
- // Make sure that trying to connect works without a call.
- channel_->GetState(true /* try_to_connect */);
- // Send kNumRpcsPerAddress RPCs per server.
- CheckRpcSendOk(kNumRpcsPerAddress * num_backends_);
- // Stop backends. RPCs should fail.
- ShutdownAllBackends();
- CheckRpcSendFailure();
- // Restart backends. RPCs should start succeeding again.
- StartAllBackends();
- CheckRpcSendOk(1 /* times */, 2000 /* timeout_ms */,
- true /* wait_for_ready */);
- // The balancer got a single request.
- EXPECT_EQ(1U, balancers_[0]->service_.request_count());
- // and sent a single response.
- EXPECT_EQ(1U, balancers_[0]->service_.response_count());
-}
-
+TEST_F(SingleBalancerTest, BackendsRestart) {
+ SetNextResolutionAllBalancers();
+ const size_t kNumRpcsPerAddress = 100;
+ ScheduleResponseForBalancer(
+ 0, BalancerServiceImpl::BuildResponseForBackends(GetBackendPorts(), {}),
+ 0);
+ // Make sure that trying to connect works without a call.
+ channel_->GetState(true /* try_to_connect */);
+ // Send kNumRpcsPerAddress RPCs per server.
+ CheckRpcSendOk(kNumRpcsPerAddress * num_backends_);
+ // Stop backends. RPCs should fail.
+ ShutdownAllBackends();
+ CheckRpcSendFailure();
+ // Restart backends. RPCs should start succeeding again.
+ StartAllBackends();
+ CheckRpcSendOk(1 /* times */, 2000 /* timeout_ms */,
+ true /* wait_for_ready */);
+ // The balancer got a single request.
+ EXPECT_EQ(1U, balancers_[0]->service_.request_count());
+ // and sent a single response.
+ EXPECT_EQ(1U, balancers_[0]->service_.response_count());
+}
+
TEST_F(SingleBalancerTest, ServiceNameFromLbPolicyConfig) {
constexpr char kServiceConfigWithTarget[] =
"{\n"
@@ -1415,277 +1415,277 @@ TEST_F(SingleBalancerTest, ServiceNameFromLbPolicyConfig) {
EXPECT_EQ(balancers_[0]->service_.service_names().back(), "test_service");
}
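ServiceNameFromLbPolicyConfig (truncated by the hunk above) feeds the grpclb policy a per-channel service config. Below is a hedged sketch of attaching such a JSON config through the public ChannelArguments API; the JSON only approximates the test's kServiceConfigWithTarget, and the key names and target are illustrative assumptions rather than copies of the truncated constant.

#include <memory>

#include <grpcpp/grpcpp.h>

std::shared_ptr<grpc::Channel> MakeChannelWithGrpclbServiceConfig() {
  grpc::ChannelArguments args;
  args.SetServiceConfigJSON(
      "{\n"
      "  \"loadBalancingConfig\": [{\n"
      "    \"grpclb\": { \"serviceName\": \"test_service\" }\n"
      "  }]\n"
      "}");
  return grpc::CreateCustomChannel("dns:///example.test:443",
                                   grpc::InsecureChannelCredentials(), args);
}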
-class UpdatesTest : public GrpclbEnd2endTest {
- public:
- UpdatesTest() : GrpclbEnd2endTest(4, 3, 0) {}
-};
-
-TEST_F(UpdatesTest, UpdateBalancersButKeepUsingOriginalBalancer) {
- SetNextResolutionAllBalancers();
- const std::vector<int> first_backend{GetBackendPorts()[0]};
- const std::vector<int> second_backend{GetBackendPorts()[1]};
- ScheduleResponseForBalancer(
- 0, BalancerServiceImpl::BuildResponseForBackends(first_backend, {}), 0);
- ScheduleResponseForBalancer(
- 1, BalancerServiceImpl::BuildResponseForBackends(second_backend, {}), 0);
-
- // Wait until the first backend is ready.
- WaitForBackend(0);
-
- // Send 10 requests.
- gpr_log(GPR_INFO, "========= BEFORE FIRST BATCH ==========");
- CheckRpcSendOk(10);
- gpr_log(GPR_INFO, "========= DONE WITH FIRST BATCH ==========");
-
- // All 10 requests should have gone to the first backend.
- EXPECT_EQ(10U, backends_[0]->service_.request_count());
-
- // Balancer 0 got a single request.
- EXPECT_EQ(1U, balancers_[0]->service_.request_count());
- // and sent a single response.
- EXPECT_EQ(1U, balancers_[0]->service_.response_count());
- EXPECT_EQ(0U, balancers_[1]->service_.request_count());
- EXPECT_EQ(0U, balancers_[1]->service_.response_count());
- EXPECT_EQ(0U, balancers_[2]->service_.request_count());
- EXPECT_EQ(0U, balancers_[2]->service_.response_count());
-
- std::vector<AddressData> addresses;
+class UpdatesTest : public GrpclbEnd2endTest {
+ public:
+ UpdatesTest() : GrpclbEnd2endTest(4, 3, 0) {}
+};
+
+TEST_F(UpdatesTest, UpdateBalancersButKeepUsingOriginalBalancer) {
+ SetNextResolutionAllBalancers();
+ const std::vector<int> first_backend{GetBackendPorts()[0]};
+ const std::vector<int> second_backend{GetBackendPorts()[1]};
+ ScheduleResponseForBalancer(
+ 0, BalancerServiceImpl::BuildResponseForBackends(first_backend, {}), 0);
+ ScheduleResponseForBalancer(
+ 1, BalancerServiceImpl::BuildResponseForBackends(second_backend, {}), 0);
+
+ // Wait until the first backend is ready.
+ WaitForBackend(0);
+
+ // Send 10 requests.
+ gpr_log(GPR_INFO, "========= BEFORE FIRST BATCH ==========");
+ CheckRpcSendOk(10);
+ gpr_log(GPR_INFO, "========= DONE WITH FIRST BATCH ==========");
+
+ // All 10 requests should have gone to the first backend.
+ EXPECT_EQ(10U, backends_[0]->service_.request_count());
+
+ // Balancer 0 got a single request.
+ EXPECT_EQ(1U, balancers_[0]->service_.request_count());
+ // and sent a single response.
+ EXPECT_EQ(1U, balancers_[0]->service_.response_count());
+ EXPECT_EQ(0U, balancers_[1]->service_.request_count());
+ EXPECT_EQ(0U, balancers_[1]->service_.response_count());
+ EXPECT_EQ(0U, balancers_[2]->service_.request_count());
+ EXPECT_EQ(0U, balancers_[2]->service_.response_count());
+
+ std::vector<AddressData> addresses;
addresses.emplace_back(AddressData{balancers_[1]->port_, ""});
- gpr_log(GPR_INFO, "========= ABOUT TO UPDATE 1 ==========");
- SetNextResolution(addresses);
- gpr_log(GPR_INFO, "========= UPDATE 1 DONE ==========");
-
- EXPECT_EQ(0U, backends_[1]->service_.request_count());
- gpr_timespec deadline = gpr_time_add(
- gpr_now(GPR_CLOCK_REALTIME), gpr_time_from_millis(10000, GPR_TIMESPAN));
- // Send 10 seconds worth of RPCs
- do {
- CheckRpcSendOk();
- } while (gpr_time_cmp(gpr_now(GPR_CLOCK_REALTIME), deadline) < 0);
-  // The current LB call is still working, so grpclb keeps using it with the
-  // first balancer, which doesn't assign the second backend.
- EXPECT_EQ(0U, backends_[1]->service_.request_count());
-
- EXPECT_EQ(1U, balancers_[0]->service_.request_count());
- EXPECT_EQ(1U, balancers_[0]->service_.response_count());
- EXPECT_EQ(0U, balancers_[1]->service_.request_count());
- EXPECT_EQ(0U, balancers_[1]->service_.response_count());
- EXPECT_EQ(0U, balancers_[2]->service_.request_count());
- EXPECT_EQ(0U, balancers_[2]->service_.response_count());
-}
-
-// Send an update with the same set of LBs as the one in SetUp() in order to
-// verify that the LB channel inside grpclb keeps the initial connection (which
-// by definition is also present in the update).
-TEST_F(UpdatesTest, UpdateBalancersRepeated) {
- SetNextResolutionAllBalancers();
- const std::vector<int> first_backend{GetBackendPorts()[0]};
- const std::vector<int> second_backend{GetBackendPorts()[0]};
-
- ScheduleResponseForBalancer(
- 0, BalancerServiceImpl::BuildResponseForBackends(first_backend, {}), 0);
- ScheduleResponseForBalancer(
- 1, BalancerServiceImpl::BuildResponseForBackends(second_backend, {}), 0);
-
- // Wait until the first backend is ready.
- WaitForBackend(0);
-
- // Send 10 requests.
- gpr_log(GPR_INFO, "========= BEFORE FIRST BATCH ==========");
- CheckRpcSendOk(10);
- gpr_log(GPR_INFO, "========= DONE WITH FIRST BATCH ==========");
-
- // All 10 requests should have gone to the first backend.
- EXPECT_EQ(10U, backends_[0]->service_.request_count());
-
- balancers_[0]->service_.NotifyDoneWithServerlists();
- // Balancer 0 got a single request.
- EXPECT_EQ(1U, balancers_[0]->service_.request_count());
- // and sent a single response.
- EXPECT_EQ(1U, balancers_[0]->service_.response_count());
- EXPECT_EQ(0U, balancers_[1]->service_.request_count());
- EXPECT_EQ(0U, balancers_[1]->service_.response_count());
- EXPECT_EQ(0U, balancers_[2]->service_.request_count());
- EXPECT_EQ(0U, balancers_[2]->service_.response_count());
-
- std::vector<AddressData> addresses;
+ gpr_log(GPR_INFO, "========= ABOUT TO UPDATE 1 ==========");
+ SetNextResolution(addresses);
+ gpr_log(GPR_INFO, "========= UPDATE 1 DONE ==========");
+
+ EXPECT_EQ(0U, backends_[1]->service_.request_count());
+ gpr_timespec deadline = gpr_time_add(
+ gpr_now(GPR_CLOCK_REALTIME), gpr_time_from_millis(10000, GPR_TIMESPAN));
+ // Send 10 seconds worth of RPCs
+ do {
+ CheckRpcSendOk();
+ } while (gpr_time_cmp(gpr_now(GPR_CLOCK_REALTIME), deadline) < 0);
+  // The current LB call is still working, so grpclb keeps using it with the
+  // first balancer, which doesn't assign the second backend.
+ EXPECT_EQ(0U, backends_[1]->service_.request_count());
+
+ EXPECT_EQ(1U, balancers_[0]->service_.request_count());
+ EXPECT_EQ(1U, balancers_[0]->service_.response_count());
+ EXPECT_EQ(0U, balancers_[1]->service_.request_count());
+ EXPECT_EQ(0U, balancers_[1]->service_.response_count());
+ EXPECT_EQ(0U, balancers_[2]->service_.request_count());
+ EXPECT_EQ(0U, balancers_[2]->service_.response_count());
+}
+
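The updates tests repeatedly use the same time-bounded loop: compute a gpr deadline N milliseconds from now and keep sending RPCs until the clock passes it. Factored out, the pattern looks like the template below; send_one_rpc stands in for CheckRpcSendOk and is an assumption, not the test's helper.

#include <cstdint>

#include <grpc/support/time.h>

// Run `send_one_rpc` repeatedly for roughly `millis` of wall-clock time.
template <typename Fn>
void RunFor(int64_t millis, Fn&& send_one_rpc) {
  gpr_timespec deadline = gpr_time_add(
      gpr_now(GPR_CLOCK_REALTIME), gpr_time_from_millis(millis, GPR_TIMESPAN));
  do {
    send_one_rpc();
  } while (gpr_time_cmp(gpr_now(GPR_CLOCK_REALTIME), deadline) < 0);
}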
+// Send an update with the same set of LBs as the one in SetUp() in order to
+// verify that the LB channel inside grpclb keeps the initial connection (which
+// by definition is also present in the update).
+TEST_F(UpdatesTest, UpdateBalancersRepeated) {
+ SetNextResolutionAllBalancers();
+ const std::vector<int> first_backend{GetBackendPorts()[0]};
+ const std::vector<int> second_backend{GetBackendPorts()[0]};
+
+ ScheduleResponseForBalancer(
+ 0, BalancerServiceImpl::BuildResponseForBackends(first_backend, {}), 0);
+ ScheduleResponseForBalancer(
+ 1, BalancerServiceImpl::BuildResponseForBackends(second_backend, {}), 0);
+
+ // Wait until the first backend is ready.
+ WaitForBackend(0);
+
+ // Send 10 requests.
+ gpr_log(GPR_INFO, "========= BEFORE FIRST BATCH ==========");
+ CheckRpcSendOk(10);
+ gpr_log(GPR_INFO, "========= DONE WITH FIRST BATCH ==========");
+
+ // All 10 requests should have gone to the first backend.
+ EXPECT_EQ(10U, backends_[0]->service_.request_count());
+
+ balancers_[0]->service_.NotifyDoneWithServerlists();
+ // Balancer 0 got a single request.
+ EXPECT_EQ(1U, balancers_[0]->service_.request_count());
+ // and sent a single response.
+ EXPECT_EQ(1U, balancers_[0]->service_.response_count());
+ EXPECT_EQ(0U, balancers_[1]->service_.request_count());
+ EXPECT_EQ(0U, balancers_[1]->service_.response_count());
+ EXPECT_EQ(0U, balancers_[2]->service_.request_count());
+ EXPECT_EQ(0U, balancers_[2]->service_.response_count());
+
+ std::vector<AddressData> addresses;
addresses.emplace_back(AddressData{balancers_[0]->port_, ""});
addresses.emplace_back(AddressData{balancers_[1]->port_, ""});
addresses.emplace_back(AddressData{balancers_[2]->port_, ""});
- gpr_log(GPR_INFO, "========= ABOUT TO UPDATE 1 ==========");
- SetNextResolution(addresses);
- gpr_log(GPR_INFO, "========= UPDATE 1 DONE ==========");
-
- EXPECT_EQ(0U, backends_[1]->service_.request_count());
- gpr_timespec deadline = gpr_time_add(
- gpr_now(GPR_CLOCK_REALTIME), gpr_time_from_millis(10000, GPR_TIMESPAN));
- // Send 10 seconds worth of RPCs
- do {
- CheckRpcSendOk();
- } while (gpr_time_cmp(gpr_now(GPR_CLOCK_REALTIME), deadline) < 0);
- // grpclb continued using the original LB call to the first balancer, which
- // doesn't assign the second backend.
- EXPECT_EQ(0U, backends_[1]->service_.request_count());
- balancers_[0]->service_.NotifyDoneWithServerlists();
-
- addresses.clear();
+ gpr_log(GPR_INFO, "========= ABOUT TO UPDATE 1 ==========");
+ SetNextResolution(addresses);
+ gpr_log(GPR_INFO, "========= UPDATE 1 DONE ==========");
+
+ EXPECT_EQ(0U, backends_[1]->service_.request_count());
+ gpr_timespec deadline = gpr_time_add(
+ gpr_now(GPR_CLOCK_REALTIME), gpr_time_from_millis(10000, GPR_TIMESPAN));
+ // Send 10 seconds worth of RPCs
+ do {
+ CheckRpcSendOk();
+ } while (gpr_time_cmp(gpr_now(GPR_CLOCK_REALTIME), deadline) < 0);
+ // grpclb continued using the original LB call to the first balancer, which
+ // doesn't assign the second backend.
+ EXPECT_EQ(0U, backends_[1]->service_.request_count());
+ balancers_[0]->service_.NotifyDoneWithServerlists();
+
+ addresses.clear();
addresses.emplace_back(AddressData{balancers_[0]->port_, ""});
addresses.emplace_back(AddressData{balancers_[1]->port_, ""});
- gpr_log(GPR_INFO, "========= ABOUT TO UPDATE 2 ==========");
- SetNextResolution(addresses);
- gpr_log(GPR_INFO, "========= UPDATE 2 DONE ==========");
-
- EXPECT_EQ(0U, backends_[1]->service_.request_count());
- deadline = gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
- gpr_time_from_millis(10000, GPR_TIMESPAN));
- // Send 10 seconds worth of RPCs
- do {
- CheckRpcSendOk();
- } while (gpr_time_cmp(gpr_now(GPR_CLOCK_REALTIME), deadline) < 0);
- // grpclb continued using the original LB call to the first balancer, which
- // doesn't assign the second backend.
- EXPECT_EQ(0U, backends_[1]->service_.request_count());
- balancers_[0]->service_.NotifyDoneWithServerlists();
-}
-
-TEST_F(UpdatesTest, UpdateBalancersDeadUpdate) {
- std::vector<AddressData> addresses;
+ gpr_log(GPR_INFO, "========= ABOUT TO UPDATE 2 ==========");
+ SetNextResolution(addresses);
+ gpr_log(GPR_INFO, "========= UPDATE 2 DONE ==========");
+
+ EXPECT_EQ(0U, backends_[1]->service_.request_count());
+ deadline = gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
+ gpr_time_from_millis(10000, GPR_TIMESPAN));
+ // Send 10 seconds worth of RPCs
+ do {
+ CheckRpcSendOk();
+ } while (gpr_time_cmp(gpr_now(GPR_CLOCK_REALTIME), deadline) < 0);
+ // grpclb continued using the original LB call to the first balancer, which
+ // doesn't assign the second backend.
+ EXPECT_EQ(0U, backends_[1]->service_.request_count());
+ balancers_[0]->service_.NotifyDoneWithServerlists();
+}
+
+TEST_F(UpdatesTest, UpdateBalancersDeadUpdate) {
+ std::vector<AddressData> addresses;
addresses.emplace_back(AddressData{balancers_[0]->port_, ""});
- SetNextResolution(addresses);
- const std::vector<int> first_backend{GetBackendPorts()[0]};
- const std::vector<int> second_backend{GetBackendPorts()[1]};
-
- ScheduleResponseForBalancer(
- 0, BalancerServiceImpl::BuildResponseForBackends(first_backend, {}), 0);
- ScheduleResponseForBalancer(
- 1, BalancerServiceImpl::BuildResponseForBackends(second_backend, {}), 0);
-
- // Start servers and send 10 RPCs per server.
- gpr_log(GPR_INFO, "========= BEFORE FIRST BATCH ==========");
- CheckRpcSendOk(10);
- gpr_log(GPR_INFO, "========= DONE WITH FIRST BATCH ==========");
- // All 10 requests should have gone to the first backend.
- EXPECT_EQ(10U, backends_[0]->service_.request_count());
-
- // Kill balancer 0
- gpr_log(GPR_INFO, "********** ABOUT TO KILL BALANCER 0 *************");
- balancers_[0]->Shutdown();
- gpr_log(GPR_INFO, "********** KILLED BALANCER 0 *************");
-
- // This is serviced by the existing RR policy
- gpr_log(GPR_INFO, "========= BEFORE SECOND BATCH ==========");
- CheckRpcSendOk(10);
- gpr_log(GPR_INFO, "========= DONE WITH SECOND BATCH ==========");
- // All 10 requests should again have gone to the first backend.
- EXPECT_EQ(20U, backends_[0]->service_.request_count());
- EXPECT_EQ(0U, backends_[1]->service_.request_count());
-
- // Balancer 0 got a single request.
- EXPECT_EQ(1U, balancers_[0]->service_.request_count());
- // and sent a single response.
- EXPECT_EQ(1U, balancers_[0]->service_.response_count());
- EXPECT_EQ(0U, balancers_[1]->service_.request_count());
- EXPECT_EQ(0U, balancers_[1]->service_.response_count());
- EXPECT_EQ(0U, balancers_[2]->service_.request_count());
- EXPECT_EQ(0U, balancers_[2]->service_.response_count());
-
- addresses.clear();
+ SetNextResolution(addresses);
+ const std::vector<int> first_backend{GetBackendPorts()[0]};
+ const std::vector<int> second_backend{GetBackendPorts()[1]};
+
+ ScheduleResponseForBalancer(
+ 0, BalancerServiceImpl::BuildResponseForBackends(first_backend, {}), 0);
+ ScheduleResponseForBalancer(
+ 1, BalancerServiceImpl::BuildResponseForBackends(second_backend, {}), 0);
+
+ // Start servers and send 10 RPCs per server.
+ gpr_log(GPR_INFO, "========= BEFORE FIRST BATCH ==========");
+ CheckRpcSendOk(10);
+ gpr_log(GPR_INFO, "========= DONE WITH FIRST BATCH ==========");
+ // All 10 requests should have gone to the first backend.
+ EXPECT_EQ(10U, backends_[0]->service_.request_count());
+
+ // Kill balancer 0
+ gpr_log(GPR_INFO, "********** ABOUT TO KILL BALANCER 0 *************");
+ balancers_[0]->Shutdown();
+ gpr_log(GPR_INFO, "********** KILLED BALANCER 0 *************");
+
+ // This is serviced by the existing RR policy
+ gpr_log(GPR_INFO, "========= BEFORE SECOND BATCH ==========");
+ CheckRpcSendOk(10);
+ gpr_log(GPR_INFO, "========= DONE WITH SECOND BATCH ==========");
+ // All 10 requests should again have gone to the first backend.
+ EXPECT_EQ(20U, backends_[0]->service_.request_count());
+ EXPECT_EQ(0U, backends_[1]->service_.request_count());
+
+ // Balancer 0 got a single request.
+ EXPECT_EQ(1U, balancers_[0]->service_.request_count());
+ // and sent a single response.
+ EXPECT_EQ(1U, balancers_[0]->service_.response_count());
+ EXPECT_EQ(0U, balancers_[1]->service_.request_count());
+ EXPECT_EQ(0U, balancers_[1]->service_.response_count());
+ EXPECT_EQ(0U, balancers_[2]->service_.request_count());
+ EXPECT_EQ(0U, balancers_[2]->service_.response_count());
+
+ addresses.clear();
addresses.emplace_back(AddressData{balancers_[1]->port_, ""});
- gpr_log(GPR_INFO, "========= ABOUT TO UPDATE 1 ==========");
- SetNextResolution(addresses);
- gpr_log(GPR_INFO, "========= UPDATE 1 DONE ==========");
-
-  // Wait until the update has been processed, as signaled by the second
-  // backend receiving a request. In the meantime, the client continues to be
-  // serviced (by the first backend) without interruption.
- EXPECT_EQ(0U, backends_[1]->service_.request_count());
- WaitForBackend(1);
-
- // This is serviced by the updated RR policy
- backends_[1]->service_.ResetCounters();
- gpr_log(GPR_INFO, "========= BEFORE THIRD BATCH ==========");
- CheckRpcSendOk(10);
- gpr_log(GPR_INFO, "========= DONE WITH THIRD BATCH ==========");
- // All 10 requests should have gone to the second backend.
- EXPECT_EQ(10U, backends_[1]->service_.request_count());
-
- EXPECT_EQ(1U, balancers_[0]->service_.request_count());
- EXPECT_EQ(1U, balancers_[0]->service_.response_count());
- // The second balancer, published as part of the first update, may end up
- // getting two requests (that is, 1 <= #req <= 2) if the LB call retry timer
- // firing races with the arrival of the update containing the second
- // balancer.
- EXPECT_GE(balancers_[1]->service_.request_count(), 1U);
- EXPECT_GE(balancers_[1]->service_.response_count(), 1U);
- EXPECT_LE(balancers_[1]->service_.request_count(), 2U);
- EXPECT_LE(balancers_[1]->service_.response_count(), 2U);
- EXPECT_EQ(0U, balancers_[2]->service_.request_count());
- EXPECT_EQ(0U, balancers_[2]->service_.response_count());
-}
-
-TEST_F(UpdatesTest, ReresolveDeadBackend) {
- ResetStub(500);
- // The first resolution contains the addresses of a balancer that never
- // responds, and a fallback backend.
+ gpr_log(GPR_INFO, "========= ABOUT TO UPDATE 1 ==========");
+ SetNextResolution(addresses);
+ gpr_log(GPR_INFO, "========= UPDATE 1 DONE ==========");
+
+  // Wait until the update has been processed, as signaled by the second
+  // backend receiving a request. In the meantime, the client continues to be
+  // serviced (by the first backend) without interruption.
+ EXPECT_EQ(0U, backends_[1]->service_.request_count());
+ WaitForBackend(1);
+
+ // This is serviced by the updated RR policy
+ backends_[1]->service_.ResetCounters();
+ gpr_log(GPR_INFO, "========= BEFORE THIRD BATCH ==========");
+ CheckRpcSendOk(10);
+ gpr_log(GPR_INFO, "========= DONE WITH THIRD BATCH ==========");
+ // All 10 requests should have gone to the second backend.
+ EXPECT_EQ(10U, backends_[1]->service_.request_count());
+
+ EXPECT_EQ(1U, balancers_[0]->service_.request_count());
+ EXPECT_EQ(1U, balancers_[0]->service_.response_count());
+ // The second balancer, published as part of the first update, may end up
+ // getting two requests (that is, 1 <= #req <= 2) if the LB call retry timer
+ // firing races with the arrival of the update containing the second
+ // balancer.
+ EXPECT_GE(balancers_[1]->service_.request_count(), 1U);
+ EXPECT_GE(balancers_[1]->service_.response_count(), 1U);
+ EXPECT_LE(balancers_[1]->service_.request_count(), 2U);
+ EXPECT_LE(balancers_[1]->service_.response_count(), 2U);
+ EXPECT_EQ(0U, balancers_[2]->service_.request_count());
+ EXPECT_EQ(0U, balancers_[2]->service_.response_count());
+}
+
+TEST_F(UpdatesTest, ReresolveDeadBackend) {
+ ResetStub(500);
+ // The first resolution contains the addresses of a balancer that never
+ // responds, and a fallback backend.
std::vector<AddressData> balancer_addresses;
balancer_addresses.emplace_back(AddressData{balancers_[0]->port_, ""});
std::vector<AddressData> backend_addresses;
backend_addresses.emplace_back(AddressData{backends_[0]->port_, ""});
SetNextResolution(balancer_addresses, backend_addresses);
- // Ask channel to connect to trigger resolver creation.
- channel_->GetState(true);
- // The re-resolution result will contain the addresses of the same balancer
- // and a new fallback backend.
+ // Ask channel to connect to trigger resolver creation.
+ channel_->GetState(true);
+ // The re-resolution result will contain the addresses of the same balancer
+ // and a new fallback backend.
balancer_addresses.clear();
balancer_addresses.emplace_back(AddressData{balancers_[0]->port_, ""});
backend_addresses.clear();
backend_addresses.emplace_back(AddressData{backends_[1]->port_, ""});
SetNextReresolutionResponse(balancer_addresses, backend_addresses);
-
- // Start servers and send 10 RPCs per server.
- gpr_log(GPR_INFO, "========= BEFORE FIRST BATCH ==========");
- CheckRpcSendOk(10);
- gpr_log(GPR_INFO, "========= DONE WITH FIRST BATCH ==========");
- // All 10 requests should have gone to the fallback backend.
- EXPECT_EQ(10U, backends_[0]->service_.request_count());
-
- // Kill backend 0.
- gpr_log(GPR_INFO, "********** ABOUT TO KILL BACKEND 0 *************");
- backends_[0]->Shutdown();
- gpr_log(GPR_INFO, "********** KILLED BACKEND 0 *************");
-
- // Wait until re-resolution has finished, as signaled by the second backend
- // receiving a request.
- WaitForBackend(1);
-
- gpr_log(GPR_INFO, "========= BEFORE SECOND BATCH ==========");
- CheckRpcSendOk(10);
- gpr_log(GPR_INFO, "========= DONE WITH SECOND BATCH ==========");
- // All 10 requests should have gone to the second backend.
- EXPECT_EQ(10U, backends_[1]->service_.request_count());
-
- balancers_[0]->service_.NotifyDoneWithServerlists();
- balancers_[1]->service_.NotifyDoneWithServerlists();
- balancers_[2]->service_.NotifyDoneWithServerlists();
- EXPECT_EQ(1U, balancers_[0]->service_.request_count());
- EXPECT_EQ(0U, balancers_[0]->service_.response_count());
- EXPECT_EQ(0U, balancers_[1]->service_.request_count());
- EXPECT_EQ(0U, balancers_[1]->service_.response_count());
- EXPECT_EQ(0U, balancers_[2]->service_.request_count());
- EXPECT_EQ(0U, balancers_[2]->service_.response_count());
-}
-
-// TODO(juanlishen): Should be removed when the first response is always the
-// initial response. Currently, if client load reporting is not enabled, the
-// balancer doesn't send initial response. When the backend shuts down, an
-// unexpected re-resolution will happen. This test configuration is a workaround
-// for test ReresolveDeadBalancer.
-class UpdatesWithClientLoadReportingTest : public GrpclbEnd2endTest {
- public:
- UpdatesWithClientLoadReportingTest() : GrpclbEnd2endTest(4, 3, 2) {}
-};
-
-TEST_F(UpdatesWithClientLoadReportingTest, ReresolveDeadBalancer) {
+
+ // Start servers and send 10 RPCs per server.
+ gpr_log(GPR_INFO, "========= BEFORE FIRST BATCH ==========");
+ CheckRpcSendOk(10);
+ gpr_log(GPR_INFO, "========= DONE WITH FIRST BATCH ==========");
+ // All 10 requests should have gone to the fallback backend.
+ EXPECT_EQ(10U, backends_[0]->service_.request_count());
+
+ // Kill backend 0.
+ gpr_log(GPR_INFO, "********** ABOUT TO KILL BACKEND 0 *************");
+ backends_[0]->Shutdown();
+ gpr_log(GPR_INFO, "********** KILLED BACKEND 0 *************");
+
+ // Wait until re-resolution has finished, as signaled by the second backend
+ // receiving a request.
+ WaitForBackend(1);
+
+ gpr_log(GPR_INFO, "========= BEFORE SECOND BATCH ==========");
+ CheckRpcSendOk(10);
+ gpr_log(GPR_INFO, "========= DONE WITH SECOND BATCH ==========");
+ // All 10 requests should have gone to the second backend.
+ EXPECT_EQ(10U, backends_[1]->service_.request_count());
+
+ balancers_[0]->service_.NotifyDoneWithServerlists();
+ balancers_[1]->service_.NotifyDoneWithServerlists();
+ balancers_[2]->service_.NotifyDoneWithServerlists();
+ EXPECT_EQ(1U, balancers_[0]->service_.request_count());
+ EXPECT_EQ(0U, balancers_[0]->service_.response_count());
+ EXPECT_EQ(0U, balancers_[1]->service_.request_count());
+ EXPECT_EQ(0U, balancers_[1]->service_.response_count());
+ EXPECT_EQ(0U, balancers_[2]->service_.request_count());
+ EXPECT_EQ(0U, balancers_[2]->service_.response_count());
+}
+
+// TODO(juanlishen): Should be removed when the first response is always the
+// initial response. Currently, if client load reporting is not enabled, the
+// balancer doesn't send initial response. When the backend shuts down, an
+// unexpected re-resolution will happen. This test configuration is a workaround
+// for test ReresolveDeadBalancer.
+class UpdatesWithClientLoadReportingTest : public GrpclbEnd2endTest {
+ public:
+ UpdatesWithClientLoadReportingTest() : GrpclbEnd2endTest(4, 3, 2) {}
+};
+
+TEST_F(UpdatesWithClientLoadReportingTest, ReresolveDeadBalancer) {
const std::vector<int> first_backend{GetBackendPorts()[0]};
const std::vector<int> second_backend{GetBackendPorts()[1]};
ScheduleResponseForBalancer(
@@ -1693,337 +1693,337 @@ TEST_F(UpdatesWithClientLoadReportingTest, ReresolveDeadBalancer) {
ScheduleResponseForBalancer(
1, BalancerServiceImpl::BuildResponseForBackends(second_backend, {}), 0);
- // Ask channel to connect to trigger resolver creation.
- channel_->GetState(true);
- std::vector<AddressData> addresses;
+ // Ask channel to connect to trigger resolver creation.
+ channel_->GetState(true);
+ std::vector<AddressData> addresses;
addresses.emplace_back(AddressData{balancers_[0]->port_, ""});
- SetNextResolution(addresses);
- addresses.clear();
+ SetNextResolution(addresses);
+ addresses.clear();
addresses.emplace_back(AddressData{balancers_[1]->port_, ""});
- SetNextReresolutionResponse(addresses);
-
- // Start servers and send 10 RPCs per server.
- gpr_log(GPR_INFO, "========= BEFORE FIRST BATCH ==========");
- CheckRpcSendOk(10);
- gpr_log(GPR_INFO, "========= DONE WITH FIRST BATCH ==========");
- // All 10 requests should have gone to the first backend.
- EXPECT_EQ(10U, backends_[0]->service_.request_count());
-
- // Kill backend 0.
- gpr_log(GPR_INFO, "********** ABOUT TO KILL BACKEND 0 *************");
- backends_[0]->Shutdown();
- gpr_log(GPR_INFO, "********** KILLED BACKEND 0 *************");
-
- CheckRpcSendFailure();
-
- // Balancer 0 got a single request.
- EXPECT_EQ(1U, balancers_[0]->service_.request_count());
- // and sent a single response.
- EXPECT_EQ(1U, balancers_[0]->service_.response_count());
- EXPECT_EQ(0U, balancers_[1]->service_.request_count());
- EXPECT_EQ(0U, balancers_[1]->service_.response_count());
- EXPECT_EQ(0U, balancers_[2]->service_.request_count());
- EXPECT_EQ(0U, balancers_[2]->service_.response_count());
-
- // Kill balancer 0.
- gpr_log(GPR_INFO, "********** ABOUT TO KILL BALANCER 0 *************");
- balancers_[0]->Shutdown();
- gpr_log(GPR_INFO, "********** KILLED BALANCER 0 *************");
-
- // Wait until re-resolution has finished, as signaled by the second backend
- // receiving a request.
- WaitForBackend(1);
-
- // This is serviced by the new serverlist.
- gpr_log(GPR_INFO, "========= BEFORE SECOND BATCH ==========");
- CheckRpcSendOk(10);
- gpr_log(GPR_INFO, "========= DONE WITH SECOND BATCH ==========");
- // All 10 requests should have gone to the second backend.
- EXPECT_EQ(10U, backends_[1]->service_.request_count());
-
- EXPECT_EQ(1U, balancers_[0]->service_.request_count());
- EXPECT_EQ(1U, balancers_[0]->service_.response_count());
- // After balancer 0 is killed, we restart an LB call immediately (because we
-  // disconnect from a previously connected balancer). Although we will cancel
- // this call when the re-resolution update is done and another LB call restart
- // is needed, this old call may still succeed reaching the LB server if
- // re-resolution is slow. So balancer 1 may have received 2 requests and sent
- // 2 responses.
- EXPECT_GE(balancers_[1]->service_.request_count(), 1U);
- EXPECT_GE(balancers_[1]->service_.response_count(), 1U);
- EXPECT_LE(balancers_[1]->service_.request_count(), 2U);
- EXPECT_LE(balancers_[1]->service_.response_count(), 2U);
- EXPECT_EQ(0U, balancers_[2]->service_.request_count());
- EXPECT_EQ(0U, balancers_[2]->service_.response_count());
-}
-
-TEST_F(SingleBalancerTest, Drop) {
- SetNextResolutionAllBalancers();
- const size_t kNumRpcsPerAddress = 100;
- const int num_of_drop_by_rate_limiting_addresses = 1;
- const int num_of_drop_by_load_balancing_addresses = 2;
- const int num_of_drop_addresses = num_of_drop_by_rate_limiting_addresses +
- num_of_drop_by_load_balancing_addresses;
- const int num_total_addresses = num_backends_ + num_of_drop_addresses;
- ScheduleResponseForBalancer(
- 0,
- BalancerServiceImpl::BuildResponseForBackends(
- GetBackendPorts(),
- {{"rate_limiting", num_of_drop_by_rate_limiting_addresses},
- {"load_balancing", num_of_drop_by_load_balancing_addresses}}),
- 0);
- // Wait until all backends are ready.
- WaitForAllBackends();
-  // Send kNumRpcsPerAddress RPCs per server address and per drop address.
- size_t num_drops = 0;
- for (size_t i = 0; i < kNumRpcsPerAddress * num_total_addresses; ++i) {
- EchoResponse response;
- const Status status = SendRpc(&response);
- if (!status.ok() &&
- status.error_message() == "Call dropped by load balancing policy") {
- ++num_drops;
- } else {
- EXPECT_TRUE(status.ok()) << "code=" << status.error_code()
- << " message=" << status.error_message();
- EXPECT_EQ(response.message(), kRequestMessage_);
- }
- }
- EXPECT_EQ(kNumRpcsPerAddress * num_of_drop_addresses, num_drops);
- // Each backend should have gotten 100 requests.
- for (size_t i = 0; i < backends_.size(); ++i) {
- EXPECT_EQ(kNumRpcsPerAddress, backends_[i]->service_.request_count());
- }
- // The balancer got a single request.
- EXPECT_EQ(1U, balancers_[0]->service_.request_count());
- // and sent a single response.
- EXPECT_EQ(1U, balancers_[0]->service_.response_count());
-}
-
-TEST_F(SingleBalancerTest, DropAllFirst) {
- SetNextResolutionAllBalancers();
- // All registered addresses are marked as "drop".
- const int num_of_drop_by_rate_limiting_addresses = 1;
- const int num_of_drop_by_load_balancing_addresses = 1;
- ScheduleResponseForBalancer(
- 0,
- BalancerServiceImpl::BuildResponseForBackends(
- {}, {{"rate_limiting", num_of_drop_by_rate_limiting_addresses},
- {"load_balancing", num_of_drop_by_load_balancing_addresses}}),
- 0);
- const Status status = SendRpc(nullptr, 1000, true);
- EXPECT_FALSE(status.ok());
- EXPECT_EQ(status.error_message(), "Call dropped by load balancing policy");
-}
-
-TEST_F(SingleBalancerTest, DropAll) {
- SetNextResolutionAllBalancers();
- ScheduleResponseForBalancer(
- 0, BalancerServiceImpl::BuildResponseForBackends(GetBackendPorts(), {}),
- 0);
- const int num_of_drop_by_rate_limiting_addresses = 1;
- const int num_of_drop_by_load_balancing_addresses = 1;
- ScheduleResponseForBalancer(
- 0,
- BalancerServiceImpl::BuildResponseForBackends(
- {}, {{"rate_limiting", num_of_drop_by_rate_limiting_addresses},
- {"load_balancing", num_of_drop_by_load_balancing_addresses}}),
- 1000);
-
- // First call succeeds.
- CheckRpcSendOk();
- // But eventually, the update with only dropped servers is processed and calls
- // fail.
- Status status;
- do {
- status = SendRpc(nullptr, 1000, true);
- } while (status.ok());
- EXPECT_FALSE(status.ok());
- EXPECT_EQ(status.error_message(), "Call dropped by load balancing policy");
-}
-
-class SingleBalancerWithClientLoadReportingTest : public GrpclbEnd2endTest {
- public:
- SingleBalancerWithClientLoadReportingTest() : GrpclbEnd2endTest(4, 1, 3) {}
-};
-
-TEST_F(SingleBalancerWithClientLoadReportingTest, Vanilla) {
- SetNextResolutionAllBalancers();
- const size_t kNumRpcsPerAddress = 100;
- ScheduleResponseForBalancer(
- 0, BalancerServiceImpl::BuildResponseForBackends(GetBackendPorts(), {}),
- 0);
- // Wait until all backends are ready.
- int num_ok = 0;
- int num_failure = 0;
- int num_drops = 0;
- std::tie(num_ok, num_failure, num_drops) = WaitForAllBackends();
- // Send kNumRpcsPerAddress RPCs per server.
- CheckRpcSendOk(kNumRpcsPerAddress * num_backends_);
- // Each backend should have gotten 100 requests.
- for (size_t i = 0; i < backends_.size(); ++i) {
- EXPECT_EQ(kNumRpcsPerAddress, backends_[i]->service_.request_count());
- }
- balancers_[0]->service_.NotifyDoneWithServerlists();
- // The balancer got a single request.
- EXPECT_EQ(1U, balancers_[0]->service_.request_count());
- // and sent a single response.
- EXPECT_EQ(1U, balancers_[0]->service_.response_count());
-
+ SetNextReresolutionResponse(addresses);
+
+ // Start servers and send 10 RPCs per server.
+ gpr_log(GPR_INFO, "========= BEFORE FIRST BATCH ==========");
+ CheckRpcSendOk(10);
+ gpr_log(GPR_INFO, "========= DONE WITH FIRST BATCH ==========");
+ // All 10 requests should have gone to the first backend.
+ EXPECT_EQ(10U, backends_[0]->service_.request_count());
+
+ // Kill backend 0.
+ gpr_log(GPR_INFO, "********** ABOUT TO KILL BACKEND 0 *************");
+ backends_[0]->Shutdown();
+ gpr_log(GPR_INFO, "********** KILLED BACKEND 0 *************");
+
+ CheckRpcSendFailure();
+
+ // Balancer 0 got a single request.
+ EXPECT_EQ(1U, balancers_[0]->service_.request_count());
+ // and sent a single response.
+ EXPECT_EQ(1U, balancers_[0]->service_.response_count());
+ EXPECT_EQ(0U, balancers_[1]->service_.request_count());
+ EXPECT_EQ(0U, balancers_[1]->service_.response_count());
+ EXPECT_EQ(0U, balancers_[2]->service_.request_count());
+ EXPECT_EQ(0U, balancers_[2]->service_.response_count());
+
+ // Kill balancer 0.
+ gpr_log(GPR_INFO, "********** ABOUT TO KILL BALANCER 0 *************");
+ balancers_[0]->Shutdown();
+ gpr_log(GPR_INFO, "********** KILLED BALANCER 0 *************");
+
+ // Wait until re-resolution has finished, as signaled by the second backend
+ // receiving a request.
+ WaitForBackend(1);
+
+ // This is serviced by the new serverlist.
+ gpr_log(GPR_INFO, "========= BEFORE SECOND BATCH ==========");
+ CheckRpcSendOk(10);
+ gpr_log(GPR_INFO, "========= DONE WITH SECOND BATCH ==========");
+ // All 10 requests should have gone to the second backend.
+ EXPECT_EQ(10U, backends_[1]->service_.request_count());
+
+ EXPECT_EQ(1U, balancers_[0]->service_.request_count());
+ EXPECT_EQ(1U, balancers_[0]->service_.response_count());
+ // After balancer 0 is killed, we restart an LB call immediately (because we
+  // disconnect from a previously connected balancer). Although we will cancel
+ // this call when the re-resolution update is done and another LB call restart
+ // is needed, this old call may still succeed reaching the LB server if
+ // re-resolution is slow. So balancer 1 may have received 2 requests and sent
+ // 2 responses.
+ EXPECT_GE(balancers_[1]->service_.request_count(), 1U);
+ EXPECT_GE(balancers_[1]->service_.response_count(), 1U);
+ EXPECT_LE(balancers_[1]->service_.request_count(), 2U);
+ EXPECT_LE(balancers_[1]->service_.response_count(), 2U);
+ EXPECT_EQ(0U, balancers_[2]->service_.request_count());
+ EXPECT_EQ(0U, balancers_[2]->service_.response_count());
+}
+
+TEST_F(SingleBalancerTest, Drop) {
+ SetNextResolutionAllBalancers();
+ const size_t kNumRpcsPerAddress = 100;
+ const int num_of_drop_by_rate_limiting_addresses = 1;
+ const int num_of_drop_by_load_balancing_addresses = 2;
+ const int num_of_drop_addresses = num_of_drop_by_rate_limiting_addresses +
+ num_of_drop_by_load_balancing_addresses;
+ const int num_total_addresses = num_backends_ + num_of_drop_addresses;
+ ScheduleResponseForBalancer(
+ 0,
+ BalancerServiceImpl::BuildResponseForBackends(
+ GetBackendPorts(),
+ {{"rate_limiting", num_of_drop_by_rate_limiting_addresses},
+ {"load_balancing", num_of_drop_by_load_balancing_addresses}}),
+ 0);
+ // Wait until all backends are ready.
+ WaitForAllBackends();
+  // Send kNumRpcsPerAddress RPCs per server address and per drop address.
+ size_t num_drops = 0;
+ for (size_t i = 0; i < kNumRpcsPerAddress * num_total_addresses; ++i) {
+ EchoResponse response;
+ const Status status = SendRpc(&response);
+ if (!status.ok() &&
+ status.error_message() == "Call dropped by load balancing policy") {
+ ++num_drops;
+ } else {
+ EXPECT_TRUE(status.ok()) << "code=" << status.error_code()
+ << " message=" << status.error_message();
+ EXPECT_EQ(response.message(), kRequestMessage_);
+ }
+ }
+ EXPECT_EQ(kNumRpcsPerAddress * num_of_drop_addresses, num_drops);
+ // Each backend should have gotten 100 requests.
+ for (size_t i = 0; i < backends_.size(); ++i) {
+ EXPECT_EQ(kNumRpcsPerAddress, backends_[i]->service_.request_count());
+ }
+ // The balancer got a single request.
+ EXPECT_EQ(1U, balancers_[0]->service_.request_count());
+ // and sent a single response.
+ EXPECT_EQ(1U, balancers_[0]->service_.response_count());
+}
+
+TEST_F(SingleBalancerTest, DropAllFirst) {
+ SetNextResolutionAllBalancers();
+ // All registered addresses are marked as "drop".
+ const int num_of_drop_by_rate_limiting_addresses = 1;
+ const int num_of_drop_by_load_balancing_addresses = 1;
+ ScheduleResponseForBalancer(
+ 0,
+ BalancerServiceImpl::BuildResponseForBackends(
+ {}, {{"rate_limiting", num_of_drop_by_rate_limiting_addresses},
+ {"load_balancing", num_of_drop_by_load_balancing_addresses}}),
+ 0);
+ const Status status = SendRpc(nullptr, 1000, true);
+ EXPECT_FALSE(status.ok());
+ EXPECT_EQ(status.error_message(), "Call dropped by load balancing policy");
+}
+
+TEST_F(SingleBalancerTest, DropAll) {
+ SetNextResolutionAllBalancers();
+ ScheduleResponseForBalancer(
+ 0, BalancerServiceImpl::BuildResponseForBackends(GetBackendPorts(), {}),
+ 0);
+ const int num_of_drop_by_rate_limiting_addresses = 1;
+ const int num_of_drop_by_load_balancing_addresses = 1;
+ ScheduleResponseForBalancer(
+ 0,
+ BalancerServiceImpl::BuildResponseForBackends(
+ {}, {{"rate_limiting", num_of_drop_by_rate_limiting_addresses},
+ {"load_balancing", num_of_drop_by_load_balancing_addresses}}),
+ 1000);
+
+ // First call succeeds.
+ CheckRpcSendOk();
+ // But eventually, the update with only dropped servers is processed and calls
+ // fail.
+ Status status;
+ do {
+ status = SendRpc(nullptr, 1000, true);
+ } while (status.ok());
+ EXPECT_FALSE(status.ok());
+ EXPECT_EQ(status.error_message(), "Call dropped by load balancing policy");
+}
+
+class SingleBalancerWithClientLoadReportingTest : public GrpclbEnd2endTest {
+ public:
+ SingleBalancerWithClientLoadReportingTest() : GrpclbEnd2endTest(4, 1, 3) {}
+};
+
+TEST_F(SingleBalancerWithClientLoadReportingTest, Vanilla) {
+ SetNextResolutionAllBalancers();
+ const size_t kNumRpcsPerAddress = 100;
+ ScheduleResponseForBalancer(
+ 0, BalancerServiceImpl::BuildResponseForBackends(GetBackendPorts(), {}),
+ 0);
+ // Wait until all backends are ready.
+ int num_ok = 0;
+ int num_failure = 0;
+ int num_drops = 0;
+ std::tie(num_ok, num_failure, num_drops) = WaitForAllBackends();
+ // Send kNumRpcsPerAddress RPCs per server.
+ CheckRpcSendOk(kNumRpcsPerAddress * num_backends_);
+ // Each backend should have gotten 100 requests.
+ for (size_t i = 0; i < backends_.size(); ++i) {
+ EXPECT_EQ(kNumRpcsPerAddress, backends_[i]->service_.request_count());
+ }
+ balancers_[0]->service_.NotifyDoneWithServerlists();
+ // The balancer got a single request.
+ EXPECT_EQ(1U, balancers_[0]->service_.request_count());
+ // and sent a single response.
+ EXPECT_EQ(1U, balancers_[0]->service_.response_count());
+
ClientStats client_stats;
do {
client_stats += WaitForLoadReports();
} while (client_stats.num_calls_finished !=
kNumRpcsPerAddress * num_backends_ + num_ok);
- EXPECT_EQ(kNumRpcsPerAddress * num_backends_ + num_ok,
- client_stats.num_calls_started);
- EXPECT_EQ(kNumRpcsPerAddress * num_backends_ + num_ok,
- client_stats.num_calls_finished);
- EXPECT_EQ(0U, client_stats.num_calls_finished_with_client_failed_to_send);
- EXPECT_EQ(kNumRpcsPerAddress * num_backends_ + (num_ok + num_drops),
- client_stats.num_calls_finished_known_received);
- EXPECT_THAT(client_stats.drop_token_counts, ::testing::ElementsAre());
-}
-
-TEST_F(SingleBalancerWithClientLoadReportingTest, BalancerRestart) {
- SetNextResolutionAllBalancers();
- const size_t kNumBackendsFirstPass = 2;
- const size_t kNumBackendsSecondPass =
- backends_.size() - kNumBackendsFirstPass;
-  // Balancer returns only the first kNumBackendsFirstPass backends.
- ScheduleResponseForBalancer(
- 0,
- BalancerServiceImpl::BuildResponseForBackends(
- GetBackendPorts(0, kNumBackendsFirstPass), {}),
- 0);
- // Wait until all backends returned by the balancer are ready.
- int num_ok = 0;
- int num_failure = 0;
- int num_drops = 0;
- std::tie(num_ok, num_failure, num_drops) =
- WaitForAllBackends(/* num_requests_multiple_of */ 1, /* start_index */ 0,
- /* stop_index */ kNumBackendsFirstPass);
- balancers_[0]->service_.NotifyDoneWithServerlists();
- ClientStats client_stats = WaitForLoadReports();
- EXPECT_EQ(static_cast<size_t>(num_ok), client_stats.num_calls_started);
- EXPECT_EQ(static_cast<size_t>(num_ok), client_stats.num_calls_finished);
- EXPECT_EQ(0U, client_stats.num_calls_finished_with_client_failed_to_send);
- EXPECT_EQ(static_cast<size_t>(num_ok),
- client_stats.num_calls_finished_known_received);
- EXPECT_THAT(client_stats.drop_token_counts, ::testing::ElementsAre());
- // Shut down the balancer.
- balancers_[0]->Shutdown();
-  // Send one more request per backend. This will continue using the
- // last serverlist we received from the balancer before it was shut down.
- ResetBackendCounters();
- CheckRpcSendOk(kNumBackendsFirstPass);
- // Each backend should have gotten 1 request.
- for (size_t i = 0; i < kNumBackendsFirstPass; ++i) {
- EXPECT_EQ(1UL, backends_[i]->service_.request_count());
- }
- // Now restart the balancer, this time pointing to all backends.
- balancers_[0]->Start(server_host_);
- ScheduleResponseForBalancer(0,
- BalancerServiceImpl::BuildResponseForBackends(
- GetBackendPorts(kNumBackendsFirstPass), {}),
- 0);
- // Wait for queries to start going to one of the new backends.
- // This tells us that we're now using the new serverlist.
- do {
- CheckRpcSendOk();
- } while (backends_[2]->service_.request_count() == 0 &&
- backends_[3]->service_.request_count() == 0);
- // Send one RPC per backend.
- CheckRpcSendOk(kNumBackendsSecondPass);
- balancers_[0]->service_.NotifyDoneWithServerlists();
- // Check client stats.
- client_stats = WaitForLoadReports();
- EXPECT_EQ(kNumBackendsSecondPass + 1, client_stats.num_calls_started);
- EXPECT_EQ(kNumBackendsSecondPass + 1, client_stats.num_calls_finished);
- EXPECT_EQ(0U, client_stats.num_calls_finished_with_client_failed_to_send);
- EXPECT_EQ(kNumBackendsSecondPass + 1,
- client_stats.num_calls_finished_known_received);
- EXPECT_THAT(client_stats.drop_token_counts, ::testing::ElementsAre());
-}
-
-TEST_F(SingleBalancerWithClientLoadReportingTest, Drop) {
- SetNextResolutionAllBalancers();
- const size_t kNumRpcsPerAddress = 3;
- const int num_of_drop_by_rate_limiting_addresses = 2;
- const int num_of_drop_by_load_balancing_addresses = 1;
- const int num_of_drop_addresses = num_of_drop_by_rate_limiting_addresses +
- num_of_drop_by_load_balancing_addresses;
- const int num_total_addresses = num_backends_ + num_of_drop_addresses;
- ScheduleResponseForBalancer(
- 0,
- BalancerServiceImpl::BuildResponseForBackends(
- GetBackendPorts(),
- {{"rate_limiting", num_of_drop_by_rate_limiting_addresses},
- {"load_balancing", num_of_drop_by_load_balancing_addresses}}),
- 0);
- // Wait until all backends are ready.
- int num_warmup_ok = 0;
- int num_warmup_failure = 0;
- int num_warmup_drops = 0;
- std::tie(num_warmup_ok, num_warmup_failure, num_warmup_drops) =
- WaitForAllBackends(num_total_addresses /* num_requests_multiple_of */);
- const int num_total_warmup_requests =
- num_warmup_ok + num_warmup_failure + num_warmup_drops;
- size_t num_drops = 0;
- for (size_t i = 0; i < kNumRpcsPerAddress * num_total_addresses; ++i) {
- EchoResponse response;
- const Status status = SendRpc(&response);
- if (!status.ok() &&
- status.error_message() == "Call dropped by load balancing policy") {
- ++num_drops;
- } else {
- EXPECT_TRUE(status.ok()) << "code=" << status.error_code()
- << " message=" << status.error_message();
- EXPECT_EQ(response.message(), kRequestMessage_);
- }
- }
- EXPECT_EQ(kNumRpcsPerAddress * num_of_drop_addresses, num_drops);
-  // Each backend should have gotten kNumRpcsPerAddress requests.
- for (size_t i = 0; i < backends_.size(); ++i) {
- EXPECT_EQ(kNumRpcsPerAddress, backends_[i]->service_.request_count());
- }
- balancers_[0]->service_.NotifyDoneWithServerlists();
- // The balancer got a single request.
- EXPECT_EQ(1U, balancers_[0]->service_.request_count());
- // and sent a single response.
- EXPECT_EQ(1U, balancers_[0]->service_.response_count());
-
- const ClientStats client_stats = WaitForLoadReports();
- EXPECT_EQ(
- kNumRpcsPerAddress * num_total_addresses + num_total_warmup_requests,
- client_stats.num_calls_started);
- EXPECT_EQ(
- kNumRpcsPerAddress * num_total_addresses + num_total_warmup_requests,
- client_stats.num_calls_finished);
- EXPECT_EQ(0U, client_stats.num_calls_finished_with_client_failed_to_send);
- EXPECT_EQ(kNumRpcsPerAddress * num_backends_ + num_warmup_ok,
- client_stats.num_calls_finished_known_received);
-  // The number of warmup requests is a multiple of the number of addresses.
- // Therefore, all addresses in the scheduled balancer response are hit the
- // same number of times.
- const int num_times_drop_addresses_hit =
- num_warmup_drops / num_of_drop_addresses;
- EXPECT_THAT(
- client_stats.drop_token_counts,
- ::testing::ElementsAre(
- ::testing::Pair("load_balancing",
- (kNumRpcsPerAddress + num_times_drop_addresses_hit)),
- ::testing::Pair(
- "rate_limiting",
- (kNumRpcsPerAddress + num_times_drop_addresses_hit) * 2)));
-}
-
-} // namespace
-} // namespace testing
-} // namespace grpc
-
-int main(int argc, char** argv) {
- grpc::testing::TestEnvironment env(argc, argv);
- ::testing::InitGoogleTest(&argc, argv);
- const auto result = RUN_ALL_TESTS();
- return result;
-}
+ EXPECT_EQ(kNumRpcsPerAddress * num_backends_ + num_ok,
+ client_stats.num_calls_started);
+ EXPECT_EQ(kNumRpcsPerAddress * num_backends_ + num_ok,
+ client_stats.num_calls_finished);
+ EXPECT_EQ(0U, client_stats.num_calls_finished_with_client_failed_to_send);
+ EXPECT_EQ(kNumRpcsPerAddress * num_backends_ + (num_ok + num_drops),
+ client_stats.num_calls_finished_known_received);
+ EXPECT_THAT(client_stats.drop_token_counts, ::testing::ElementsAre());
+}
+
+TEST_F(SingleBalancerWithClientLoadReportingTest, BalancerRestart) {
+ SetNextResolutionAllBalancers();
+ const size_t kNumBackendsFirstPass = 2;
+ const size_t kNumBackendsSecondPass =
+ backends_.size() - kNumBackendsFirstPass;
+  // Balancer returns only the first kNumBackendsFirstPass backends.
+ ScheduleResponseForBalancer(
+ 0,
+ BalancerServiceImpl::BuildResponseForBackends(
+ GetBackendPorts(0, kNumBackendsFirstPass), {}),
+ 0);
+ // Wait until all backends returned by the balancer are ready.
+ int num_ok = 0;
+ int num_failure = 0;
+ int num_drops = 0;
+ std::tie(num_ok, num_failure, num_drops) =
+ WaitForAllBackends(/* num_requests_multiple_of */ 1, /* start_index */ 0,
+ /* stop_index */ kNumBackendsFirstPass);
+ balancers_[0]->service_.NotifyDoneWithServerlists();
+ ClientStats client_stats = WaitForLoadReports();
+ EXPECT_EQ(static_cast<size_t>(num_ok), client_stats.num_calls_started);
+ EXPECT_EQ(static_cast<size_t>(num_ok), client_stats.num_calls_finished);
+ EXPECT_EQ(0U, client_stats.num_calls_finished_with_client_failed_to_send);
+ EXPECT_EQ(static_cast<size_t>(num_ok),
+ client_stats.num_calls_finished_known_received);
+ EXPECT_THAT(client_stats.drop_token_counts, ::testing::ElementsAre());
+ // Shut down the balancer.
+ balancers_[0]->Shutdown();
+  // Send one more request per backend. This will continue using the
+ // last serverlist we received from the balancer before it was shut down.
+ ResetBackendCounters();
+ CheckRpcSendOk(kNumBackendsFirstPass);
+ // Each backend should have gotten 1 request.
+ for (size_t i = 0; i < kNumBackendsFirstPass; ++i) {
+ EXPECT_EQ(1UL, backends_[i]->service_.request_count());
+ }
+ // Now restart the balancer, this time pointing to all backends.
+ balancers_[0]->Start(server_host_);
+ ScheduleResponseForBalancer(0,
+ BalancerServiceImpl::BuildResponseForBackends(
+ GetBackendPorts(kNumBackendsFirstPass), {}),
+ 0);
+ // Wait for queries to start going to one of the new backends.
+ // This tells us that we're now using the new serverlist.
+ do {
+ CheckRpcSendOk();
+ } while (backends_[2]->service_.request_count() == 0 &&
+ backends_[3]->service_.request_count() == 0);
+ // Send one RPC per backend.
+ CheckRpcSendOk(kNumBackendsSecondPass);
+ balancers_[0]->service_.NotifyDoneWithServerlists();
+ // Check client stats.
+ client_stats = WaitForLoadReports();
+ EXPECT_EQ(kNumBackendsSecondPass + 1, client_stats.num_calls_started);
+ EXPECT_EQ(kNumBackendsSecondPass + 1, client_stats.num_calls_finished);
+ EXPECT_EQ(0U, client_stats.num_calls_finished_with_client_failed_to_send);
+ EXPECT_EQ(kNumBackendsSecondPass + 1,
+ client_stats.num_calls_finished_known_received);
+ EXPECT_THAT(client_stats.drop_token_counts, ::testing::ElementsAre());
+}
+
+TEST_F(SingleBalancerWithClientLoadReportingTest, Drop) {
+ SetNextResolutionAllBalancers();
+ const size_t kNumRpcsPerAddress = 3;
+ const int num_of_drop_by_rate_limiting_addresses = 2;
+ const int num_of_drop_by_load_balancing_addresses = 1;
+ const int num_of_drop_addresses = num_of_drop_by_rate_limiting_addresses +
+ num_of_drop_by_load_balancing_addresses;
+ const int num_total_addresses = num_backends_ + num_of_drop_addresses;
+ ScheduleResponseForBalancer(
+ 0,
+ BalancerServiceImpl::BuildResponseForBackends(
+ GetBackendPorts(),
+ {{"rate_limiting", num_of_drop_by_rate_limiting_addresses},
+ {"load_balancing", num_of_drop_by_load_balancing_addresses}}),
+ 0);
+ // Wait until all backends are ready.
+ int num_warmup_ok = 0;
+ int num_warmup_failure = 0;
+ int num_warmup_drops = 0;
+ std::tie(num_warmup_ok, num_warmup_failure, num_warmup_drops) =
+ WaitForAllBackends(num_total_addresses /* num_requests_multiple_of */);
+ const int num_total_warmup_requests =
+ num_warmup_ok + num_warmup_failure + num_warmup_drops;
+ size_t num_drops = 0;
+ for (size_t i = 0; i < kNumRpcsPerAddress * num_total_addresses; ++i) {
+ EchoResponse response;
+ const Status status = SendRpc(&response);
+ if (!status.ok() &&
+ status.error_message() == "Call dropped by load balancing policy") {
+ ++num_drops;
+ } else {
+ EXPECT_TRUE(status.ok()) << "code=" << status.error_code()
+ << " message=" << status.error_message();
+ EXPECT_EQ(response.message(), kRequestMessage_);
+ }
+ }
+ EXPECT_EQ(kNumRpcsPerAddress * num_of_drop_addresses, num_drops);
+  // Each backend should have gotten kNumRpcsPerAddress requests.
+ for (size_t i = 0; i < backends_.size(); ++i) {
+ EXPECT_EQ(kNumRpcsPerAddress, backends_[i]->service_.request_count());
+ }
+ balancers_[0]->service_.NotifyDoneWithServerlists();
+ // The balancer got a single request.
+ EXPECT_EQ(1U, balancers_[0]->service_.request_count());
+ // and sent a single response.
+ EXPECT_EQ(1U, balancers_[0]->service_.response_count());
+
+ const ClientStats client_stats = WaitForLoadReports();
+ EXPECT_EQ(
+ kNumRpcsPerAddress * num_total_addresses + num_total_warmup_requests,
+ client_stats.num_calls_started);
+ EXPECT_EQ(
+ kNumRpcsPerAddress * num_total_addresses + num_total_warmup_requests,
+ client_stats.num_calls_finished);
+ EXPECT_EQ(0U, client_stats.num_calls_finished_with_client_failed_to_send);
+ EXPECT_EQ(kNumRpcsPerAddress * num_backends_ + num_warmup_ok,
+ client_stats.num_calls_finished_known_received);
+  // The number of warmup requests is a multiple of the number of addresses.
+ // Therefore, all addresses in the scheduled balancer response are hit the
+ // same number of times.
+ const int num_times_drop_addresses_hit =
+ num_warmup_drops / num_of_drop_addresses;
+ EXPECT_THAT(
+ client_stats.drop_token_counts,
+ ::testing::ElementsAre(
+ ::testing::Pair("load_balancing",
+ (kNumRpcsPerAddress + num_times_drop_addresses_hit)),
+ ::testing::Pair(
+ "rate_limiting",
+ (kNumRpcsPerAddress + num_times_drop_addresses_hit) * 2)));
+}
+
+} // namespace
+} // namespace testing
+} // namespace grpc
+
+int main(int argc, char** argv) {
+ grpc::testing::TestEnvironment env(argc, argv);
+ ::testing::InitGoogleTest(&argc, argv);
+ const auto result = RUN_ALL_TESTS();
+ return result;
+}
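
The Drop tests in the file above rely on a small piece of arithmetic: assuming, as the tests themselves do, that round-robin visits every serverlist entry (including drop entries) equally, sending kNumRpcsPerAddress RPCs per address makes each drop entry fire exactly kNumRpcsPerAddress times. The following standalone sketch is not part of the gRPC sources; the constants simply mirror SingleBalancerWithClientLoadReportingTest.Drop and spell out that accounting.

#include <cstddef>
#include <iostream>

int main() {
  // Values mirror the Drop test above (assumed here for illustration only).
  const std::size_t kNumRpcsPerAddress = 3;
  const std::size_t kNumBackends = 4;
  const std::size_t kRateLimitingDrops = 2;   // entries tagged "rate_limiting"
  const std::size_t kLoadBalancingDrops = 1;  // entries tagged "load_balancing"
  const std::size_t kDropAddresses = kRateLimitingDrops + kLoadBalancingDrops;
  const std::size_t kTotalAddresses = kNumBackends + kDropAddresses;

  // Sending kNumRpcsPerAddress RPCs per address means every address,
  // including every drop entry, is selected exactly kNumRpcsPerAddress times.
  const std::size_t expected_drops = kNumRpcsPerAddress * kDropAddresses;

  std::cout << "RPCs sent: " << kNumRpcsPerAddress * kTotalAddresses << "\n"
            << "expected drops: " << expected_drops << "\n"
            << "expected per-backend requests: " << kNumRpcsPerAddress << "\n";
  return 0;
}
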
diff --git a/contrib/libs/grpc/test/cpp/end2end/health/ya.make b/contrib/libs/grpc/test/cpp/end2end/health/ya.make
index 0eda6bab65..7330129b73 100644
--- a/contrib/libs/grpc/test/cpp/end2end/health/ya.make
+++ b/contrib/libs/grpc/test/cpp/end2end/health/ya.make
@@ -1,33 +1,33 @@
GTEST_UGLY()
-
-OWNER(
- dvshkurko
- g:ymake
-)
-
-ADDINCL(
+
+OWNER(
+ dvshkurko
+ g:ymake
+)
+
+ADDINCL(
${ARCADIA_BUILD_ROOT}/contrib/libs/grpc
- ${ARCADIA_ROOT}/contrib/libs/grpc
-)
-
-PEERDIR(
- contrib/libs/grpc/src/proto/grpc/health/v1
- contrib/libs/grpc/src/proto/grpc/core
- contrib/libs/grpc/src/proto/grpc/testing
- contrib/libs/grpc/src/proto/grpc/testing/duplicate
- contrib/libs/grpc/test/core/util
- contrib/libs/grpc/test/cpp/end2end
- contrib/libs/grpc/test/cpp/util
-)
-
-NO_COMPILER_WARNINGS()
-
-SRCDIR(
- contrib/libs/grpc/test/cpp/end2end
-)
-
-SRCS(
- health_service_end2end_test.cc
-)
-
-END()
+ ${ARCADIA_ROOT}/contrib/libs/grpc
+)
+
+PEERDIR(
+ contrib/libs/grpc/src/proto/grpc/health/v1
+ contrib/libs/grpc/src/proto/grpc/core
+ contrib/libs/grpc/src/proto/grpc/testing
+ contrib/libs/grpc/src/proto/grpc/testing/duplicate
+ contrib/libs/grpc/test/core/util
+ contrib/libs/grpc/test/cpp/end2end
+ contrib/libs/grpc/test/cpp/util
+)
+
+NO_COMPILER_WARNINGS()
+
+SRCDIR(
+ contrib/libs/grpc/test/cpp/end2end
+)
+
+SRCS(
+ health_service_end2end_test.cc
+)
+
+END()
diff --git a/contrib/libs/grpc/test/cpp/end2end/health_service_end2end_test.cc b/contrib/libs/grpc/test/cpp/end2end/health_service_end2end_test.cc
index 628917274b..516b3a4c81 100644
--- a/contrib/libs/grpc/test/cpp/end2end/health_service_end2end_test.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/health_service_end2end_test.cc
@@ -1,374 +1,374 @@
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include <memory>
-#include <mutex>
-#include <thread>
-#include <vector>
-
-#include <grpc/grpc.h>
-#include <grpc/support/log.h>
-#include <grpcpp/channel.h>
-#include <grpcpp/client_context.h>
-#include <grpcpp/create_channel.h>
-#include <grpcpp/ext/health_check_service_server_builder_option.h>
-#include <grpcpp/health_check_service_interface.h>
-#include <grpcpp/server.h>
-#include <grpcpp/server_builder.h>
-#include <grpcpp/server_context.h>
-
-#include "src/proto/grpc/health/v1/health.grpc.pb.h"
-#include "src/proto/grpc/testing/duplicate/echo_duplicate.grpc.pb.h"
-#include "src/proto/grpc/testing/echo.grpc.pb.h"
-#include "test/core/util/port.h"
-#include "test/core/util/test_config.h"
-#include "test/cpp/end2end/test_health_check_service_impl.h"
-#include "test/cpp/end2end/test_service_impl.h"
-
-#include <gtest/gtest.h>
-
-using grpc::health::v1::Health;
-using grpc::health::v1::HealthCheckRequest;
-using grpc::health::v1::HealthCheckResponse;
-
-namespace grpc {
-namespace testing {
-namespace {
-
-// A custom implementation of the health checking service interface. This is
-// used to test that it prevents the server from creating a default service and
-// also serves as an example of how to override the default service.
-class CustomHealthCheckService : public HealthCheckServiceInterface {
- public:
- explicit CustomHealthCheckService(HealthCheckServiceImpl* impl)
- : impl_(impl) {
- impl_->SetStatus("", HealthCheckResponse::SERVING);
- }
+/*
+ *
+ * Copyright 2016 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <memory>
+#include <mutex>
+#include <thread>
+#include <vector>
+
+#include <grpc/grpc.h>
+#include <grpc/support/log.h>
+#include <grpcpp/channel.h>
+#include <grpcpp/client_context.h>
+#include <grpcpp/create_channel.h>
+#include <grpcpp/ext/health_check_service_server_builder_option.h>
+#include <grpcpp/health_check_service_interface.h>
+#include <grpcpp/server.h>
+#include <grpcpp/server_builder.h>
+#include <grpcpp/server_context.h>
+
+#include "src/proto/grpc/health/v1/health.grpc.pb.h"
+#include "src/proto/grpc/testing/duplicate/echo_duplicate.grpc.pb.h"
+#include "src/proto/grpc/testing/echo.grpc.pb.h"
+#include "test/core/util/port.h"
+#include "test/core/util/test_config.h"
+#include "test/cpp/end2end/test_health_check_service_impl.h"
+#include "test/cpp/end2end/test_service_impl.h"
+
+#include <gtest/gtest.h>
+
+using grpc::health::v1::Health;
+using grpc::health::v1::HealthCheckRequest;
+using grpc::health::v1::HealthCheckResponse;
+
+namespace grpc {
+namespace testing {
+namespace {
+
+// A custom implementation of the health checking service interface. This is
+// used to test that it prevents the server from creating a default service and
+// also serves as an example of how to override the default service.
+class CustomHealthCheckService : public HealthCheckServiceInterface {
+ public:
+ explicit CustomHealthCheckService(HealthCheckServiceImpl* impl)
+ : impl_(impl) {
+ impl_->SetStatus("", HealthCheckResponse::SERVING);
+ }
void SetServingStatus(const TString& service_name,
- bool serving) override {
- impl_->SetStatus(service_name, serving ? HealthCheckResponse::SERVING
- : HealthCheckResponse::NOT_SERVING);
- }
-
- void SetServingStatus(bool serving) override {
- impl_->SetAll(serving ? HealthCheckResponse::SERVING
- : HealthCheckResponse::NOT_SERVING);
- }
-
- void Shutdown() override { impl_->Shutdown(); }
-
- private:
- HealthCheckServiceImpl* impl_; // not owned
-};
-
-class HealthServiceEnd2endTest : public ::testing::Test {
- protected:
- HealthServiceEnd2endTest() {}
-
- void SetUpServer(bool register_sync_test_service, bool add_async_cq,
- bool explicit_health_service,
- std::unique_ptr<HealthCheckServiceInterface> service) {
- int port = 5001; // grpc_pick_unused_port_or_die();
- server_address_ << "localhost:" << port;
-
- bool register_sync_health_service_impl =
- explicit_health_service && service != nullptr;
-
- // Setup server
- ServerBuilder builder;
- if (explicit_health_service) {
- std::unique_ptr<ServerBuilderOption> option(
- new HealthCheckServiceServerBuilderOption(std::move(service)));
- builder.SetOption(std::move(option));
- }
- builder.AddListeningPort(server_address_.str(),
- grpc::InsecureServerCredentials());
- if (register_sync_test_service) {
- // Register a sync service.
- builder.RegisterService(&echo_test_service_);
- }
- if (register_sync_health_service_impl) {
- builder.RegisterService(&health_check_service_impl_);
- }
- if (add_async_cq) {
- cq_ = builder.AddCompletionQueue();
- }
- server_ = builder.BuildAndStart();
- }
-
- void TearDown() override {
- if (server_) {
- server_->Shutdown();
- if (cq_ != nullptr) {
- cq_->Shutdown();
- }
- if (cq_thread_.joinable()) {
- cq_thread_.join();
- }
- }
- }
-
- void ResetStubs() {
- std::shared_ptr<Channel> channel = grpc::CreateChannel(
- server_address_.str(), InsecureChannelCredentials());
- hc_stub_ = grpc::health::v1::Health::NewStub(channel);
- }
-
- // When the expected_status is NOT OK, we do not care about the response.
+ bool serving) override {
+ impl_->SetStatus(service_name, serving ? HealthCheckResponse::SERVING
+ : HealthCheckResponse::NOT_SERVING);
+ }
+
+ void SetServingStatus(bool serving) override {
+ impl_->SetAll(serving ? HealthCheckResponse::SERVING
+ : HealthCheckResponse::NOT_SERVING);
+ }
+
+ void Shutdown() override { impl_->Shutdown(); }
+
+ private:
+ HealthCheckServiceImpl* impl_; // not owned
+};
+
+class HealthServiceEnd2endTest : public ::testing::Test {
+ protected:
+ HealthServiceEnd2endTest() {}
+
+ void SetUpServer(bool register_sync_test_service, bool add_async_cq,
+ bool explicit_health_service,
+ std::unique_ptr<HealthCheckServiceInterface> service) {
+ int port = 5001; // grpc_pick_unused_port_or_die();
+ server_address_ << "localhost:" << port;
+
+ bool register_sync_health_service_impl =
+ explicit_health_service && service != nullptr;
+
+ // Setup server
+ ServerBuilder builder;
+ if (explicit_health_service) {
+ std::unique_ptr<ServerBuilderOption> option(
+ new HealthCheckServiceServerBuilderOption(std::move(service)));
+ builder.SetOption(std::move(option));
+ }
+ builder.AddListeningPort(server_address_.str(),
+ grpc::InsecureServerCredentials());
+ if (register_sync_test_service) {
+ // Register a sync service.
+ builder.RegisterService(&echo_test_service_);
+ }
+ if (register_sync_health_service_impl) {
+ builder.RegisterService(&health_check_service_impl_);
+ }
+ if (add_async_cq) {
+ cq_ = builder.AddCompletionQueue();
+ }
+ server_ = builder.BuildAndStart();
+ }
+
+ void TearDown() override {
+ if (server_) {
+ server_->Shutdown();
+ if (cq_ != nullptr) {
+ cq_->Shutdown();
+ }
+ if (cq_thread_.joinable()) {
+ cq_thread_.join();
+ }
+ }
+ }
+
+ void ResetStubs() {
+ std::shared_ptr<Channel> channel = grpc::CreateChannel(
+ server_address_.str(), InsecureChannelCredentials());
+ hc_stub_ = grpc::health::v1::Health::NewStub(channel);
+ }
+
+ // When the expected_status is NOT OK, we do not care about the response.
void SendHealthCheckRpc(const TString& service_name,
- const Status& expected_status) {
- EXPECT_FALSE(expected_status.ok());
- SendHealthCheckRpc(service_name, expected_status,
- HealthCheckResponse::UNKNOWN);
- }
-
- void SendHealthCheckRpc(
+ const Status& expected_status) {
+ EXPECT_FALSE(expected_status.ok());
+ SendHealthCheckRpc(service_name, expected_status,
+ HealthCheckResponse::UNKNOWN);
+ }
+
+ void SendHealthCheckRpc(
const TString& service_name, const Status& expected_status,
- HealthCheckResponse::ServingStatus expected_serving_status) {
- HealthCheckRequest request;
- request.set_service(service_name);
- HealthCheckResponse response;
- ClientContext context;
- Status s = hc_stub_->Check(&context, request, &response);
- EXPECT_EQ(expected_status.error_code(), s.error_code());
- if (s.ok()) {
- EXPECT_EQ(expected_serving_status, response.status());
- }
- }
-
- void VerifyHealthCheckService() {
- HealthCheckServiceInterface* service = server_->GetHealthCheckService();
- EXPECT_TRUE(service != nullptr);
+ HealthCheckResponse::ServingStatus expected_serving_status) {
+ HealthCheckRequest request;
+ request.set_service(service_name);
+ HealthCheckResponse response;
+ ClientContext context;
+ Status s = hc_stub_->Check(&context, request, &response);
+ EXPECT_EQ(expected_status.error_code(), s.error_code());
+ if (s.ok()) {
+ EXPECT_EQ(expected_serving_status, response.status());
+ }
+ }
+
+ void VerifyHealthCheckService() {
+ HealthCheckServiceInterface* service = server_->GetHealthCheckService();
+ EXPECT_TRUE(service != nullptr);
const TString kHealthyService("healthy_service");
const TString kUnhealthyService("unhealthy_service");
const TString kNotRegisteredService("not_registered");
- service->SetServingStatus(kHealthyService, true);
- service->SetServingStatus(kUnhealthyService, false);
-
- ResetStubs();
-
- SendHealthCheckRpc("", Status::OK, HealthCheckResponse::SERVING);
- SendHealthCheckRpc(kHealthyService, Status::OK,
- HealthCheckResponse::SERVING);
- SendHealthCheckRpc(kUnhealthyService, Status::OK,
- HealthCheckResponse::NOT_SERVING);
- SendHealthCheckRpc(kNotRegisteredService,
- Status(StatusCode::NOT_FOUND, ""));
-
- service->SetServingStatus(false);
- SendHealthCheckRpc("", Status::OK, HealthCheckResponse::NOT_SERVING);
- SendHealthCheckRpc(kHealthyService, Status::OK,
- HealthCheckResponse::NOT_SERVING);
- SendHealthCheckRpc(kUnhealthyService, Status::OK,
- HealthCheckResponse::NOT_SERVING);
- SendHealthCheckRpc(kNotRegisteredService,
- Status(StatusCode::NOT_FOUND, ""));
- }
-
- void VerifyHealthCheckServiceStreaming() {
+ service->SetServingStatus(kHealthyService, true);
+ service->SetServingStatus(kUnhealthyService, false);
+
+ ResetStubs();
+
+ SendHealthCheckRpc("", Status::OK, HealthCheckResponse::SERVING);
+ SendHealthCheckRpc(kHealthyService, Status::OK,
+ HealthCheckResponse::SERVING);
+ SendHealthCheckRpc(kUnhealthyService, Status::OK,
+ HealthCheckResponse::NOT_SERVING);
+ SendHealthCheckRpc(kNotRegisteredService,
+ Status(StatusCode::NOT_FOUND, ""));
+
+ service->SetServingStatus(false);
+ SendHealthCheckRpc("", Status::OK, HealthCheckResponse::NOT_SERVING);
+ SendHealthCheckRpc(kHealthyService, Status::OK,
+ HealthCheckResponse::NOT_SERVING);
+ SendHealthCheckRpc(kUnhealthyService, Status::OK,
+ HealthCheckResponse::NOT_SERVING);
+ SendHealthCheckRpc(kNotRegisteredService,
+ Status(StatusCode::NOT_FOUND, ""));
+ }
+
+ void VerifyHealthCheckServiceStreaming() {
const TString kServiceName("service_name");
- HealthCheckServiceInterface* service = server_->GetHealthCheckService();
- // Start Watch for service.
- ClientContext context;
- HealthCheckRequest request;
- request.set_service(kServiceName);
- std::unique_ptr<::grpc::ClientReaderInterface<HealthCheckResponse>> reader =
- hc_stub_->Watch(&context, request);
- // Initial response will be SERVICE_UNKNOWN.
- HealthCheckResponse response;
- EXPECT_TRUE(reader->Read(&response));
- EXPECT_EQ(response.SERVICE_UNKNOWN, response.status());
- response.Clear();
- // Now set service to NOT_SERVING and make sure we get an update.
- service->SetServingStatus(kServiceName, false);
- EXPECT_TRUE(reader->Read(&response));
- EXPECT_EQ(response.NOT_SERVING, response.status());
- response.Clear();
- // Now set service to SERVING and make sure we get another update.
- service->SetServingStatus(kServiceName, true);
- EXPECT_TRUE(reader->Read(&response));
- EXPECT_EQ(response.SERVING, response.status());
- // Finish call.
- context.TryCancel();
- }
-
- // Verify that after HealthCheckServiceInterface::Shutdown is called
- // 1. unary client will see NOT_SERVING.
- // 2. unary client still sees NOT_SERVING after a SetServing(true) is called.
- // 3. streaming (Watch) client will see an update.
- // 4. setting a new service to serving after shutdown will add the service
- // name but return NOT_SERVING to client.
- // This has to be called last.
- void VerifyHealthCheckServiceShutdown() {
- HealthCheckServiceInterface* service = server_->GetHealthCheckService();
- EXPECT_TRUE(service != nullptr);
+ HealthCheckServiceInterface* service = server_->GetHealthCheckService();
+ // Start Watch for service.
+ ClientContext context;
+ HealthCheckRequest request;
+ request.set_service(kServiceName);
+ std::unique_ptr<::grpc::ClientReaderInterface<HealthCheckResponse>> reader =
+ hc_stub_->Watch(&context, request);
+ // Initial response will be SERVICE_UNKNOWN.
+ HealthCheckResponse response;
+ EXPECT_TRUE(reader->Read(&response));
+ EXPECT_EQ(response.SERVICE_UNKNOWN, response.status());
+ response.Clear();
+ // Now set service to NOT_SERVING and make sure we get an update.
+ service->SetServingStatus(kServiceName, false);
+ EXPECT_TRUE(reader->Read(&response));
+ EXPECT_EQ(response.NOT_SERVING, response.status());
+ response.Clear();
+ // Now set service to SERVING and make sure we get another update.
+ service->SetServingStatus(kServiceName, true);
+ EXPECT_TRUE(reader->Read(&response));
+ EXPECT_EQ(response.SERVING, response.status());
+ // Finish call.
+ context.TryCancel();
+ }
+
+ // Verify that after HealthCheckServiceInterface::Shutdown is called
+ // 1. unary client will see NOT_SERVING.
+ // 2. unary client still sees NOT_SERVING after a SetServing(true) is called.
+ // 3. streaming (Watch) client will see an update.
+ // 4. setting a new service to serving after shutdown will add the service
+ // name but return NOT_SERVING to client.
+ // This has to be called last.
+ void VerifyHealthCheckServiceShutdown() {
+ HealthCheckServiceInterface* service = server_->GetHealthCheckService();
+ EXPECT_TRUE(service != nullptr);
const TString kHealthyService("healthy_service");
const TString kUnhealthyService("unhealthy_service");
const TString kNotRegisteredService("not_registered");
const TString kNewService("add_after_shutdown");
- service->SetServingStatus(kHealthyService, true);
- service->SetServingStatus(kUnhealthyService, false);
-
- ResetStubs();
-
- // Start Watch for service.
- ClientContext context;
- HealthCheckRequest request;
- request.set_service(kHealthyService);
- std::unique_ptr<::grpc::ClientReaderInterface<HealthCheckResponse>> reader =
- hc_stub_->Watch(&context, request);
-
- HealthCheckResponse response;
- EXPECT_TRUE(reader->Read(&response));
- EXPECT_EQ(response.SERVING, response.status());
-
- SendHealthCheckRpc("", Status::OK, HealthCheckResponse::SERVING);
- SendHealthCheckRpc(kHealthyService, Status::OK,
- HealthCheckResponse::SERVING);
- SendHealthCheckRpc(kUnhealthyService, Status::OK,
- HealthCheckResponse::NOT_SERVING);
- SendHealthCheckRpc(kNotRegisteredService,
- Status(StatusCode::NOT_FOUND, ""));
- SendHealthCheckRpc(kNewService, Status(StatusCode::NOT_FOUND, ""));
-
- // Shutdown health check service.
- service->Shutdown();
-
- // Watch client gets another update.
- EXPECT_TRUE(reader->Read(&response));
- EXPECT_EQ(response.NOT_SERVING, response.status());
- // Finish Watch call.
- context.TryCancel();
-
- SendHealthCheckRpc("", Status::OK, HealthCheckResponse::NOT_SERVING);
- SendHealthCheckRpc(kHealthyService, Status::OK,
- HealthCheckResponse::NOT_SERVING);
- SendHealthCheckRpc(kUnhealthyService, Status::OK,
- HealthCheckResponse::NOT_SERVING);
- SendHealthCheckRpc(kNotRegisteredService,
- Status(StatusCode::NOT_FOUND, ""));
-
- // Setting status after Shutdown has no effect.
- service->SetServingStatus(kHealthyService, true);
- SendHealthCheckRpc(kHealthyService, Status::OK,
- HealthCheckResponse::NOT_SERVING);
-
- // Adding serving status for a new service after shutdown will return
- // NOT_SERVING.
- service->SetServingStatus(kNewService, true);
- SendHealthCheckRpc(kNewService, Status::OK,
- HealthCheckResponse::NOT_SERVING);
- }
-
- TestServiceImpl echo_test_service_;
- HealthCheckServiceImpl health_check_service_impl_;
- std::unique_ptr<Health::Stub> hc_stub_;
- std::unique_ptr<ServerCompletionQueue> cq_;
- std::unique_ptr<Server> server_;
- std::ostringstream server_address_;
- std::thread cq_thread_;
-};
-
-TEST_F(HealthServiceEnd2endTest, DefaultHealthServiceDisabled) {
- EnableDefaultHealthCheckService(false);
- EXPECT_FALSE(DefaultHealthCheckServiceEnabled());
- SetUpServer(true, false, false, nullptr);
- HealthCheckServiceInterface* default_service =
- server_->GetHealthCheckService();
- EXPECT_TRUE(default_service == nullptr);
-
- ResetStubs();
-
- SendHealthCheckRpc("", Status(StatusCode::UNIMPLEMENTED, ""));
-}
-
-TEST_F(HealthServiceEnd2endTest, DefaultHealthService) {
- EnableDefaultHealthCheckService(true);
- EXPECT_TRUE(DefaultHealthCheckServiceEnabled());
- SetUpServer(true, false, false, nullptr);
- VerifyHealthCheckService();
- VerifyHealthCheckServiceStreaming();
-
-  // The default service enforces a size limit on the service name.
+ service->SetServingStatus(kHealthyService, true);
+ service->SetServingStatus(kUnhealthyService, false);
+
+ ResetStubs();
+
+ // Start Watch for service.
+ ClientContext context;
+ HealthCheckRequest request;
+ request.set_service(kHealthyService);
+ std::unique_ptr<::grpc::ClientReaderInterface<HealthCheckResponse>> reader =
+ hc_stub_->Watch(&context, request);
+
+ HealthCheckResponse response;
+ EXPECT_TRUE(reader->Read(&response));
+ EXPECT_EQ(response.SERVING, response.status());
+
+ SendHealthCheckRpc("", Status::OK, HealthCheckResponse::SERVING);
+ SendHealthCheckRpc(kHealthyService, Status::OK,
+ HealthCheckResponse::SERVING);
+ SendHealthCheckRpc(kUnhealthyService, Status::OK,
+ HealthCheckResponse::NOT_SERVING);
+ SendHealthCheckRpc(kNotRegisteredService,
+ Status(StatusCode::NOT_FOUND, ""));
+ SendHealthCheckRpc(kNewService, Status(StatusCode::NOT_FOUND, ""));
+
+ // Shutdown health check service.
+ service->Shutdown();
+
+ // Watch client gets another update.
+ EXPECT_TRUE(reader->Read(&response));
+ EXPECT_EQ(response.NOT_SERVING, response.status());
+ // Finish Watch call.
+ context.TryCancel();
+
+ SendHealthCheckRpc("", Status::OK, HealthCheckResponse::NOT_SERVING);
+ SendHealthCheckRpc(kHealthyService, Status::OK,
+ HealthCheckResponse::NOT_SERVING);
+ SendHealthCheckRpc(kUnhealthyService, Status::OK,
+ HealthCheckResponse::NOT_SERVING);
+ SendHealthCheckRpc(kNotRegisteredService,
+ Status(StatusCode::NOT_FOUND, ""));
+
+ // Setting status after Shutdown has no effect.
+ service->SetServingStatus(kHealthyService, true);
+ SendHealthCheckRpc(kHealthyService, Status::OK,
+ HealthCheckResponse::NOT_SERVING);
+
+ // Adding serving status for a new service after shutdown will return
+ // NOT_SERVING.
+ service->SetServingStatus(kNewService, true);
+ SendHealthCheckRpc(kNewService, Status::OK,
+ HealthCheckResponse::NOT_SERVING);
+ }
+
+ TestServiceImpl echo_test_service_;
+ HealthCheckServiceImpl health_check_service_impl_;
+ std::unique_ptr<Health::Stub> hc_stub_;
+ std::unique_ptr<ServerCompletionQueue> cq_;
+ std::unique_ptr<Server> server_;
+ std::ostringstream server_address_;
+ std::thread cq_thread_;
+};
+
+TEST_F(HealthServiceEnd2endTest, DefaultHealthServiceDisabled) {
+ EnableDefaultHealthCheckService(false);
+ EXPECT_FALSE(DefaultHealthCheckServiceEnabled());
+ SetUpServer(true, false, false, nullptr);
+ HealthCheckServiceInterface* default_service =
+ server_->GetHealthCheckService();
+ EXPECT_TRUE(default_service == nullptr);
+
+ ResetStubs();
+
+ SendHealthCheckRpc("", Status(StatusCode::UNIMPLEMENTED, ""));
+}
+
+TEST_F(HealthServiceEnd2endTest, DefaultHealthService) {
+ EnableDefaultHealthCheckService(true);
+ EXPECT_TRUE(DefaultHealthCheckServiceEnabled());
+ SetUpServer(true, false, false, nullptr);
+ VerifyHealthCheckService();
+ VerifyHealthCheckServiceStreaming();
+
+  // The default service enforces a size limit on the service name.
const TString kTooLongServiceName(201, 'x');
- SendHealthCheckRpc(kTooLongServiceName,
- Status(StatusCode::INVALID_ARGUMENT, ""));
-}
-
-TEST_F(HealthServiceEnd2endTest, DefaultHealthServiceShutdown) {
- EnableDefaultHealthCheckService(true);
- EXPECT_TRUE(DefaultHealthCheckServiceEnabled());
- SetUpServer(true, false, false, nullptr);
- VerifyHealthCheckServiceShutdown();
-}
-
-// Provide an empty service to disable the default service.
-TEST_F(HealthServiceEnd2endTest, ExplicitlyDisableViaOverride) {
- EnableDefaultHealthCheckService(true);
- EXPECT_TRUE(DefaultHealthCheckServiceEnabled());
- std::unique_ptr<HealthCheckServiceInterface> empty_service;
- SetUpServer(true, false, true, std::move(empty_service));
- HealthCheckServiceInterface* service = server_->GetHealthCheckService();
- EXPECT_TRUE(service == nullptr);
-
- ResetStubs();
-
- SendHealthCheckRpc("", Status(StatusCode::UNIMPLEMENTED, ""));
-}
-
-// Provide an explicit override of health checking service interface.
-TEST_F(HealthServiceEnd2endTest, ExplicitlyOverride) {
- EnableDefaultHealthCheckService(true);
- EXPECT_TRUE(DefaultHealthCheckServiceEnabled());
- std::unique_ptr<HealthCheckServiceInterface> override_service(
- new CustomHealthCheckService(&health_check_service_impl_));
- HealthCheckServiceInterface* underlying_service = override_service.get();
- SetUpServer(false, false, true, std::move(override_service));
- HealthCheckServiceInterface* service = server_->GetHealthCheckService();
- EXPECT_TRUE(service == underlying_service);
-
- ResetStubs();
-
- VerifyHealthCheckService();
- VerifyHealthCheckServiceStreaming();
-}
-
-TEST_F(HealthServiceEnd2endTest, ExplicitlyHealthServiceShutdown) {
- EnableDefaultHealthCheckService(true);
- EXPECT_TRUE(DefaultHealthCheckServiceEnabled());
- std::unique_ptr<HealthCheckServiceInterface> override_service(
- new CustomHealthCheckService(&health_check_service_impl_));
- HealthCheckServiceInterface* underlying_service = override_service.get();
- SetUpServer(false, false, true, std::move(override_service));
- HealthCheckServiceInterface* service = server_->GetHealthCheckService();
- EXPECT_TRUE(service == underlying_service);
-
- ResetStubs();
-
- VerifyHealthCheckServiceShutdown();
-}
-
-} // namespace
-} // namespace testing
-} // namespace grpc
-
-int main(int argc, char** argv) {
- grpc::testing::TestEnvironment env(argc, argv);
- ::testing::InitGoogleTest(&argc, argv);
- return RUN_ALL_TESTS();
-}
+ SendHealthCheckRpc(kTooLongServiceName,
+ Status(StatusCode::INVALID_ARGUMENT, ""));
+}
+
+TEST_F(HealthServiceEnd2endTest, DefaultHealthServiceShutdown) {
+ EnableDefaultHealthCheckService(true);
+ EXPECT_TRUE(DefaultHealthCheckServiceEnabled());
+ SetUpServer(true, false, false, nullptr);
+ VerifyHealthCheckServiceShutdown();
+}
+
+// Provide an empty service to disable the default service.
+TEST_F(HealthServiceEnd2endTest, ExplicitlyDisableViaOverride) {
+ EnableDefaultHealthCheckService(true);
+ EXPECT_TRUE(DefaultHealthCheckServiceEnabled());
+ std::unique_ptr<HealthCheckServiceInterface> empty_service;
+ SetUpServer(true, false, true, std::move(empty_service));
+ HealthCheckServiceInterface* service = server_->GetHealthCheckService();
+ EXPECT_TRUE(service == nullptr);
+
+ ResetStubs();
+
+ SendHealthCheckRpc("", Status(StatusCode::UNIMPLEMENTED, ""));
+}
+
+// Provide an explicit override of health checking service interface.
+TEST_F(HealthServiceEnd2endTest, ExplicitlyOverride) {
+ EnableDefaultHealthCheckService(true);
+ EXPECT_TRUE(DefaultHealthCheckServiceEnabled());
+ std::unique_ptr<HealthCheckServiceInterface> override_service(
+ new CustomHealthCheckService(&health_check_service_impl_));
+ HealthCheckServiceInterface* underlying_service = override_service.get();
+ SetUpServer(false, false, true, std::move(override_service));
+ HealthCheckServiceInterface* service = server_->GetHealthCheckService();
+ EXPECT_TRUE(service == underlying_service);
+
+ ResetStubs();
+
+ VerifyHealthCheckService();
+ VerifyHealthCheckServiceStreaming();
+}
+
+TEST_F(HealthServiceEnd2endTest, ExplicitlyHealthServiceShutdown) {
+ EnableDefaultHealthCheckService(true);
+ EXPECT_TRUE(DefaultHealthCheckServiceEnabled());
+ std::unique_ptr<HealthCheckServiceInterface> override_service(
+ new CustomHealthCheckService(&health_check_service_impl_));
+ HealthCheckServiceInterface* underlying_service = override_service.get();
+ SetUpServer(false, false, true, std::move(override_service));
+ HealthCheckServiceInterface* service = server_->GetHealthCheckService();
+ EXPECT_TRUE(service == underlying_service);
+
+ ResetStubs();
+
+ VerifyHealthCheckServiceShutdown();
+}
+
+} // namespace
+} // namespace testing
+} // namespace grpc
+
+int main(int argc, char** argv) {
+ grpc::testing::TestEnvironment env(argc, argv);
+ ::testing::InitGoogleTest(&argc, argv);
+ return RUN_ALL_TESTS();
+}
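
The health-checking tests above drive the service through the generated Health stub. As a rough, self-contained illustration of the same unary Check call that SendHealthCheckRpc() issues, a minimal client might look like the sketch below; the target address is a placeholder, and the snippet is not part of the test suite, which builds its own in-process server instead.

#include <iostream>
#include <memory>

#include <grpcpp/grpcpp.h>

#include "src/proto/grpc/health/v1/health.grpc.pb.h"

int main() {
  using grpc::health::v1::Health;
  using grpc::health::v1::HealthCheckRequest;
  using grpc::health::v1::HealthCheckResponse;

  // Placeholder target; assumes a health-enabled server is already listening.
  auto channel = grpc::CreateChannel("localhost:50051",
                                     grpc::InsecureChannelCredentials());
  std::unique_ptr<Health::Stub> stub = Health::NewStub(channel);

  HealthCheckRequest request;
  request.set_service("");  // empty service name asks about the whole server
  HealthCheckResponse response;
  grpc::ClientContext context;
  grpc::Status status = stub->Check(&context, request, &response);
  if (!status.ok()) {
    std::cerr << "Check failed: " << status.error_message() << std::endl;
    return 1;
  }
  std::cout << "serving status: " << static_cast<int>(response.status())
            << std::endl;
  return 0;
}
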
diff --git a/contrib/libs/grpc/test/cpp/end2end/hybrid_end2end_test.cc b/contrib/libs/grpc/test/cpp/end2end/hybrid_end2end_test.cc
index 9fc5cc1ba7..e4ebee8e93 100644
--- a/contrib/libs/grpc/test/cpp/end2end/hybrid_end2end_test.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/hybrid_end2end_test.cc
@@ -1,987 +1,987 @@
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include <memory>
-#include <thread>
-
-#include <grpc/grpc.h>
-#include <grpcpp/channel.h>
-#include <grpcpp/client_context.h>
-#include <grpcpp/create_channel.h>
-#include <grpcpp/generic/async_generic_service.h>
-#include <grpcpp/server.h>
-#include <grpcpp/server_builder.h>
-#include <grpcpp/server_context.h>
-
-#include "src/core/lib/gpr/env.h"
-#include "src/core/lib/iomgr/iomgr.h"
-#include "src/proto/grpc/testing/duplicate/echo_duplicate.grpc.pb.h"
-#include "src/proto/grpc/testing/echo.grpc.pb.h"
-#include "test/core/util/port.h"
-#include "test/core/util/test_config.h"
-#include "test/cpp/end2end/test_service_impl.h"
-#include "test/cpp/util/byte_buffer_proto_helper.h"
-
-#include <gtest/gtest.h>
-
-namespace grpc {
-namespace testing {
-namespace {
-
+/*
+ *
+ * Copyright 2016 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <memory>
+#include <thread>
+
+#include <grpc/grpc.h>
+#include <grpcpp/channel.h>
+#include <grpcpp/client_context.h>
+#include <grpcpp/create_channel.h>
+#include <grpcpp/generic/async_generic_service.h>
+#include <grpcpp/server.h>
+#include <grpcpp/server_builder.h>
+#include <grpcpp/server_context.h>
+
+#include "src/core/lib/gpr/env.h"
+#include "src/core/lib/iomgr/iomgr.h"
+#include "src/proto/grpc/testing/duplicate/echo_duplicate.grpc.pb.h"
+#include "src/proto/grpc/testing/echo.grpc.pb.h"
+#include "test/core/util/port.h"
+#include "test/core/util/test_config.h"
+#include "test/cpp/end2end/test_service_impl.h"
+#include "test/cpp/util/byte_buffer_proto_helper.h"
+
+#include <gtest/gtest.h>
+
+namespace grpc {
+namespace testing {
+namespace {
+
#ifndef GRPC_CALLBACK_API_NONEXPERIMENTAL
using ::grpc::experimental::CallbackGenericService;
using ::grpc::experimental::GenericCallbackServerContext;
using ::grpc::experimental::ServerGenericBidiReactor;
#endif
-void* tag(int i) { return (void*)static_cast<intptr_t>(i); }
-
-bool VerifyReturnSuccess(CompletionQueue* cq, int i) {
- void* got_tag;
- bool ok;
- EXPECT_TRUE(cq->Next(&got_tag, &ok));
- EXPECT_EQ(tag(i), got_tag);
- return ok;
-}
-
-void Verify(CompletionQueue* cq, int i, bool expect_ok) {
- EXPECT_EQ(expect_ok, VerifyReturnSuccess(cq, i));
-}
-
-// Handlers for async requests at a server. To be run in a separate thread.
-template <class Service>
-void HandleEcho(Service* service, ServerCompletionQueue* cq, bool dup_service) {
- ServerContext srv_ctx;
- grpc::ServerAsyncResponseWriter<EchoResponse> response_writer(&srv_ctx);
- EchoRequest recv_request;
- EchoResponse send_response;
- service->RequestEcho(&srv_ctx, &recv_request, &response_writer, cq, cq,
- tag(1));
- Verify(cq, 1, true);
- send_response.set_message(recv_request.message());
- if (dup_service) {
- send_response.mutable_message()->append("_dup");
- }
- response_writer.Finish(send_response, Status::OK, tag(2));
- Verify(cq, 2, true);
-}
-
-// Handlers for raw requests at a server. To be run in a separate thread.
-// Note that this is the same as the async version, except that the
-// request/response are ByteBuffers.
-template <class Service>
-void HandleRawEcho(Service* service, ServerCompletionQueue* cq,
- bool /*dup_service*/) {
- ServerContext srv_ctx;
- GenericServerAsyncResponseWriter response_writer(&srv_ctx);
- ByteBuffer recv_buffer;
- service->RequestEcho(&srv_ctx, &recv_buffer, &response_writer, cq, cq,
- tag(1));
- Verify(cq, 1, true);
- EchoRequest recv_request;
- EXPECT_TRUE(ParseFromByteBuffer(&recv_buffer, &recv_request));
- EchoResponse send_response;
- send_response.set_message(recv_request.message());
- auto send_buffer = SerializeToByteBuffer(&send_response);
- response_writer.Finish(*send_buffer, Status::OK, tag(2));
- Verify(cq, 2, true);
-}
-
-template <class Service>
-void HandleClientStreaming(Service* service, ServerCompletionQueue* cq) {
- ServerContext srv_ctx;
- EchoRequest recv_request;
- EchoResponse send_response;
- ServerAsyncReader<EchoResponse, EchoRequest> srv_stream(&srv_ctx);
- service->RequestRequestStream(&srv_ctx, &srv_stream, cq, cq, tag(1));
- Verify(cq, 1, true);
- int i = 1;
- do {
- i++;
- send_response.mutable_message()->append(recv_request.message());
- srv_stream.Read(&recv_request, tag(i));
- } while (VerifyReturnSuccess(cq, i));
- srv_stream.Finish(send_response, Status::OK, tag(100));
- Verify(cq, 100, true);
-}
-
-template <class Service>
-void HandleRawClientStreaming(Service* service, ServerCompletionQueue* cq) {
- ServerContext srv_ctx;
- ByteBuffer recv_buffer;
- EchoRequest recv_request;
- EchoResponse send_response;
- GenericServerAsyncReader srv_stream(&srv_ctx);
- service->RequestRequestStream(&srv_ctx, &srv_stream, cq, cq, tag(1));
- Verify(cq, 1, true);
- int i = 1;
- while (true) {
- i++;
- srv_stream.Read(&recv_buffer, tag(i));
- if (!VerifyReturnSuccess(cq, i)) {
- break;
- }
- EXPECT_TRUE(ParseFromByteBuffer(&recv_buffer, &recv_request));
- send_response.mutable_message()->append(recv_request.message());
- }
- auto send_buffer = SerializeToByteBuffer(&send_response);
- srv_stream.Finish(*send_buffer, Status::OK, tag(100));
- Verify(cq, 100, true);
-}
-
-template <class Service>
-void HandleServerStreaming(Service* service, ServerCompletionQueue* cq) {
- ServerContext srv_ctx;
- EchoRequest recv_request;
- EchoResponse send_response;
- ServerAsyncWriter<EchoResponse> srv_stream(&srv_ctx);
- service->RequestResponseStream(&srv_ctx, &recv_request, &srv_stream, cq, cq,
- tag(1));
- Verify(cq, 1, true);
- send_response.set_message(recv_request.message() + "0");
- srv_stream.Write(send_response, tag(2));
- Verify(cq, 2, true);
- send_response.set_message(recv_request.message() + "1");
- srv_stream.Write(send_response, tag(3));
- Verify(cq, 3, true);
- send_response.set_message(recv_request.message() + "2");
- srv_stream.Write(send_response, tag(4));
- Verify(cq, 4, true);
- srv_stream.Finish(Status::OK, tag(5));
- Verify(cq, 5, true);
-}
-
-void HandleGenericEcho(GenericServerAsyncReaderWriter* stream,
- CompletionQueue* cq) {
- ByteBuffer recv_buffer;
- stream->Read(&recv_buffer, tag(2));
- Verify(cq, 2, true);
- EchoRequest recv_request;
- EXPECT_TRUE(ParseFromByteBuffer(&recv_buffer, &recv_request));
- EchoResponse send_response;
- send_response.set_message(recv_request.message());
- auto send_buffer = SerializeToByteBuffer(&send_response);
- stream->Write(*send_buffer, tag(3));
- Verify(cq, 3, true);
- stream->Finish(Status::OK, tag(4));
- Verify(cq, 4, true);
-}
-
-void HandleGenericRequestStream(GenericServerAsyncReaderWriter* stream,
- CompletionQueue* cq) {
- ByteBuffer recv_buffer;
- EchoRequest recv_request;
- EchoResponse send_response;
- int i = 1;
- while (true) {
- i++;
- stream->Read(&recv_buffer, tag(i));
- if (!VerifyReturnSuccess(cq, i)) {
- break;
- }
- EXPECT_TRUE(ParseFromByteBuffer(&recv_buffer, &recv_request));
- send_response.mutable_message()->append(recv_request.message());
- }
- auto send_buffer = SerializeToByteBuffer(&send_response);
- stream->Write(*send_buffer, tag(99));
- Verify(cq, 99, true);
- stream->Finish(Status::OK, tag(100));
- Verify(cq, 100, true);
-}
-
-// Request and handle one generic call.
-void HandleGenericCall(AsyncGenericService* service,
- ServerCompletionQueue* cq) {
- GenericServerContext srv_ctx;
- GenericServerAsyncReaderWriter stream(&srv_ctx);
- service->RequestCall(&srv_ctx, &stream, cq, cq, tag(1));
- Verify(cq, 1, true);
- if (srv_ctx.method() == "/grpc.testing.EchoTestService/Echo") {
- HandleGenericEcho(&stream, cq);
- } else if (srv_ctx.method() ==
- "/grpc.testing.EchoTestService/RequestStream") {
- HandleGenericRequestStream(&stream, cq);
- } else { // other methods not handled yet.
- gpr_log(GPR_ERROR, "method: %s", srv_ctx.method().c_str());
- GPR_ASSERT(0);
- }
-}
-
-class TestServiceImplDupPkg
- : public ::grpc::testing::duplicate::EchoTestService::Service {
- public:
- Status Echo(ServerContext* /*context*/, const EchoRequest* request,
- EchoResponse* response) override {
- response->set_message(request->message() + "_dup");
- return Status::OK;
- }
-};
-
-class HybridEnd2endTest : public ::testing::TestWithParam<bool> {
- protected:
- HybridEnd2endTest() {}
-
- static void SetUpTestCase() {
-#if TARGET_OS_IPHONE
- // Workaround Apple CFStream bug
- gpr_setenv("grpc_cfstream", "0");
-#endif
- }
-
- void SetUp() override {
- inproc_ = (::testing::UnitTest::GetInstance()
- ->current_test_info()
- ->value_param() != nullptr)
- ? GetParam()
- : false;
- }
-
+void* tag(int i) { return (void*)static_cast<intptr_t>(i); }
+
+bool VerifyReturnSuccess(CompletionQueue* cq, int i) {
+ void* got_tag;
+ bool ok;
+ EXPECT_TRUE(cq->Next(&got_tag, &ok));
+ EXPECT_EQ(tag(i), got_tag);
+ return ok;
+}
+
+void Verify(CompletionQueue* cq, int i, bool expect_ok) {
+ EXPECT_EQ(expect_ok, VerifyReturnSuccess(cq, i));
+}
+
+// Handlers for async requests at a server. To be run in a separate thread.
+template <class Service>
+void HandleEcho(Service* service, ServerCompletionQueue* cq, bool dup_service) {
+ ServerContext srv_ctx;
+ grpc::ServerAsyncResponseWriter<EchoResponse> response_writer(&srv_ctx);
+ EchoRequest recv_request;
+ EchoResponse send_response;
+ service->RequestEcho(&srv_ctx, &recv_request, &response_writer, cq, cq,
+ tag(1));
+ Verify(cq, 1, true);
+ send_response.set_message(recv_request.message());
+ if (dup_service) {
+ send_response.mutable_message()->append("_dup");
+ }
+ response_writer.Finish(send_response, Status::OK, tag(2));
+ Verify(cq, 2, true);
+}
+
+// Handlers for raw requests at a server. To be run in a separate thread.
+// Note that this is the same as the async version, except that the
+// request/response are ByteBuffers.
+template <class Service>
+void HandleRawEcho(Service* service, ServerCompletionQueue* cq,
+ bool /*dup_service*/) {
+ ServerContext srv_ctx;
+ GenericServerAsyncResponseWriter response_writer(&srv_ctx);
+ ByteBuffer recv_buffer;
+ service->RequestEcho(&srv_ctx, &recv_buffer, &response_writer, cq, cq,
+ tag(1));
+ Verify(cq, 1, true);
+ EchoRequest recv_request;
+ EXPECT_TRUE(ParseFromByteBuffer(&recv_buffer, &recv_request));
+ EchoResponse send_response;
+ send_response.set_message(recv_request.message());
+ auto send_buffer = SerializeToByteBuffer(&send_response);
+ response_writer.Finish(*send_buffer, Status::OK, tag(2));
+ Verify(cq, 2, true);
+}
+
+template <class Service>
+void HandleClientStreaming(Service* service, ServerCompletionQueue* cq) {
+ ServerContext srv_ctx;
+ EchoRequest recv_request;
+ EchoResponse send_response;
+ ServerAsyncReader<EchoResponse, EchoRequest> srv_stream(&srv_ctx);
+ service->RequestRequestStream(&srv_ctx, &srv_stream, cq, cq, tag(1));
+ Verify(cq, 1, true);
+ int i = 1;
+ do {
+ i++;
+ send_response.mutable_message()->append(recv_request.message());
+ srv_stream.Read(&recv_request, tag(i));
+ } while (VerifyReturnSuccess(cq, i));
+ srv_stream.Finish(send_response, Status::OK, tag(100));
+ Verify(cq, 100, true);
+}
+
+template <class Service>
+void HandleRawClientStreaming(Service* service, ServerCompletionQueue* cq) {
+ ServerContext srv_ctx;
+ ByteBuffer recv_buffer;
+ EchoRequest recv_request;
+ EchoResponse send_response;
+ GenericServerAsyncReader srv_stream(&srv_ctx);
+ service->RequestRequestStream(&srv_ctx, &srv_stream, cq, cq, tag(1));
+ Verify(cq, 1, true);
+ int i = 1;
+ while (true) {
+ i++;
+ srv_stream.Read(&recv_buffer, tag(i));
+ if (!VerifyReturnSuccess(cq, i)) {
+ break;
+ }
+ EXPECT_TRUE(ParseFromByteBuffer(&recv_buffer, &recv_request));
+ send_response.mutable_message()->append(recv_request.message());
+ }
+ auto send_buffer = SerializeToByteBuffer(&send_response);
+ srv_stream.Finish(*send_buffer, Status::OK, tag(100));
+ Verify(cq, 100, true);
+}
+
+template <class Service>
+void HandleServerStreaming(Service* service, ServerCompletionQueue* cq) {
+ ServerContext srv_ctx;
+ EchoRequest recv_request;
+ EchoResponse send_response;
+ ServerAsyncWriter<EchoResponse> srv_stream(&srv_ctx);
+ service->RequestResponseStream(&srv_ctx, &recv_request, &srv_stream, cq, cq,
+ tag(1));
+ Verify(cq, 1, true);
+ send_response.set_message(recv_request.message() + "0");
+ srv_stream.Write(send_response, tag(2));
+ Verify(cq, 2, true);
+ send_response.set_message(recv_request.message() + "1");
+ srv_stream.Write(send_response, tag(3));
+ Verify(cq, 3, true);
+ send_response.set_message(recv_request.message() + "2");
+ srv_stream.Write(send_response, tag(4));
+ Verify(cq, 4, true);
+ srv_stream.Finish(Status::OK, tag(5));
+ Verify(cq, 5, true);
+}
+
+void HandleGenericEcho(GenericServerAsyncReaderWriter* stream,
+ CompletionQueue* cq) {
+ ByteBuffer recv_buffer;
+ stream->Read(&recv_buffer, tag(2));
+ Verify(cq, 2, true);
+ EchoRequest recv_request;
+ EXPECT_TRUE(ParseFromByteBuffer(&recv_buffer, &recv_request));
+ EchoResponse send_response;
+ send_response.set_message(recv_request.message());
+ auto send_buffer = SerializeToByteBuffer(&send_response);
+ stream->Write(*send_buffer, tag(3));
+ Verify(cq, 3, true);
+ stream->Finish(Status::OK, tag(4));
+ Verify(cq, 4, true);
+}
+
+void HandleGenericRequestStream(GenericServerAsyncReaderWriter* stream,
+ CompletionQueue* cq) {
+ ByteBuffer recv_buffer;
+ EchoRequest recv_request;
+ EchoResponse send_response;
+ int i = 1;
+ while (true) {
+ i++;
+ stream->Read(&recv_buffer, tag(i));
+ if (!VerifyReturnSuccess(cq, i)) {
+ break;
+ }
+ EXPECT_TRUE(ParseFromByteBuffer(&recv_buffer, &recv_request));
+ send_response.mutable_message()->append(recv_request.message());
+ }
+ auto send_buffer = SerializeToByteBuffer(&send_response);
+ stream->Write(*send_buffer, tag(99));
+ Verify(cq, 99, true);
+ stream->Finish(Status::OK, tag(100));
+ Verify(cq, 100, true);
+}
+
+// Request and handle one generic call.
+void HandleGenericCall(AsyncGenericService* service,
+ ServerCompletionQueue* cq) {
+ GenericServerContext srv_ctx;
+ GenericServerAsyncReaderWriter stream(&srv_ctx);
+ service->RequestCall(&srv_ctx, &stream, cq, cq, tag(1));
+ Verify(cq, 1, true);
+ if (srv_ctx.method() == "/grpc.testing.EchoTestService/Echo") {
+ HandleGenericEcho(&stream, cq);
+ } else if (srv_ctx.method() ==
+ "/grpc.testing.EchoTestService/RequestStream") {
+ HandleGenericRequestStream(&stream, cq);
+ } else { // other methods not handled yet.
+ gpr_log(GPR_ERROR, "method: %s", srv_ctx.method().c_str());
+ GPR_ASSERT(0);
+ }
+}
+
+class TestServiceImplDupPkg
+ : public ::grpc::testing::duplicate::EchoTestService::Service {
+ public:
+ Status Echo(ServerContext* /*context*/, const EchoRequest* request,
+ EchoResponse* response) override {
+ response->set_message(request->message() + "_dup");
+ return Status::OK;
+ }
+};
+
+class HybridEnd2endTest : public ::testing::TestWithParam<bool> {
+ protected:
+ HybridEnd2endTest() {}
+
+ static void SetUpTestCase() {
+#if TARGET_OS_IPHONE
+ // Workaround Apple CFStream bug
+ gpr_setenv("grpc_cfstream", "0");
+#endif
+ }
+
+ void SetUp() override {
+ inproc_ = (::testing::UnitTest::GetInstance()
+ ->current_test_info()
+ ->value_param() != nullptr)
+ ? GetParam()
+ : false;
+ }
+
bool SetUpServer(::grpc::Service* service1, ::grpc::Service* service2,
AsyncGenericService* generic_service,
CallbackGenericService* callback_generic_service,
int max_message_size = 0) {
- int port = grpc_pick_unused_port_or_die();
- server_address_ << "localhost:" << port;
-
- // Setup server
- ServerBuilder builder;
- builder.AddListeningPort(server_address_.str(),
- grpc::InsecureServerCredentials());
- // Always add a sync unimplemented service: we rely on having at least one
- // synchronous method to get a listening cq
- builder.RegisterService(&unimplemented_service_);
- builder.RegisterService(service1);
- if (service2) {
- builder.RegisterService(service2);
- }
- if (generic_service) {
- builder.RegisterAsyncGenericService(generic_service);
- }
- if (callback_generic_service) {
+ int port = grpc_pick_unused_port_or_die();
+ server_address_ << "localhost:" << port;
+
+ // Setup server
+ ServerBuilder builder;
+ builder.AddListeningPort(server_address_.str(),
+ grpc::InsecureServerCredentials());
+ // Always add a sync unimplemented service: we rely on having at least one
+ // synchronous method to get a listening cq
+ builder.RegisterService(&unimplemented_service_);
+ builder.RegisterService(service1);
+ if (service2) {
+ builder.RegisterService(service2);
+ }
+ if (generic_service) {
+ builder.RegisterAsyncGenericService(generic_service);
+ }
+ if (callback_generic_service) {
#ifdef GRPC_CALLBACK_API_NONEXPERIMENTAL
builder.RegisterCallbackGenericService(callback_generic_service);
#else
- builder.experimental().RegisterCallbackGenericService(
- callback_generic_service);
+ builder.experimental().RegisterCallbackGenericService(
+ callback_generic_service);
#endif
- }
-
- if (max_message_size != 0) {
- builder.SetMaxMessageSize(max_message_size);
- }
-
- // Create a separate cq for each potential handler.
- for (int i = 0; i < 5; i++) {
- cqs_.push_back(builder.AddCompletionQueue(false));
- }
- server_ = builder.BuildAndStart();
-
- // If there is a generic callback service, this setup is only successful if
- // we have an iomgr that can run in the background or we are in-process.
- return !callback_generic_service || grpc_iomgr_run_in_background() ||
- inproc_;
- }
-
- void TearDown() override {
- if (server_) {
- server_->Shutdown();
- }
- void* ignored_tag;
- bool ignored_ok;
- for (auto it = cqs_.begin(); it != cqs_.end(); ++it) {
- (*it)->Shutdown();
- while ((*it)->Next(&ignored_tag, &ignored_ok))
- ;
- }
- }
-
- void ResetStub() {
- std::shared_ptr<Channel> channel =
- inproc_ ? server_->InProcessChannel(ChannelArguments())
- : grpc::CreateChannel(server_address_.str(),
- InsecureChannelCredentials());
- stub_ = grpc::testing::EchoTestService::NewStub(channel);
- }
-
- // Test all rpc methods.
- void TestAllMethods() {
- SendEcho();
- SendSimpleClientStreaming();
- SendSimpleServerStreaming();
- SendBidiStreaming();
- }
-
- void SendEcho() {
- EchoRequest send_request;
- EchoResponse recv_response;
- ClientContext cli_ctx;
- cli_ctx.set_wait_for_ready(true);
- send_request.set_message("Hello");
- Status recv_status = stub_->Echo(&cli_ctx, send_request, &recv_response);
- EXPECT_EQ(send_request.message(), recv_response.message());
- EXPECT_TRUE(recv_status.ok());
- }
-
- void SendEchoToDupService() {
- std::shared_ptr<Channel> channel = grpc::CreateChannel(
- server_address_.str(), InsecureChannelCredentials());
- auto stub = grpc::testing::duplicate::EchoTestService::NewStub(channel);
- EchoRequest send_request;
- EchoResponse recv_response;
- ClientContext cli_ctx;
- cli_ctx.set_wait_for_ready(true);
- send_request.set_message("Hello");
- Status recv_status = stub->Echo(&cli_ctx, send_request, &recv_response);
- EXPECT_EQ(send_request.message() + "_dup", recv_response.message());
- EXPECT_TRUE(recv_status.ok());
- }
-
- void SendSimpleClientStreaming() {
- EchoRequest send_request;
- EchoResponse recv_response;
+ }
+
+ if (max_message_size != 0) {
+ builder.SetMaxMessageSize(max_message_size);
+ }
+
+ // Create a separate cq for each potential handler.
+ for (int i = 0; i < 5; i++) {
+ cqs_.push_back(builder.AddCompletionQueue(false));
+ }
+ server_ = builder.BuildAndStart();
+
+ // If there is a generic callback service, this setup is only successful if
+ // we have an iomgr that can run in the background or we are in-process.
+ return !callback_generic_service || grpc_iomgr_run_in_background() ||
+ inproc_;
+ }
+
+ void TearDown() override {
+ if (server_) {
+ server_->Shutdown();
+ }
+ void* ignored_tag;
+ bool ignored_ok;
+ for (auto it = cqs_.begin(); it != cqs_.end(); ++it) {
+ (*it)->Shutdown();
+ while ((*it)->Next(&ignored_tag, &ignored_ok))
+ ;
+ }
+ }
+
+ void ResetStub() {
+ std::shared_ptr<Channel> channel =
+ inproc_ ? server_->InProcessChannel(ChannelArguments())
+ : grpc::CreateChannel(server_address_.str(),
+ InsecureChannelCredentials());
+ stub_ = grpc::testing::EchoTestService::NewStub(channel);
+ }
+
+ // Test all rpc methods.
+ void TestAllMethods() {
+ SendEcho();
+ SendSimpleClientStreaming();
+ SendSimpleServerStreaming();
+ SendBidiStreaming();
+ }
+
+ void SendEcho() {
+ EchoRequest send_request;
+ EchoResponse recv_response;
+ ClientContext cli_ctx;
+ cli_ctx.set_wait_for_ready(true);
+ send_request.set_message("Hello");
+ Status recv_status = stub_->Echo(&cli_ctx, send_request, &recv_response);
+ EXPECT_EQ(send_request.message(), recv_response.message());
+ EXPECT_TRUE(recv_status.ok());
+ }
+
+ void SendEchoToDupService() {
+ std::shared_ptr<Channel> channel = grpc::CreateChannel(
+ server_address_.str(), InsecureChannelCredentials());
+ auto stub = grpc::testing::duplicate::EchoTestService::NewStub(channel);
+ EchoRequest send_request;
+ EchoResponse recv_response;
+ ClientContext cli_ctx;
+ cli_ctx.set_wait_for_ready(true);
+ send_request.set_message("Hello");
+ Status recv_status = stub->Echo(&cli_ctx, send_request, &recv_response);
+ EXPECT_EQ(send_request.message() + "_dup", recv_response.message());
+ EXPECT_TRUE(recv_status.ok());
+ }
+
+ void SendSimpleClientStreaming() {
+ EchoRequest send_request;
+ EchoResponse recv_response;
TString expected_message;
- ClientContext cli_ctx;
- cli_ctx.set_wait_for_ready(true);
- send_request.set_message("Hello");
- auto stream = stub_->RequestStream(&cli_ctx, &recv_response);
- for (int i = 0; i < 5; i++) {
- EXPECT_TRUE(stream->Write(send_request));
- expected_message.append(send_request.message());
- }
- stream->WritesDone();
- Status recv_status = stream->Finish();
- EXPECT_EQ(expected_message, recv_response.message());
- EXPECT_TRUE(recv_status.ok());
- }
-
- void SendSimpleServerStreaming() {
- EchoRequest request;
- EchoResponse response;
- ClientContext context;
- context.set_wait_for_ready(true);
- request.set_message("hello");
-
- auto stream = stub_->ResponseStream(&context, request);
- EXPECT_TRUE(stream->Read(&response));
- EXPECT_EQ(response.message(), request.message() + "0");
- EXPECT_TRUE(stream->Read(&response));
- EXPECT_EQ(response.message(), request.message() + "1");
- EXPECT_TRUE(stream->Read(&response));
- EXPECT_EQ(response.message(), request.message() + "2");
- EXPECT_FALSE(stream->Read(&response));
-
- Status s = stream->Finish();
- EXPECT_TRUE(s.ok());
- }
-
- void SendSimpleServerStreamingToDupService() {
- std::shared_ptr<Channel> channel = grpc::CreateChannel(
- server_address_.str(), InsecureChannelCredentials());
- auto stub = grpc::testing::duplicate::EchoTestService::NewStub(channel);
- EchoRequest request;
- EchoResponse response;
- ClientContext context;
- context.set_wait_for_ready(true);
- request.set_message("hello");
-
- auto stream = stub->ResponseStream(&context, request);
- EXPECT_TRUE(stream->Read(&response));
- EXPECT_EQ(response.message(), request.message() + "0_dup");
- EXPECT_TRUE(stream->Read(&response));
- EXPECT_EQ(response.message(), request.message() + "1_dup");
- EXPECT_TRUE(stream->Read(&response));
- EXPECT_EQ(response.message(), request.message() + "2_dup");
- EXPECT_FALSE(stream->Read(&response));
-
- Status s = stream->Finish();
- EXPECT_TRUE(s.ok());
- }
-
- void SendBidiStreaming() {
- EchoRequest request;
- EchoResponse response;
- ClientContext context;
- context.set_wait_for_ready(true);
+ ClientContext cli_ctx;
+ cli_ctx.set_wait_for_ready(true);
+ send_request.set_message("Hello");
+ auto stream = stub_->RequestStream(&cli_ctx, &recv_response);
+ for (int i = 0; i < 5; i++) {
+ EXPECT_TRUE(stream->Write(send_request));
+ expected_message.append(send_request.message());
+ }
+ stream->WritesDone();
+ Status recv_status = stream->Finish();
+ EXPECT_EQ(expected_message, recv_response.message());
+ EXPECT_TRUE(recv_status.ok());
+ }
+
+ void SendSimpleServerStreaming() {
+ EchoRequest request;
+ EchoResponse response;
+ ClientContext context;
+ context.set_wait_for_ready(true);
+ request.set_message("hello");
+
+ auto stream = stub_->ResponseStream(&context, request);
+ EXPECT_TRUE(stream->Read(&response));
+ EXPECT_EQ(response.message(), request.message() + "0");
+ EXPECT_TRUE(stream->Read(&response));
+ EXPECT_EQ(response.message(), request.message() + "1");
+ EXPECT_TRUE(stream->Read(&response));
+ EXPECT_EQ(response.message(), request.message() + "2");
+ EXPECT_FALSE(stream->Read(&response));
+
+ Status s = stream->Finish();
+ EXPECT_TRUE(s.ok());
+ }
+
+ void SendSimpleServerStreamingToDupService() {
+ std::shared_ptr<Channel> channel = grpc::CreateChannel(
+ server_address_.str(), InsecureChannelCredentials());
+ auto stub = grpc::testing::duplicate::EchoTestService::NewStub(channel);
+ EchoRequest request;
+ EchoResponse response;
+ ClientContext context;
+ context.set_wait_for_ready(true);
+ request.set_message("hello");
+
+ auto stream = stub->ResponseStream(&context, request);
+ EXPECT_TRUE(stream->Read(&response));
+ EXPECT_EQ(response.message(), request.message() + "0_dup");
+ EXPECT_TRUE(stream->Read(&response));
+ EXPECT_EQ(response.message(), request.message() + "1_dup");
+ EXPECT_TRUE(stream->Read(&response));
+ EXPECT_EQ(response.message(), request.message() + "2_dup");
+ EXPECT_FALSE(stream->Read(&response));
+
+ Status s = stream->Finish();
+ EXPECT_TRUE(s.ok());
+ }
+
+ void SendBidiStreaming() {
+ EchoRequest request;
+ EchoResponse response;
+ ClientContext context;
+ context.set_wait_for_ready(true);
TString msg("hello");
-
- auto stream = stub_->BidiStream(&context);
-
- request.set_message(msg + "0");
- EXPECT_TRUE(stream->Write(request));
- EXPECT_TRUE(stream->Read(&response));
- EXPECT_EQ(response.message(), request.message());
-
- request.set_message(msg + "1");
- EXPECT_TRUE(stream->Write(request));
- EXPECT_TRUE(stream->Read(&response));
- EXPECT_EQ(response.message(), request.message());
-
- request.set_message(msg + "2");
- EXPECT_TRUE(stream->Write(request));
- EXPECT_TRUE(stream->Read(&response));
- EXPECT_EQ(response.message(), request.message());
-
- stream->WritesDone();
- EXPECT_FALSE(stream->Read(&response));
- EXPECT_FALSE(stream->Read(&response));
-
- Status s = stream->Finish();
- EXPECT_TRUE(s.ok());
- }
-
- grpc::testing::UnimplementedEchoService::Service unimplemented_service_;
- std::vector<std::unique_ptr<ServerCompletionQueue>> cqs_;
- std::unique_ptr<grpc::testing::EchoTestService::Stub> stub_;
- std::unique_ptr<Server> server_;
- std::ostringstream server_address_;
- bool inproc_;
-};
-
-TEST_F(HybridEnd2endTest, AsyncEcho) {
- typedef EchoTestService::WithAsyncMethod_Echo<TestServiceImpl> SType;
- SType service;
- SetUpServer(&service, nullptr, nullptr, nullptr);
- ResetStub();
- std::thread echo_handler_thread(HandleEcho<SType>, &service, cqs_[0].get(),
- false);
- TestAllMethods();
- echo_handler_thread.join();
-}
-
-TEST_F(HybridEnd2endTest, RawEcho) {
- typedef EchoTestService::WithRawMethod_Echo<TestServiceImpl> SType;
- SType service;
- SetUpServer(&service, nullptr, nullptr, nullptr);
- ResetStub();
- std::thread echo_handler_thread(HandleRawEcho<SType>, &service, cqs_[0].get(),
- false);
- TestAllMethods();
- echo_handler_thread.join();
-}
-
-TEST_F(HybridEnd2endTest, RawRequestStream) {
- typedef EchoTestService::WithRawMethod_RequestStream<TestServiceImpl> SType;
- SType service;
- SetUpServer(&service, nullptr, nullptr, nullptr);
- ResetStub();
- std::thread request_stream_handler_thread(HandleRawClientStreaming<SType>,
- &service, cqs_[0].get());
- TestAllMethods();
- request_stream_handler_thread.join();
-}
-
-TEST_F(HybridEnd2endTest, AsyncEchoRawRequestStream) {
- typedef EchoTestService::WithRawMethod_RequestStream<
- EchoTestService::WithAsyncMethod_Echo<TestServiceImpl>>
- SType;
- SType service;
- SetUpServer(&service, nullptr, nullptr, nullptr);
- ResetStub();
- std::thread echo_handler_thread(HandleEcho<SType>, &service, cqs_[0].get(),
- false);
- std::thread request_stream_handler_thread(HandleRawClientStreaming<SType>,
- &service, cqs_[1].get());
- TestAllMethods();
- request_stream_handler_thread.join();
- echo_handler_thread.join();
-}
-
-TEST_F(HybridEnd2endTest, GenericEchoRawRequestStream) {
- typedef EchoTestService::WithRawMethod_RequestStream<
- EchoTestService::WithGenericMethod_Echo<TestServiceImpl>>
- SType;
- SType service;
- AsyncGenericService generic_service;
- SetUpServer(&service, nullptr, &generic_service, nullptr);
- ResetStub();
- std::thread generic_handler_thread(HandleGenericCall, &generic_service,
- cqs_[0].get());
- std::thread request_stream_handler_thread(HandleRawClientStreaming<SType>,
- &service, cqs_[1].get());
- TestAllMethods();
- generic_handler_thread.join();
- request_stream_handler_thread.join();
-}
-
-TEST_F(HybridEnd2endTest, AsyncEchoRequestStream) {
- typedef EchoTestService::WithAsyncMethod_RequestStream<
- EchoTestService::WithAsyncMethod_Echo<TestServiceImpl>>
- SType;
- SType service;
- SetUpServer(&service, nullptr, nullptr, nullptr);
- ResetStub();
- std::thread echo_handler_thread(HandleEcho<SType>, &service, cqs_[0].get(),
- false);
- std::thread request_stream_handler_thread(HandleClientStreaming<SType>,
- &service, cqs_[1].get());
- TestAllMethods();
- echo_handler_thread.join();
- request_stream_handler_thread.join();
-}
-
-TEST_F(HybridEnd2endTest, AsyncRequestStreamResponseStream) {
- typedef EchoTestService::WithAsyncMethod_RequestStream<
- EchoTestService::WithAsyncMethod_ResponseStream<TestServiceImpl>>
- SType;
- SType service;
- SetUpServer(&service, nullptr, nullptr, nullptr);
- ResetStub();
- std::thread response_stream_handler_thread(HandleServerStreaming<SType>,
- &service, cqs_[0].get());
- std::thread request_stream_handler_thread(HandleClientStreaming<SType>,
- &service, cqs_[1].get());
- TestAllMethods();
- response_stream_handler_thread.join();
- request_stream_handler_thread.join();
-}
-
-// Add a second service with one sync method.
-TEST_F(HybridEnd2endTest, AsyncRequestStreamResponseStream_SyncDupService) {
- typedef EchoTestService::WithAsyncMethod_RequestStream<
- EchoTestService::WithAsyncMethod_ResponseStream<TestServiceImpl>>
- SType;
- SType service;
- TestServiceImplDupPkg dup_service;
- SetUpServer(&service, &dup_service, nullptr, nullptr);
- ResetStub();
- std::thread response_stream_handler_thread(HandleServerStreaming<SType>,
- &service, cqs_[0].get());
- std::thread request_stream_handler_thread(HandleClientStreaming<SType>,
- &service, cqs_[1].get());
- TestAllMethods();
- SendEchoToDupService();
- response_stream_handler_thread.join();
- request_stream_handler_thread.join();
-}
-
-// Add a second service with one sync streamed unary method.
-class StreamedUnaryDupPkg
- : public duplicate::EchoTestService::WithStreamedUnaryMethod_Echo<
- TestServiceImplDupPkg> {
- public:
- Status StreamedEcho(
- ServerContext* /*context*/,
- ServerUnaryStreamer<EchoRequest, EchoResponse>* stream) override {
- EchoRequest req;
- EchoResponse resp;
- uint32_t next_msg_sz;
- stream->NextMessageSize(&next_msg_sz);
- gpr_log(GPR_INFO, "Streamed Unary Next Message Size is %u", next_msg_sz);
- GPR_ASSERT(stream->Read(&req));
- resp.set_message(req.message() + "_dup");
- GPR_ASSERT(stream->Write(resp));
- return Status::OK;
- }
-};
-
-TEST_F(HybridEnd2endTest,
- AsyncRequestStreamResponseStream_SyncStreamedUnaryDupService) {
- typedef EchoTestService::WithAsyncMethod_RequestStream<
- EchoTestService::WithAsyncMethod_ResponseStream<TestServiceImpl>>
- SType;
- SType service;
- StreamedUnaryDupPkg dup_service;
- SetUpServer(&service, &dup_service, nullptr, nullptr, 8192);
- ResetStub();
- std::thread response_stream_handler_thread(HandleServerStreaming<SType>,
- &service, cqs_[0].get());
- std::thread request_stream_handler_thread(HandleClientStreaming<SType>,
- &service, cqs_[1].get());
- TestAllMethods();
- SendEchoToDupService();
- response_stream_handler_thread.join();
- request_stream_handler_thread.join();
-}
-
-// Add a second service that is fully Streamed Unary
-class FullyStreamedUnaryDupPkg
- : public duplicate::EchoTestService::StreamedUnaryService {
- public:
- Status StreamedEcho(
- ServerContext* /*context*/,
- ServerUnaryStreamer<EchoRequest, EchoResponse>* stream) override {
- EchoRequest req;
- EchoResponse resp;
- uint32_t next_msg_sz;
- stream->NextMessageSize(&next_msg_sz);
- gpr_log(GPR_INFO, "Streamed Unary Next Message Size is %u", next_msg_sz);
- GPR_ASSERT(stream->Read(&req));
- resp.set_message(req.message() + "_dup");
- GPR_ASSERT(stream->Write(resp));
- return Status::OK;
- }
-};
-
-TEST_F(HybridEnd2endTest,
- AsyncRequestStreamResponseStream_SyncFullyStreamedUnaryDupService) {
- typedef EchoTestService::WithAsyncMethod_RequestStream<
- EchoTestService::WithAsyncMethod_ResponseStream<TestServiceImpl>>
- SType;
- SType service;
- FullyStreamedUnaryDupPkg dup_service;
- SetUpServer(&service, &dup_service, nullptr, nullptr, 8192);
- ResetStub();
- std::thread response_stream_handler_thread(HandleServerStreaming<SType>,
- &service, cqs_[0].get());
- std::thread request_stream_handler_thread(HandleClientStreaming<SType>,
- &service, cqs_[1].get());
- TestAllMethods();
- SendEchoToDupService();
- response_stream_handler_thread.join();
- request_stream_handler_thread.join();
-}
-
-// Add a second service with one sync split server streaming method.
-class SplitResponseStreamDupPkg
- : public duplicate::EchoTestService::
- WithSplitStreamingMethod_ResponseStream<TestServiceImplDupPkg> {
- public:
- Status StreamedResponseStream(
- ServerContext* /*context*/,
- ServerSplitStreamer<EchoRequest, EchoResponse>* stream) override {
- EchoRequest req;
- EchoResponse resp;
- uint32_t next_msg_sz;
- stream->NextMessageSize(&next_msg_sz);
- gpr_log(GPR_INFO, "Split Streamed Next Message Size is %u", next_msg_sz);
- GPR_ASSERT(stream->Read(&req));
- for (int i = 0; i < kServerDefaultResponseStreamsToSend; i++) {
+
+ auto stream = stub_->BidiStream(&context);
+
+ request.set_message(msg + "0");
+ EXPECT_TRUE(stream->Write(request));
+ EXPECT_TRUE(stream->Read(&response));
+ EXPECT_EQ(response.message(), request.message());
+
+ request.set_message(msg + "1");
+ EXPECT_TRUE(stream->Write(request));
+ EXPECT_TRUE(stream->Read(&response));
+ EXPECT_EQ(response.message(), request.message());
+
+ request.set_message(msg + "2");
+ EXPECT_TRUE(stream->Write(request));
+ EXPECT_TRUE(stream->Read(&response));
+ EXPECT_EQ(response.message(), request.message());
+
+ stream->WritesDone();
+ EXPECT_FALSE(stream->Read(&response));
+ EXPECT_FALSE(stream->Read(&response));
+
+ Status s = stream->Finish();
+ EXPECT_TRUE(s.ok());
+ }
+
+ grpc::testing::UnimplementedEchoService::Service unimplemented_service_;
+ std::vector<std::unique_ptr<ServerCompletionQueue>> cqs_;
+ std::unique_ptr<grpc::testing::EchoTestService::Stub> stub_;
+ std::unique_ptr<Server> server_;
+ std::ostringstream server_address_;
+ bool inproc_;
+};
+
+TEST_F(HybridEnd2endTest, AsyncEcho) {
+ typedef EchoTestService::WithAsyncMethod_Echo<TestServiceImpl> SType;
+ SType service;
+ SetUpServer(&service, nullptr, nullptr, nullptr);
+ ResetStub();
+ std::thread echo_handler_thread(HandleEcho<SType>, &service, cqs_[0].get(),
+ false);
+ TestAllMethods();
+ echo_handler_thread.join();
+}
+
+TEST_F(HybridEnd2endTest, RawEcho) {
+ typedef EchoTestService::WithRawMethod_Echo<TestServiceImpl> SType;
+ SType service;
+ SetUpServer(&service, nullptr, nullptr, nullptr);
+ ResetStub();
+ std::thread echo_handler_thread(HandleRawEcho<SType>, &service, cqs_[0].get(),
+ false);
+ TestAllMethods();
+ echo_handler_thread.join();
+}
+
+TEST_F(HybridEnd2endTest, RawRequestStream) {
+ typedef EchoTestService::WithRawMethod_RequestStream<TestServiceImpl> SType;
+ SType service;
+ SetUpServer(&service, nullptr, nullptr, nullptr);
+ ResetStub();
+ std::thread request_stream_handler_thread(HandleRawClientStreaming<SType>,
+ &service, cqs_[0].get());
+ TestAllMethods();
+ request_stream_handler_thread.join();
+}
+
+TEST_F(HybridEnd2endTest, AsyncEchoRawRequestStream) {
+ typedef EchoTestService::WithRawMethod_RequestStream<
+ EchoTestService::WithAsyncMethod_Echo<TestServiceImpl>>
+ SType;
+ SType service;
+ SetUpServer(&service, nullptr, nullptr, nullptr);
+ ResetStub();
+ std::thread echo_handler_thread(HandleEcho<SType>, &service, cqs_[0].get(),
+ false);
+ std::thread request_stream_handler_thread(HandleRawClientStreaming<SType>,
+ &service, cqs_[1].get());
+ TestAllMethods();
+ request_stream_handler_thread.join();
+ echo_handler_thread.join();
+}
+
+TEST_F(HybridEnd2endTest, GenericEchoRawRequestStream) {
+ typedef EchoTestService::WithRawMethod_RequestStream<
+ EchoTestService::WithGenericMethod_Echo<TestServiceImpl>>
+ SType;
+ SType service;
+ AsyncGenericService generic_service;
+ SetUpServer(&service, nullptr, &generic_service, nullptr);
+ ResetStub();
+ std::thread generic_handler_thread(HandleGenericCall, &generic_service,
+ cqs_[0].get());
+ std::thread request_stream_handler_thread(HandleRawClientStreaming<SType>,
+ &service, cqs_[1].get());
+ TestAllMethods();
+ generic_handler_thread.join();
+ request_stream_handler_thread.join();
+}
+
+TEST_F(HybridEnd2endTest, AsyncEchoRequestStream) {
+ typedef EchoTestService::WithAsyncMethod_RequestStream<
+ EchoTestService::WithAsyncMethod_Echo<TestServiceImpl>>
+ SType;
+ SType service;
+ SetUpServer(&service, nullptr, nullptr, nullptr);
+ ResetStub();
+ std::thread echo_handler_thread(HandleEcho<SType>, &service, cqs_[0].get(),
+ false);
+ std::thread request_stream_handler_thread(HandleClientStreaming<SType>,
+ &service, cqs_[1].get());
+ TestAllMethods();
+ echo_handler_thread.join();
+ request_stream_handler_thread.join();
+}
+
+TEST_F(HybridEnd2endTest, AsyncRequestStreamResponseStream) {
+ typedef EchoTestService::WithAsyncMethod_RequestStream<
+ EchoTestService::WithAsyncMethod_ResponseStream<TestServiceImpl>>
+ SType;
+ SType service;
+ SetUpServer(&service, nullptr, nullptr, nullptr);
+ ResetStub();
+ std::thread response_stream_handler_thread(HandleServerStreaming<SType>,
+ &service, cqs_[0].get());
+ std::thread request_stream_handler_thread(HandleClientStreaming<SType>,
+ &service, cqs_[1].get());
+ TestAllMethods();
+ response_stream_handler_thread.join();
+ request_stream_handler_thread.join();
+}
+
+// Add a second service with one sync method.
+TEST_F(HybridEnd2endTest, AsyncRequestStreamResponseStream_SyncDupService) {
+ typedef EchoTestService::WithAsyncMethod_RequestStream<
+ EchoTestService::WithAsyncMethod_ResponseStream<TestServiceImpl>>
+ SType;
+ SType service;
+ TestServiceImplDupPkg dup_service;
+ SetUpServer(&service, &dup_service, nullptr, nullptr);
+ ResetStub();
+ std::thread response_stream_handler_thread(HandleServerStreaming<SType>,
+ &service, cqs_[0].get());
+ std::thread request_stream_handler_thread(HandleClientStreaming<SType>,
+ &service, cqs_[1].get());
+ TestAllMethods();
+ SendEchoToDupService();
+ response_stream_handler_thread.join();
+ request_stream_handler_thread.join();
+}
+
+// Add a second service with one sync streamed unary method.
+class StreamedUnaryDupPkg
+ : public duplicate::EchoTestService::WithStreamedUnaryMethod_Echo<
+ TestServiceImplDupPkg> {
+ public:
+ Status StreamedEcho(
+ ServerContext* /*context*/,
+ ServerUnaryStreamer<EchoRequest, EchoResponse>* stream) override {
+ EchoRequest req;
+ EchoResponse resp;
+ uint32_t next_msg_sz;
+ stream->NextMessageSize(&next_msg_sz);
+ gpr_log(GPR_INFO, "Streamed Unary Next Message Size is %u", next_msg_sz);
+ GPR_ASSERT(stream->Read(&req));
+ resp.set_message(req.message() + "_dup");
+ GPR_ASSERT(stream->Write(resp));
+ return Status::OK;
+ }
+};
+
+TEST_F(HybridEnd2endTest,
+ AsyncRequestStreamResponseStream_SyncStreamedUnaryDupService) {
+ typedef EchoTestService::WithAsyncMethod_RequestStream<
+ EchoTestService::WithAsyncMethod_ResponseStream<TestServiceImpl>>
+ SType;
+ SType service;
+ StreamedUnaryDupPkg dup_service;
+ SetUpServer(&service, &dup_service, nullptr, nullptr, 8192);
+ ResetStub();
+ std::thread response_stream_handler_thread(HandleServerStreaming<SType>,
+ &service, cqs_[0].get());
+ std::thread request_stream_handler_thread(HandleClientStreaming<SType>,
+ &service, cqs_[1].get());
+ TestAllMethods();
+ SendEchoToDupService();
+ response_stream_handler_thread.join();
+ request_stream_handler_thread.join();
+}
+
+// Add a second service that is fully Streamed Unary
+class FullyStreamedUnaryDupPkg
+ : public duplicate::EchoTestService::StreamedUnaryService {
+ public:
+ Status StreamedEcho(
+ ServerContext* /*context*/,
+ ServerUnaryStreamer<EchoRequest, EchoResponse>* stream) override {
+ EchoRequest req;
+ EchoResponse resp;
+ uint32_t next_msg_sz;
+ stream->NextMessageSize(&next_msg_sz);
+ gpr_log(GPR_INFO, "Streamed Unary Next Message Size is %u", next_msg_sz);
+ GPR_ASSERT(stream->Read(&req));
+ resp.set_message(req.message() + "_dup");
+ GPR_ASSERT(stream->Write(resp));
+ return Status::OK;
+ }
+};
+
+TEST_F(HybridEnd2endTest,
+ AsyncRequestStreamResponseStream_SyncFullyStreamedUnaryDupService) {
+ typedef EchoTestService::WithAsyncMethod_RequestStream<
+ EchoTestService::WithAsyncMethod_ResponseStream<TestServiceImpl>>
+ SType;
+ SType service;
+ FullyStreamedUnaryDupPkg dup_service;
+ SetUpServer(&service, &dup_service, nullptr, nullptr, 8192);
+ ResetStub();
+ std::thread response_stream_handler_thread(HandleServerStreaming<SType>,
+ &service, cqs_[0].get());
+ std::thread request_stream_handler_thread(HandleClientStreaming<SType>,
+ &service, cqs_[1].get());
+ TestAllMethods();
+ SendEchoToDupService();
+ response_stream_handler_thread.join();
+ request_stream_handler_thread.join();
+}
+
+// Add a second service with one sync split server streaming method.
+class SplitResponseStreamDupPkg
+ : public duplicate::EchoTestService::
+ WithSplitStreamingMethod_ResponseStream<TestServiceImplDupPkg> {
+ public:
+ Status StreamedResponseStream(
+ ServerContext* /*context*/,
+ ServerSplitStreamer<EchoRequest, EchoResponse>* stream) override {
+ EchoRequest req;
+ EchoResponse resp;
+ uint32_t next_msg_sz;
+ stream->NextMessageSize(&next_msg_sz);
+ gpr_log(GPR_INFO, "Split Streamed Next Message Size is %u", next_msg_sz);
+ GPR_ASSERT(stream->Read(&req));
+ for (int i = 0; i < kServerDefaultResponseStreamsToSend; i++) {
resp.set_message(req.message() + ToString(i) + "_dup");
- GPR_ASSERT(stream->Write(resp));
- }
- return Status::OK;
- }
-};
-
-TEST_F(HybridEnd2endTest,
- AsyncRequestStreamResponseStream_SyncSplitStreamedDupService) {
- typedef EchoTestService::WithAsyncMethod_RequestStream<
- EchoTestService::WithAsyncMethod_ResponseStream<TestServiceImpl>>
- SType;
- SType service;
- SplitResponseStreamDupPkg dup_service;
- SetUpServer(&service, &dup_service, nullptr, nullptr, 8192);
- ResetStub();
- std::thread response_stream_handler_thread(HandleServerStreaming<SType>,
- &service, cqs_[0].get());
- std::thread request_stream_handler_thread(HandleClientStreaming<SType>,
- &service, cqs_[1].get());
- TestAllMethods();
- SendSimpleServerStreamingToDupService();
- response_stream_handler_thread.join();
- request_stream_handler_thread.join();
-}
-
-// Add a second service that is fully split server streamed
-class FullySplitStreamedDupPkg
- : public duplicate::EchoTestService::SplitStreamedService {
- public:
- Status StreamedResponseStream(
- ServerContext* /*context*/,
- ServerSplitStreamer<EchoRequest, EchoResponse>* stream) override {
- EchoRequest req;
- EchoResponse resp;
- uint32_t next_msg_sz;
- stream->NextMessageSize(&next_msg_sz);
- gpr_log(GPR_INFO, "Split Streamed Next Message Size is %u", next_msg_sz);
- GPR_ASSERT(stream->Read(&req));
- for (int i = 0; i < kServerDefaultResponseStreamsToSend; i++) {
+ GPR_ASSERT(stream->Write(resp));
+ }
+ return Status::OK;
+ }
+};
+
+TEST_F(HybridEnd2endTest,
+ AsyncRequestStreamResponseStream_SyncSplitStreamedDupService) {
+ typedef EchoTestService::WithAsyncMethod_RequestStream<
+ EchoTestService::WithAsyncMethod_ResponseStream<TestServiceImpl>>
+ SType;
+ SType service;
+ SplitResponseStreamDupPkg dup_service;
+ SetUpServer(&service, &dup_service, nullptr, nullptr, 8192);
+ ResetStub();
+ std::thread response_stream_handler_thread(HandleServerStreaming<SType>,
+ &service, cqs_[0].get());
+ std::thread request_stream_handler_thread(HandleClientStreaming<SType>,
+ &service, cqs_[1].get());
+ TestAllMethods();
+ SendSimpleServerStreamingToDupService();
+ response_stream_handler_thread.join();
+ request_stream_handler_thread.join();
+}
+
+// Add a second service that is fully split server streamed
+class FullySplitStreamedDupPkg
+ : public duplicate::EchoTestService::SplitStreamedService {
+ public:
+ Status StreamedResponseStream(
+ ServerContext* /*context*/,
+ ServerSplitStreamer<EchoRequest, EchoResponse>* stream) override {
+ EchoRequest req;
+ EchoResponse resp;
+ uint32_t next_msg_sz;
+ stream->NextMessageSize(&next_msg_sz);
+ gpr_log(GPR_INFO, "Split Streamed Next Message Size is %u", next_msg_sz);
+ GPR_ASSERT(stream->Read(&req));
+ for (int i = 0; i < kServerDefaultResponseStreamsToSend; i++) {
resp.set_message(req.message() + ToString(i) + "_dup");
- GPR_ASSERT(stream->Write(resp));
- }
- return Status::OK;
- }
-};
-
-TEST_F(HybridEnd2endTest,
- AsyncRequestStreamResponseStream_FullySplitStreamedDupService) {
- typedef EchoTestService::WithAsyncMethod_RequestStream<
- EchoTestService::WithAsyncMethod_ResponseStream<TestServiceImpl>>
- SType;
- SType service;
- FullySplitStreamedDupPkg dup_service;
- SetUpServer(&service, &dup_service, nullptr, nullptr, 8192);
- ResetStub();
- std::thread response_stream_handler_thread(HandleServerStreaming<SType>,
- &service, cqs_[0].get());
- std::thread request_stream_handler_thread(HandleClientStreaming<SType>,
- &service, cqs_[1].get());
- TestAllMethods();
- SendSimpleServerStreamingToDupService();
- response_stream_handler_thread.join();
- request_stream_handler_thread.join();
-}
-
-// Add a second service that is fully server streamed
-class FullyStreamedDupPkg : public duplicate::EchoTestService::StreamedService {
- public:
- Status StreamedEcho(
- ServerContext* /*context*/,
- ServerUnaryStreamer<EchoRequest, EchoResponse>* stream) override {
- EchoRequest req;
- EchoResponse resp;
- uint32_t next_msg_sz;
- stream->NextMessageSize(&next_msg_sz);
- gpr_log(GPR_INFO, "Streamed Unary Next Message Size is %u", next_msg_sz);
- GPR_ASSERT(stream->Read(&req));
- resp.set_message(req.message() + "_dup");
- GPR_ASSERT(stream->Write(resp));
- return Status::OK;
- }
- Status StreamedResponseStream(
- ServerContext* /*context*/,
- ServerSplitStreamer<EchoRequest, EchoResponse>* stream) override {
- EchoRequest req;
- EchoResponse resp;
- uint32_t next_msg_sz;
- stream->NextMessageSize(&next_msg_sz);
- gpr_log(GPR_INFO, "Split Streamed Next Message Size is %u", next_msg_sz);
- GPR_ASSERT(stream->Read(&req));
- for (int i = 0; i < kServerDefaultResponseStreamsToSend; i++) {
+ GPR_ASSERT(stream->Write(resp));
+ }
+ return Status::OK;
+ }
+};
+
+TEST_F(HybridEnd2endTest,
+ AsyncRequestStreamResponseStream_FullySplitStreamedDupService) {
+ typedef EchoTestService::WithAsyncMethod_RequestStream<
+ EchoTestService::WithAsyncMethod_ResponseStream<TestServiceImpl>>
+ SType;
+ SType service;
+ FullySplitStreamedDupPkg dup_service;
+ SetUpServer(&service, &dup_service, nullptr, nullptr, 8192);
+ ResetStub();
+ std::thread response_stream_handler_thread(HandleServerStreaming<SType>,
+ &service, cqs_[0].get());
+ std::thread request_stream_handler_thread(HandleClientStreaming<SType>,
+ &service, cqs_[1].get());
+ TestAllMethods();
+ SendSimpleServerStreamingToDupService();
+ response_stream_handler_thread.join();
+ request_stream_handler_thread.join();
+}
+
+// Add a second service that is fully server streamed
+class FullyStreamedDupPkg : public duplicate::EchoTestService::StreamedService {
+ public:
+ Status StreamedEcho(
+ ServerContext* /*context*/,
+ ServerUnaryStreamer<EchoRequest, EchoResponse>* stream) override {
+ EchoRequest req;
+ EchoResponse resp;
+ uint32_t next_msg_sz;
+ stream->NextMessageSize(&next_msg_sz);
+ gpr_log(GPR_INFO, "Streamed Unary Next Message Size is %u", next_msg_sz);
+ GPR_ASSERT(stream->Read(&req));
+ resp.set_message(req.message() + "_dup");
+ GPR_ASSERT(stream->Write(resp));
+ return Status::OK;
+ }
+ Status StreamedResponseStream(
+ ServerContext* /*context*/,
+ ServerSplitStreamer<EchoRequest, EchoResponse>* stream) override {
+ EchoRequest req;
+ EchoResponse resp;
+ uint32_t next_msg_sz;
+ stream->NextMessageSize(&next_msg_sz);
+ gpr_log(GPR_INFO, "Split Streamed Next Message Size is %u", next_msg_sz);
+ GPR_ASSERT(stream->Read(&req));
+ for (int i = 0; i < kServerDefaultResponseStreamsToSend; i++) {
resp.set_message(req.message() + ToString(i) + "_dup");
- GPR_ASSERT(stream->Write(resp));
- }
- return Status::OK;
- }
-};
-
-TEST_F(HybridEnd2endTest,
- AsyncRequestStreamResponseStream_FullyStreamedDupService) {
- typedef EchoTestService::WithAsyncMethod_RequestStream<
- EchoTestService::WithAsyncMethod_ResponseStream<TestServiceImpl>>
- SType;
- SType service;
- FullyStreamedDupPkg dup_service;
- SetUpServer(&service, &dup_service, nullptr, nullptr, 8192);
- ResetStub();
- std::thread response_stream_handler_thread(HandleServerStreaming<SType>,
- &service, cqs_[0].get());
- std::thread request_stream_handler_thread(HandleClientStreaming<SType>,
- &service, cqs_[1].get());
- TestAllMethods();
- SendEchoToDupService();
- SendSimpleServerStreamingToDupService();
- response_stream_handler_thread.join();
- request_stream_handler_thread.join();
-}
-
-// Add a second service with one async method.
-TEST_F(HybridEnd2endTest, AsyncRequestStreamResponseStream_AsyncDupService) {
- typedef EchoTestService::WithAsyncMethod_RequestStream<
- EchoTestService::WithAsyncMethod_ResponseStream<TestServiceImpl>>
- SType;
- SType service;
- duplicate::EchoTestService::AsyncService dup_service;
- SetUpServer(&service, &dup_service, nullptr, nullptr);
- ResetStub();
- std::thread response_stream_handler_thread(HandleServerStreaming<SType>,
- &service, cqs_[0].get());
- std::thread request_stream_handler_thread(HandleClientStreaming<SType>,
- &service, cqs_[1].get());
- std::thread echo_handler_thread(
- HandleEcho<duplicate::EchoTestService::AsyncService>, &dup_service,
- cqs_[2].get(), true);
- TestAllMethods();
- SendEchoToDupService();
- response_stream_handler_thread.join();
- request_stream_handler_thread.join();
- echo_handler_thread.join();
-}
-
-TEST_F(HybridEnd2endTest, GenericEcho) {
- EchoTestService::WithGenericMethod_Echo<TestServiceImpl> service;
- AsyncGenericService generic_service;
- SetUpServer(&service, nullptr, &generic_service, nullptr);
- ResetStub();
- std::thread generic_handler_thread(HandleGenericCall, &generic_service,
- cqs_[0].get());
- TestAllMethods();
- generic_handler_thread.join();
-}
-
-TEST_P(HybridEnd2endTest, CallbackGenericEcho) {
- EchoTestService::WithGenericMethod_Echo<TestServiceImpl> service;
+ GPR_ASSERT(stream->Write(resp));
+ }
+ return Status::OK;
+ }
+};
+
+TEST_F(HybridEnd2endTest,
+ AsyncRequestStreamResponseStream_FullyStreamedDupService) {
+ typedef EchoTestService::WithAsyncMethod_RequestStream<
+ EchoTestService::WithAsyncMethod_ResponseStream<TestServiceImpl>>
+ SType;
+ SType service;
+ FullyStreamedDupPkg dup_service;
+ SetUpServer(&service, &dup_service, nullptr, nullptr, 8192);
+ ResetStub();
+ std::thread response_stream_handler_thread(HandleServerStreaming<SType>,
+ &service, cqs_[0].get());
+ std::thread request_stream_handler_thread(HandleClientStreaming<SType>,
+ &service, cqs_[1].get());
+ TestAllMethods();
+ SendEchoToDupService();
+ SendSimpleServerStreamingToDupService();
+ response_stream_handler_thread.join();
+ request_stream_handler_thread.join();
+}
+
+// Add a second service with one async method.
+TEST_F(HybridEnd2endTest, AsyncRequestStreamResponseStream_AsyncDupService) {
+ typedef EchoTestService::WithAsyncMethod_RequestStream<
+ EchoTestService::WithAsyncMethod_ResponseStream<TestServiceImpl>>
+ SType;
+ SType service;
+ duplicate::EchoTestService::AsyncService dup_service;
+ SetUpServer(&service, &dup_service, nullptr, nullptr);
+ ResetStub();
+ std::thread response_stream_handler_thread(HandleServerStreaming<SType>,
+ &service, cqs_[0].get());
+ std::thread request_stream_handler_thread(HandleClientStreaming<SType>,
+ &service, cqs_[1].get());
+ std::thread echo_handler_thread(
+ HandleEcho<duplicate::EchoTestService::AsyncService>, &dup_service,
+ cqs_[2].get(), true);
+ TestAllMethods();
+ SendEchoToDupService();
+ response_stream_handler_thread.join();
+ request_stream_handler_thread.join();
+ echo_handler_thread.join();
+}
+
+TEST_F(HybridEnd2endTest, GenericEcho) {
+ EchoTestService::WithGenericMethod_Echo<TestServiceImpl> service;
+ AsyncGenericService generic_service;
+ SetUpServer(&service, nullptr, &generic_service, nullptr);
+ ResetStub();
+ std::thread generic_handler_thread(HandleGenericCall, &generic_service,
+ cqs_[0].get());
+ TestAllMethods();
+ generic_handler_thread.join();
+}
+
+TEST_P(HybridEnd2endTest, CallbackGenericEcho) {
+ EchoTestService::WithGenericMethod_Echo<TestServiceImpl> service;
class GenericEchoService : public CallbackGenericService {
- private:
+ private:
ServerGenericBidiReactor* CreateReactor(
GenericCallbackServerContext* context) override {
- EXPECT_EQ(context->method(), "/grpc.testing.EchoTestService/Echo");
+ EXPECT_EQ(context->method(), "/grpc.testing.EchoTestService/Echo");
gpr_log(GPR_DEBUG, "Constructor of generic service %d",
static_cast<int>(context->deadline().time_since_epoch().count()));
-
+
class Reactor : public ServerGenericBidiReactor {
- public:
- Reactor() { StartRead(&request_); }
-
- private:
- void OnDone() override { delete this; }
- void OnReadDone(bool ok) override {
- if (!ok) {
- EXPECT_EQ(reads_complete_, 1);
- } else {
- EXPECT_EQ(reads_complete_++, 0);
- response_ = request_;
- StartWrite(&response_);
- StartRead(&request_);
- }
- }
- void OnWriteDone(bool ok) override {
- Finish(ok ? Status::OK
- : Status(StatusCode::UNKNOWN, "Unexpected failure"));
- }
- ByteBuffer request_;
- ByteBuffer response_;
- std::atomic_int reads_complete_{0};
- };
- return new Reactor;
- }
- } generic_service;
-
- if (!SetUpServer(&service, nullptr, nullptr, &generic_service)) {
- return;
- }
- ResetStub();
- TestAllMethods();
-}
-
-TEST_F(HybridEnd2endTest, GenericEchoAsyncRequestStream) {
- typedef EchoTestService::WithAsyncMethod_RequestStream<
- EchoTestService::WithGenericMethod_Echo<TestServiceImpl>>
- SType;
- SType service;
- AsyncGenericService generic_service;
- SetUpServer(&service, nullptr, &generic_service, nullptr);
- ResetStub();
- std::thread generic_handler_thread(HandleGenericCall, &generic_service,
- cqs_[0].get());
- std::thread request_stream_handler_thread(HandleClientStreaming<SType>,
- &service, cqs_[1].get());
- TestAllMethods();
- generic_handler_thread.join();
- request_stream_handler_thread.join();
-}
-
-// Add a second service with one sync method.
-TEST_F(HybridEnd2endTest, GenericEchoAsyncRequestStream_SyncDupService) {
- typedef EchoTestService::WithAsyncMethod_RequestStream<
- EchoTestService::WithGenericMethod_Echo<TestServiceImpl>>
- SType;
- SType service;
- AsyncGenericService generic_service;
- TestServiceImplDupPkg dup_service;
- SetUpServer(&service, &dup_service, &generic_service, nullptr);
- ResetStub();
- std::thread generic_handler_thread(HandleGenericCall, &generic_service,
- cqs_[0].get());
- std::thread request_stream_handler_thread(HandleClientStreaming<SType>,
- &service, cqs_[1].get());
- TestAllMethods();
- SendEchoToDupService();
- generic_handler_thread.join();
- request_stream_handler_thread.join();
-}
-
-// Add a second service with one async method.
-TEST_F(HybridEnd2endTest, GenericEchoAsyncRequestStream_AsyncDupService) {
- typedef EchoTestService::WithAsyncMethod_RequestStream<
- EchoTestService::WithGenericMethod_Echo<TestServiceImpl>>
- SType;
- SType service;
- AsyncGenericService generic_service;
- duplicate::EchoTestService::AsyncService dup_service;
- SetUpServer(&service, &dup_service, &generic_service, nullptr);
- ResetStub();
- std::thread generic_handler_thread(HandleGenericCall, &generic_service,
- cqs_[0].get());
- std::thread request_stream_handler_thread(HandleClientStreaming<SType>,
- &service, cqs_[1].get());
- std::thread echo_handler_thread(
- HandleEcho<duplicate::EchoTestService::AsyncService>, &dup_service,
- cqs_[2].get(), true);
- TestAllMethods();
- SendEchoToDupService();
- generic_handler_thread.join();
- request_stream_handler_thread.join();
- echo_handler_thread.join();
-}
-
-TEST_F(HybridEnd2endTest, GenericEchoAsyncRequestStreamResponseStream) {
- typedef EchoTestService::WithAsyncMethod_RequestStream<
- EchoTestService::WithGenericMethod_Echo<
- EchoTestService::WithAsyncMethod_ResponseStream<TestServiceImpl>>>
- SType;
- SType service;
- AsyncGenericService generic_service;
- SetUpServer(&service, nullptr, &generic_service, nullptr);
- ResetStub();
- std::thread generic_handler_thread(HandleGenericCall, &generic_service,
- cqs_[0].get());
- std::thread request_stream_handler_thread(HandleClientStreaming<SType>,
- &service, cqs_[1].get());
- std::thread response_stream_handler_thread(HandleServerStreaming<SType>,
- &service, cqs_[2].get());
- TestAllMethods();
- generic_handler_thread.join();
- request_stream_handler_thread.join();
- response_stream_handler_thread.join();
-}
-
-TEST_F(HybridEnd2endTest, GenericEchoRequestStreamAsyncResponseStream) {
- typedef EchoTestService::WithGenericMethod_RequestStream<
- EchoTestService::WithGenericMethod_Echo<
- EchoTestService::WithAsyncMethod_ResponseStream<TestServiceImpl>>>
- SType;
- SType service;
- AsyncGenericService generic_service;
- SetUpServer(&service, nullptr, &generic_service, nullptr);
- ResetStub();
- std::thread generic_handler_thread(HandleGenericCall, &generic_service,
- cqs_[0].get());
- std::thread generic_handler_thread2(HandleGenericCall, &generic_service,
- cqs_[1].get());
- std::thread response_stream_handler_thread(HandleServerStreaming<SType>,
- &service, cqs_[2].get());
- TestAllMethods();
- generic_handler_thread.join();
- generic_handler_thread2.join();
- response_stream_handler_thread.join();
-}
-
-// If WithGenericMethod is called and no generic service is registered, the
-// server will fail to build.
-TEST_F(HybridEnd2endTest, GenericMethodWithoutGenericService) {
- EchoTestService::WithGenericMethod_RequestStream<
- EchoTestService::WithGenericMethod_Echo<
- EchoTestService::WithAsyncMethod_ResponseStream<TestServiceImpl>>>
- service;
- SetUpServer(&service, nullptr, nullptr, nullptr);
- EXPECT_EQ(nullptr, server_.get());
-}
-
-INSTANTIATE_TEST_SUITE_P(HybridEnd2endTest, HybridEnd2endTest,
- ::testing::Bool());
-
-} // namespace
-} // namespace testing
-} // namespace grpc
-
-int main(int argc, char** argv) {
- grpc::testing::TestEnvironment env(argc, argv);
- ::testing::InitGoogleTest(&argc, argv);
- return RUN_ALL_TESTS();
-}
+ public:
+ Reactor() { StartRead(&request_); }
+
+ private:
+ void OnDone() override { delete this; }
+ void OnReadDone(bool ok) override {
+ if (!ok) {
+ EXPECT_EQ(reads_complete_, 1);
+ } else {
+ EXPECT_EQ(reads_complete_++, 0);
+ response_ = request_;
+ StartWrite(&response_);
+ StartRead(&request_);
+ }
+ }
+ void OnWriteDone(bool ok) override {
+ Finish(ok ? Status::OK
+ : Status(StatusCode::UNKNOWN, "Unexpected failure"));
+ }
+ ByteBuffer request_;
+ ByteBuffer response_;
+ std::atomic_int reads_complete_{0};
+ };
+ return new Reactor;
+ }
+ } generic_service;
+
+ if (!SetUpServer(&service, nullptr, nullptr, &generic_service)) {
+ return;
+ }
+ ResetStub();
+ TestAllMethods();
+}
+
+TEST_F(HybridEnd2endTest, GenericEchoAsyncRequestStream) {
+ typedef EchoTestService::WithAsyncMethod_RequestStream<
+ EchoTestService::WithGenericMethod_Echo<TestServiceImpl>>
+ SType;
+ SType service;
+ AsyncGenericService generic_service;
+ SetUpServer(&service, nullptr, &generic_service, nullptr);
+ ResetStub();
+ std::thread generic_handler_thread(HandleGenericCall, &generic_service,
+ cqs_[0].get());
+ std::thread request_stream_handler_thread(HandleClientStreaming<SType>,
+ &service, cqs_[1].get());
+ TestAllMethods();
+ generic_handler_thread.join();
+ request_stream_handler_thread.join();
+}
+
+// Add a second service with one sync method.
+TEST_F(HybridEnd2endTest, GenericEchoAsyncRequestStream_SyncDupService) {
+ typedef EchoTestService::WithAsyncMethod_RequestStream<
+ EchoTestService::WithGenericMethod_Echo<TestServiceImpl>>
+ SType;
+ SType service;
+ AsyncGenericService generic_service;
+ TestServiceImplDupPkg dup_service;
+ SetUpServer(&service, &dup_service, &generic_service, nullptr);
+ ResetStub();
+ std::thread generic_handler_thread(HandleGenericCall, &generic_service,
+ cqs_[0].get());
+ std::thread request_stream_handler_thread(HandleClientStreaming<SType>,
+ &service, cqs_[1].get());
+ TestAllMethods();
+ SendEchoToDupService();
+ generic_handler_thread.join();
+ request_stream_handler_thread.join();
+}
+
+// Add a second service with one async method.
+TEST_F(HybridEnd2endTest, GenericEchoAsyncRequestStream_AsyncDupService) {
+ typedef EchoTestService::WithAsyncMethod_RequestStream<
+ EchoTestService::WithGenericMethod_Echo<TestServiceImpl>>
+ SType;
+ SType service;
+ AsyncGenericService generic_service;
+ duplicate::EchoTestService::AsyncService dup_service;
+ SetUpServer(&service, &dup_service, &generic_service, nullptr);
+ ResetStub();
+ std::thread generic_handler_thread(HandleGenericCall, &generic_service,
+ cqs_[0].get());
+ std::thread request_stream_handler_thread(HandleClientStreaming<SType>,
+ &service, cqs_[1].get());
+ std::thread echo_handler_thread(
+ HandleEcho<duplicate::EchoTestService::AsyncService>, &dup_service,
+ cqs_[2].get(), true);
+ TestAllMethods();
+ SendEchoToDupService();
+ generic_handler_thread.join();
+ request_stream_handler_thread.join();
+ echo_handler_thread.join();
+}
+
+TEST_F(HybridEnd2endTest, GenericEchoAsyncRequestStreamResponseStream) {
+ typedef EchoTestService::WithAsyncMethod_RequestStream<
+ EchoTestService::WithGenericMethod_Echo<
+ EchoTestService::WithAsyncMethod_ResponseStream<TestServiceImpl>>>
+ SType;
+ SType service;
+ AsyncGenericService generic_service;
+ SetUpServer(&service, nullptr, &generic_service, nullptr);
+ ResetStub();
+ std::thread generic_handler_thread(HandleGenericCall, &generic_service,
+ cqs_[0].get());
+ std::thread request_stream_handler_thread(HandleClientStreaming<SType>,
+ &service, cqs_[1].get());
+ std::thread response_stream_handler_thread(HandleServerStreaming<SType>,
+ &service, cqs_[2].get());
+ TestAllMethods();
+ generic_handler_thread.join();
+ request_stream_handler_thread.join();
+ response_stream_handler_thread.join();
+}
+
+TEST_F(HybridEnd2endTest, GenericEchoRequestStreamAsyncResponseStream) {
+ typedef EchoTestService::WithGenericMethod_RequestStream<
+ EchoTestService::WithGenericMethod_Echo<
+ EchoTestService::WithAsyncMethod_ResponseStream<TestServiceImpl>>>
+ SType;
+ SType service;
+ AsyncGenericService generic_service;
+ SetUpServer(&service, nullptr, &generic_service, nullptr);
+ ResetStub();
+ std::thread generic_handler_thread(HandleGenericCall, &generic_service,
+ cqs_[0].get());
+ std::thread generic_handler_thread2(HandleGenericCall, &generic_service,
+ cqs_[1].get());
+ std::thread response_stream_handler_thread(HandleServerStreaming<SType>,
+ &service, cqs_[2].get());
+ TestAllMethods();
+ generic_handler_thread.join();
+ generic_handler_thread2.join();
+ response_stream_handler_thread.join();
+}
+
+// If WithGenericMethod is called and no generic service is registered, the
+// server will fail to build.
+TEST_F(HybridEnd2endTest, GenericMethodWithoutGenericService) {
+ EchoTestService::WithGenericMethod_RequestStream<
+ EchoTestService::WithGenericMethod_Echo<
+ EchoTestService::WithAsyncMethod_ResponseStream<TestServiceImpl>>>
+ service;
+ SetUpServer(&service, nullptr, nullptr, nullptr);
+ EXPECT_EQ(nullptr, server_.get());
+}
+
+INSTANTIATE_TEST_SUITE_P(HybridEnd2endTest, HybridEnd2endTest,
+ ::testing::Bool());
+
+} // namespace
+} // namespace testing
+} // namespace grpc
+
+int main(int argc, char** argv) {
+ grpc::testing::TestEnvironment env(argc, argv);
+ ::testing::InitGoogleTest(&argc, argv);
+ return RUN_ALL_TESTS();
+}
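The hybrid tests above route the Echo method to a generic (ByteBuffer-based) handler, or in the callback variant to a generic bidi reactor, while the other methods stay on generated code. From the client's point of view nothing changes: an ordinary generated stub call reaches the generic handler over the same wire format. A minimal sketch of that client side, assuming a placeholder target address and insecure credentials (neither is taken from the test):

#include <grpcpp/client_context.h>
#include <grpcpp/create_channel.h>
#include <grpcpp/security/credentials.h>
#include "src/proto/grpc/testing/echo.grpc.pb.h"

// Sketch only: "localhost:50051" and the function name are illustrative.
void CallGenericBackedEcho() {
  auto channel = grpc::CreateChannel("localhost:50051",
                                     grpc::InsecureChannelCredentials());
  auto stub = grpc::testing::EchoTestService::NewStub(channel);
  grpc::ClientContext ctx;
  grpc::testing::EchoRequest req;
  grpc::testing::EchoResponse resp;
  req.set_message("Hello");
  // Whether the server handles this with generated code, AsyncGenericService,
  // or a callback generic reactor is invisible to the client.
  grpc::Status s = stub->Echo(&ctx, req, &resp);  // inspect s.ok() as needed
}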
diff --git a/contrib/libs/grpc/test/cpp/end2end/interceptors_util.cc b/contrib/libs/grpc/test/cpp/end2end/interceptors_util.cc
index 2d9bcfcbec..ff88953651 100644
--- a/contrib/libs/grpc/test/cpp/end2end/interceptors_util.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/interceptors_util.cc
@@ -1,102 +1,102 @@
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include "test/cpp/end2end/interceptors_util.h"
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include "test/cpp/end2end/interceptors_util.h"
#include <util/string/cast.h>
-
-namespace grpc {
-namespace testing {
-
-std::atomic<int> DummyInterceptor::num_times_run_;
-std::atomic<int> DummyInterceptor::num_times_run_reverse_;
-std::atomic<int> DummyInterceptor::num_times_cancel_;
-
-void MakeCall(const std::shared_ptr<Channel>& channel) {
- auto stub = grpc::testing::EchoTestService::NewStub(channel);
- ClientContext ctx;
- EchoRequest req;
- req.mutable_param()->set_echo_metadata(true);
- ctx.AddMetadata("testkey", "testvalue");
- req.set_message("Hello");
- EchoResponse resp;
- Status s = stub->Echo(&ctx, req, &resp);
- EXPECT_EQ(s.ok(), true);
- EXPECT_EQ(resp.message(), "Hello");
-}
-
-void MakeClientStreamingCall(const std::shared_ptr<Channel>& channel) {
- auto stub = grpc::testing::EchoTestService::NewStub(channel);
- ClientContext ctx;
- EchoRequest req;
- req.mutable_param()->set_echo_metadata(true);
- ctx.AddMetadata("testkey", "testvalue");
- req.set_message("Hello");
- EchoResponse resp;
+
+namespace grpc {
+namespace testing {
+
+std::atomic<int> DummyInterceptor::num_times_run_;
+std::atomic<int> DummyInterceptor::num_times_run_reverse_;
+std::atomic<int> DummyInterceptor::num_times_cancel_;
+
+void MakeCall(const std::shared_ptr<Channel>& channel) {
+ auto stub = grpc::testing::EchoTestService::NewStub(channel);
+ ClientContext ctx;
+ EchoRequest req;
+ req.mutable_param()->set_echo_metadata(true);
+ ctx.AddMetadata("testkey", "testvalue");
+ req.set_message("Hello");
+ EchoResponse resp;
+ Status s = stub->Echo(&ctx, req, &resp);
+ EXPECT_EQ(s.ok(), true);
+ EXPECT_EQ(resp.message(), "Hello");
+}
+
+void MakeClientStreamingCall(const std::shared_ptr<Channel>& channel) {
+ auto stub = grpc::testing::EchoTestService::NewStub(channel);
+ ClientContext ctx;
+ EchoRequest req;
+ req.mutable_param()->set_echo_metadata(true);
+ ctx.AddMetadata("testkey", "testvalue");
+ req.set_message("Hello");
+ EchoResponse resp;
string expected_resp = "";
- auto writer = stub->RequestStream(&ctx, &resp);
- for (int i = 0; i < kNumStreamingMessages; i++) {
- writer->Write(req);
- expected_resp += "Hello";
- }
- writer->WritesDone();
- Status s = writer->Finish();
- EXPECT_EQ(s.ok(), true);
- EXPECT_EQ(resp.message(), expected_resp);
-}
-
-void MakeServerStreamingCall(const std::shared_ptr<Channel>& channel) {
- auto stub = grpc::testing::EchoTestService::NewStub(channel);
- ClientContext ctx;
- EchoRequest req;
- req.mutable_param()->set_echo_metadata(true);
- ctx.AddMetadata("testkey", "testvalue");
- req.set_message("Hello");
- EchoResponse resp;
- auto reader = stub->ResponseStream(&ctx, req);
- int count = 0;
- while (reader->Read(&resp)) {
- EXPECT_EQ(resp.message(), "Hello");
- count++;
- }
- ASSERT_EQ(count, kNumStreamingMessages);
- Status s = reader->Finish();
- EXPECT_EQ(s.ok(), true);
-}
-
-void MakeBidiStreamingCall(const std::shared_ptr<Channel>& channel) {
- auto stub = grpc::testing::EchoTestService::NewStub(channel);
- ClientContext ctx;
- EchoRequest req;
- EchoResponse resp;
- ctx.AddMetadata("testkey", "testvalue");
+ auto writer = stub->RequestStream(&ctx, &resp);
+ for (int i = 0; i < kNumStreamingMessages; i++) {
+ writer->Write(req);
+ expected_resp += "Hello";
+ }
+ writer->WritesDone();
+ Status s = writer->Finish();
+ EXPECT_EQ(s.ok(), true);
+ EXPECT_EQ(resp.message(), expected_resp);
+}
+
+void MakeServerStreamingCall(const std::shared_ptr<Channel>& channel) {
+ auto stub = grpc::testing::EchoTestService::NewStub(channel);
+ ClientContext ctx;
+ EchoRequest req;
req.mutable_param()->set_echo_metadata(true);
- auto stream = stub->BidiStream(&ctx);
- for (auto i = 0; i < kNumStreamingMessages; i++) {
+ ctx.AddMetadata("testkey", "testvalue");
+ req.set_message("Hello");
+ EchoResponse resp;
+ auto reader = stub->ResponseStream(&ctx, req);
+ int count = 0;
+ while (reader->Read(&resp)) {
+ EXPECT_EQ(resp.message(), "Hello");
+ count++;
+ }
+ ASSERT_EQ(count, kNumStreamingMessages);
+ Status s = reader->Finish();
+ EXPECT_EQ(s.ok(), true);
+}
+
+void MakeBidiStreamingCall(const std::shared_ptr<Channel>& channel) {
+ auto stub = grpc::testing::EchoTestService::NewStub(channel);
+ ClientContext ctx;
+ EchoRequest req;
+ EchoResponse resp;
+ ctx.AddMetadata("testkey", "testvalue");
+ req.mutable_param()->set_echo_metadata(true);
+ auto stream = stub->BidiStream(&ctx);
+ for (auto i = 0; i < kNumStreamingMessages; i++) {
req.set_message(TString("Hello") + ::ToString(i));
- stream->Write(req);
- stream->Read(&resp);
- EXPECT_EQ(req.message(), resp.message());
- }
- ASSERT_TRUE(stream->WritesDone());
- Status s = stream->Finish();
- EXPECT_EQ(s.ok(), true);
-}
-
+ stream->Write(req);
+ stream->Read(&resp);
+ EXPECT_EQ(req.message(), resp.message());
+ }
+ ASSERT_TRUE(stream->WritesDone());
+ Status s = stream->Finish();
+ EXPECT_EQ(s.ok(), true);
+}
+
void MakeAsyncCQCall(const std::shared_ptr<Channel>& channel) {
auto stub = grpc::testing::EchoTestService::NewStub(channel);
CompletionQueue cq;
@@ -152,63 +152,63 @@ void MakeAsyncCQBidiStreamingCall(const std::shared_ptr<Channel>& /*channel*/) {
// TODO(yashykt) : Fill this out
}
-void MakeCallbackCall(const std::shared_ptr<Channel>& channel) {
- auto stub = grpc::testing::EchoTestService::NewStub(channel);
- ClientContext ctx;
- EchoRequest req;
- std::mutex mu;
- std::condition_variable cv;
- bool done = false;
- req.mutable_param()->set_echo_metadata(true);
- ctx.AddMetadata("testkey", "testvalue");
- req.set_message("Hello");
- EchoResponse resp;
- stub->experimental_async()->Echo(&ctx, &req, &resp,
- [&resp, &mu, &done, &cv](Status s) {
- EXPECT_EQ(s.ok(), true);
- EXPECT_EQ(resp.message(), "Hello");
- std::lock_guard<std::mutex> l(mu);
- done = true;
- cv.notify_one();
- });
- std::unique_lock<std::mutex> l(mu);
- while (!done) {
- cv.wait(l);
- }
-}
-
-bool CheckMetadata(const std::multimap<grpc::string_ref, grpc::string_ref>& map,
- const string& key, const string& value) {
- for (const auto& pair : map) {
- if (pair.first.starts_with(key) && pair.second.starts_with(value)) {
- return true;
- }
- }
- return false;
-}
-
+void MakeCallbackCall(const std::shared_ptr<Channel>& channel) {
+ auto stub = grpc::testing::EchoTestService::NewStub(channel);
+ ClientContext ctx;
+ EchoRequest req;
+ std::mutex mu;
+ std::condition_variable cv;
+ bool done = false;
+ req.mutable_param()->set_echo_metadata(true);
+ ctx.AddMetadata("testkey", "testvalue");
+ req.set_message("Hello");
+ EchoResponse resp;
+ stub->experimental_async()->Echo(&ctx, &req, &resp,
+ [&resp, &mu, &done, &cv](Status s) {
+ EXPECT_EQ(s.ok(), true);
+ EXPECT_EQ(resp.message(), "Hello");
+ std::lock_guard<std::mutex> l(mu);
+ done = true;
+ cv.notify_one();
+ });
+ std::unique_lock<std::mutex> l(mu);
+ while (!done) {
+ cv.wait(l);
+ }
+}
+
+bool CheckMetadata(const std::multimap<grpc::string_ref, grpc::string_ref>& map,
+ const string& key, const string& value) {
+ for (const auto& pair : map) {
+ if (pair.first.starts_with(key) && pair.second.starts_with(value)) {
+ return true;
+ }
+ }
+ return false;
+}
+
bool CheckMetadata(const std::multimap<TString, TString>& map,
- const string& key, const string& value) {
- for (const auto& pair : map) {
- if (pair.first == key.c_str() && pair.second == value.c_str()) {
- return true;
- }
- }
- return false;
-}
-
-std::vector<std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
-CreateDummyClientInterceptors() {
- std::vector<std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
- creators;
- // Add 20 dummy interceptors before the hijacking interceptor
- creators.reserve(20);
- for (auto i = 0; i < 20; i++) {
- creators.push_back(std::unique_ptr<DummyInterceptorFactory>(
- new DummyInterceptorFactory()));
- }
- return creators;
-}
-
-} // namespace testing
-} // namespace grpc
+ const string& key, const string& value) {
+ for (const auto& pair : map) {
+ if (pair.first == key.c_str() && pair.second == value.c_str()) {
+ return true;
+ }
+ }
+ return false;
+}
+
+std::vector<std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
+CreateDummyClientInterceptors() {
+ std::vector<std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
+ creators;
+ // Add 20 dummy interceptors before the hijacking interceptor
+ creators.reserve(20);
+ for (auto i = 0; i < 20; i++) {
+ creators.push_back(std::unique_ptr<DummyInterceptorFactory>(
+ new DummyInterceptorFactory()));
+ }
+ return creators;
+}
+
+} // namespace testing
+} // namespace grpc
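The helpers in this file (CreateDummyClientInterceptors, MakeCall, and the DummyInterceptor counters) are normally combined by handing the factories to the channel at construction time and then checking the per-hook counters. A hedged sketch of that wiring, assuming the experimental CreateCustomChannelWithInterceptors factory, a placeholder server address, and a running echo server behind it:

#include <grpcpp/create_channel.h>
#include <grpcpp/security/credentials.h>
#include <grpcpp/support/channel_arguments.h>
#include "test/cpp/end2end/interceptors_util.h"

// Sketch only: the target address is a placeholder and the surrounding test
// fixture (a running echo server) is assumed.
void DummyInterceptorSmokeCheck() {
  grpc::testing::DummyInterceptor::Reset();
  grpc::ChannelArguments args;
  auto channel = grpc::experimental::CreateCustomChannelWithInterceptors(
      "localhost:50051", grpc::InsecureChannelCredentials(), args,
      grpc::testing::CreateDummyClientInterceptors());
  grpc::testing::MakeCall(channel);
  // One call through a channel with 20 dummy interceptors should bump each
  // interceptor's send-side and receive-side counters exactly once.
  EXPECT_EQ(grpc::testing::DummyInterceptor::GetNumTimesRun(), 20);
}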
diff --git a/contrib/libs/grpc/test/cpp/end2end/interceptors_util.h b/contrib/libs/grpc/test/cpp/end2end/interceptors_util.h
index 832a3870c8..c95170bbbc 100644
--- a/contrib/libs/grpc/test/cpp/end2end/interceptors_util.h
+++ b/contrib/libs/grpc/test/cpp/end2end/interceptors_util.h
@@ -1,107 +1,107 @@
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include <condition_variable>
-
-#include <grpcpp/channel.h>
-
-#include "src/proto/grpc/testing/echo.grpc.pb.h"
-#include "test/cpp/util/string_ref_helper.h"
-
-#include <gtest/gtest.h>
-
-namespace grpc {
-namespace testing {
-/* This interceptor does nothing; it just keeps a global count of the number
- * of times it was invoked. */
-class DummyInterceptor : public experimental::Interceptor {
- public:
- DummyInterceptor() {}
-
- virtual void Intercept(experimental::InterceptorBatchMethods* methods) {
- if (methods->QueryInterceptionHookPoint(
- experimental::InterceptionHookPoints::PRE_SEND_INITIAL_METADATA)) {
- num_times_run_++;
- } else if (methods->QueryInterceptionHookPoint(
- experimental::InterceptionHookPoints::
- POST_RECV_INITIAL_METADATA)) {
- num_times_run_reverse_++;
- } else if (methods->QueryInterceptionHookPoint(
- experimental::InterceptionHookPoints::PRE_SEND_CANCEL)) {
- num_times_cancel_++;
- }
- methods->Proceed();
- }
-
- static void Reset() {
- num_times_run_.store(0);
- num_times_run_reverse_.store(0);
- num_times_cancel_.store(0);
- }
-
- static int GetNumTimesRun() {
- EXPECT_EQ(num_times_run_.load(), num_times_run_reverse_.load());
- return num_times_run_.load();
- }
-
- static int GetNumTimesCancel() { return num_times_cancel_.load(); }
-
- private:
- static std::atomic<int> num_times_run_;
- static std::atomic<int> num_times_run_reverse_;
- static std::atomic<int> num_times_cancel_;
-};
-
-class DummyInterceptorFactory
- : public experimental::ClientInterceptorFactoryInterface,
- public experimental::ServerInterceptorFactoryInterface {
- public:
- virtual experimental::Interceptor* CreateClientInterceptor(
- experimental::ClientRpcInfo* /*info*/) override {
- return new DummyInterceptor();
- }
-
- virtual experimental::Interceptor* CreateServerInterceptor(
- experimental::ServerRpcInfo* /*info*/) override {
- return new DummyInterceptor();
- }
-};
-
-/* This interceptor factory returns nullptr on interceptor creation */
-class NullInterceptorFactory
- : public experimental::ClientInterceptorFactoryInterface,
- public experimental::ServerInterceptorFactoryInterface {
- public:
- virtual experimental::Interceptor* CreateClientInterceptor(
- experimental::ClientRpcInfo* /*info*/) override {
- return nullptr;
- }
-
- virtual experimental::Interceptor* CreateServerInterceptor(
- experimental::ServerRpcInfo* /*info*/) override {
- return nullptr;
- }
-};
-
-class EchoTestServiceStreamingImpl : public EchoTestService::Service {
- public:
- ~EchoTestServiceStreamingImpl() override {}
-
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <condition_variable>
+
+#include <grpcpp/channel.h>
+
+#include "src/proto/grpc/testing/echo.grpc.pb.h"
+#include "test/cpp/util/string_ref_helper.h"
+
+#include <gtest/gtest.h>
+
+namespace grpc {
+namespace testing {
+/* This interceptor does nothing; it just keeps a global count of the number
+ * of times it was invoked. */
+class DummyInterceptor : public experimental::Interceptor {
+ public:
+ DummyInterceptor() {}
+
+ virtual void Intercept(experimental::InterceptorBatchMethods* methods) {
+ if (methods->QueryInterceptionHookPoint(
+ experimental::InterceptionHookPoints::PRE_SEND_INITIAL_METADATA)) {
+ num_times_run_++;
+ } else if (methods->QueryInterceptionHookPoint(
+ experimental::InterceptionHookPoints::
+ POST_RECV_INITIAL_METADATA)) {
+ num_times_run_reverse_++;
+ } else if (methods->QueryInterceptionHookPoint(
+ experimental::InterceptionHookPoints::PRE_SEND_CANCEL)) {
+ num_times_cancel_++;
+ }
+ methods->Proceed();
+ }
+
+ static void Reset() {
+ num_times_run_.store(0);
+ num_times_run_reverse_.store(0);
+ num_times_cancel_.store(0);
+ }
+
+ static int GetNumTimesRun() {
+ EXPECT_EQ(num_times_run_.load(), num_times_run_reverse_.load());
+ return num_times_run_.load();
+ }
+
+ static int GetNumTimesCancel() { return num_times_cancel_.load(); }
+
+ private:
+ static std::atomic<int> num_times_run_;
+ static std::atomic<int> num_times_run_reverse_;
+ static std::atomic<int> num_times_cancel_;
+};
+
+class DummyInterceptorFactory
+ : public experimental::ClientInterceptorFactoryInterface,
+ public experimental::ServerInterceptorFactoryInterface {
+ public:
+ virtual experimental::Interceptor* CreateClientInterceptor(
+ experimental::ClientRpcInfo* /*info*/) override {
+ return new DummyInterceptor();
+ }
+
+ virtual experimental::Interceptor* CreateServerInterceptor(
+ experimental::ServerRpcInfo* /*info*/) override {
+ return new DummyInterceptor();
+ }
+};
+
+/* This interceptor factory returns nullptr on interceptor creation */
+class NullInterceptorFactory
+ : public experimental::ClientInterceptorFactoryInterface,
+ public experimental::ServerInterceptorFactoryInterface {
+ public:
+ virtual experimental::Interceptor* CreateClientInterceptor(
+ experimental::ClientRpcInfo* /*info*/) override {
+ return nullptr;
+ }
+
+ virtual experimental::Interceptor* CreateServerInterceptor(
+ experimental::ServerRpcInfo* /*info*/) override {
+ return nullptr;
+ }
+};
+
+class EchoTestServiceStreamingImpl : public EchoTestService::Service {
+ public:
+ ~EchoTestServiceStreamingImpl() override {}
+
Status Echo(ServerContext* context, const EchoRequest* request,
EchoResponse* response) override {
auto client_metadata = context->client_metadata();
@@ -112,66 +112,66 @@ class EchoTestServiceStreamingImpl : public EchoTestService::Service {
return Status::OK;
}
- Status BidiStream(
- ServerContext* context,
- grpc::ServerReaderWriter<EchoResponse, EchoRequest>* stream) override {
- EchoRequest req;
- EchoResponse resp;
- auto client_metadata = context->client_metadata();
- for (const auto& pair : client_metadata) {
- context->AddTrailingMetadata(ToString(pair.first), ToString(pair.second));
- }
-
- while (stream->Read(&req)) {
- resp.set_message(req.message());
- EXPECT_TRUE(stream->Write(resp, grpc::WriteOptions()));
- }
- return Status::OK;
- }
-
- Status RequestStream(ServerContext* context,
- ServerReader<EchoRequest>* reader,
- EchoResponse* resp) override {
- auto client_metadata = context->client_metadata();
- for (const auto& pair : client_metadata) {
- context->AddTrailingMetadata(ToString(pair.first), ToString(pair.second));
- }
-
- EchoRequest req;
+ Status BidiStream(
+ ServerContext* context,
+ grpc::ServerReaderWriter<EchoResponse, EchoRequest>* stream) override {
+ EchoRequest req;
+ EchoResponse resp;
+ auto client_metadata = context->client_metadata();
+ for (const auto& pair : client_metadata) {
+ context->AddTrailingMetadata(ToString(pair.first), ToString(pair.second));
+ }
+
+ while (stream->Read(&req)) {
+ resp.set_message(req.message());
+ EXPECT_TRUE(stream->Write(resp, grpc::WriteOptions()));
+ }
+ return Status::OK;
+ }
+
+ Status RequestStream(ServerContext* context,
+ ServerReader<EchoRequest>* reader,
+ EchoResponse* resp) override {
+ auto client_metadata = context->client_metadata();
+ for (const auto& pair : client_metadata) {
+ context->AddTrailingMetadata(ToString(pair.first), ToString(pair.second));
+ }
+
+ EchoRequest req;
string response_str = "";
- while (reader->Read(&req)) {
- response_str += req.message();
- }
- resp->set_message(response_str);
- return Status::OK;
- }
-
- Status ResponseStream(ServerContext* context, const EchoRequest* req,
- ServerWriter<EchoResponse>* writer) override {
- auto client_metadata = context->client_metadata();
- for (const auto& pair : client_metadata) {
- context->AddTrailingMetadata(ToString(pair.first), ToString(pair.second));
- }
-
- EchoResponse resp;
- resp.set_message(req->message());
- for (int i = 0; i < 10; i++) {
- EXPECT_TRUE(writer->Write(resp));
- }
- return Status::OK;
- }
-};
-
-constexpr int kNumStreamingMessages = 10;
-
-void MakeCall(const std::shared_ptr<Channel>& channel);
-
-void MakeClientStreamingCall(const std::shared_ptr<Channel>& channel);
-
-void MakeServerStreamingCall(const std::shared_ptr<Channel>& channel);
-
-void MakeBidiStreamingCall(const std::shared_ptr<Channel>& channel);
-
+ while (reader->Read(&req)) {
+ response_str += req.message();
+ }
+ resp->set_message(response_str);
+ return Status::OK;
+ }
+
+ Status ResponseStream(ServerContext* context, const EchoRequest* req,
+ ServerWriter<EchoResponse>* writer) override {
+ auto client_metadata = context->client_metadata();
+ for (const auto& pair : client_metadata) {
+ context->AddTrailingMetadata(ToString(pair.first), ToString(pair.second));
+ }
+
+ EchoResponse resp;
+ resp.set_message(req->message());
+ for (int i = 0; i < 10; i++) {
+ EXPECT_TRUE(writer->Write(resp));
+ }
+ return Status::OK;
+ }
+};
+
+constexpr int kNumStreamingMessages = 10;
+
+void MakeCall(const std::shared_ptr<Channel>& channel);
+
+void MakeClientStreamingCall(const std::shared_ptr<Channel>& channel);
+
+void MakeServerStreamingCall(const std::shared_ptr<Channel>& channel);
+
+void MakeBidiStreamingCall(const std::shared_ptr<Channel>& channel);
+
void MakeAsyncCQCall(const std::shared_ptr<Channel>& channel);
void MakeAsyncCQClientStreamingCall(const std::shared_ptr<Channel>& channel);
@@ -180,138 +180,138 @@ void MakeAsyncCQServerStreamingCall(const std::shared_ptr<Channel>& channel);
void MakeAsyncCQBidiStreamingCall(const std::shared_ptr<Channel>& channel);
-void MakeCallbackCall(const std::shared_ptr<Channel>& channel);
-
-bool CheckMetadata(const std::multimap<grpc::string_ref, grpc::string_ref>& map,
- const string& key, const string& value);
-
+void MakeCallbackCall(const std::shared_ptr<Channel>& channel);
+
+bool CheckMetadata(const std::multimap<grpc::string_ref, grpc::string_ref>& map,
+ const string& key, const string& value);
+
bool CheckMetadata(const std::multimap<TString, TString>& map,
- const string& key, const string& value);
-
-std::vector<std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
-CreateDummyClientInterceptors();
-
-inline void* tag(int i) { return (void*)static_cast<intptr_t>(i); }
-inline int detag(void* p) {
- return static_cast<int>(reinterpret_cast<intptr_t>(p));
-}
-
-class Verifier {
- public:
- Verifier() : lambda_run_(false) {}
- // Expect sets the expected ok value for a specific tag
- Verifier& Expect(int i, bool expect_ok) {
- return ExpectUnless(i, expect_ok, false);
- }
- // ExpectUnless sets the expected ok value for a specific tag
- // unless the tag was already marked seen (as a result of ExpectMaybe)
- Verifier& ExpectUnless(int i, bool expect_ok, bool seen) {
- if (!seen) {
- expectations_[tag(i)] = expect_ok;
- }
- return *this;
- }
- // ExpectMaybe sets the expected ok value for a specific tag, but does not
- // require it to appear. If it does appear, *seen is set to true.
- Verifier& ExpectMaybe(int i, bool expect_ok, bool* seen) {
- if (!*seen) {
- maybe_expectations_[tag(i)] = MaybeExpect{expect_ok, seen};
- }
- return *this;
- }
-
- // Next waits for 1 async tag to complete, checks its
- // expectations, and returns the tag
- int Next(CompletionQueue* cq, bool ignore_ok) {
- bool ok;
- void* got_tag;
- EXPECT_TRUE(cq->Next(&got_tag, &ok));
- GotTag(got_tag, ok, ignore_ok);
- return detag(got_tag);
- }
-
- template <typename T>
- CompletionQueue::NextStatus DoOnceThenAsyncNext(
- CompletionQueue* cq, void** got_tag, bool* ok, T deadline,
- std::function<void(void)> lambda) {
- if (lambda_run_) {
- return cq->AsyncNext(got_tag, ok, deadline);
- } else {
- lambda_run_ = true;
- return cq->DoThenAsyncNext(lambda, got_tag, ok, deadline);
- }
- }
-
- // Verify keeps calling Next until all currently set
- // expected tags are complete
- void Verify(CompletionQueue* cq) { Verify(cq, false); }
-
- // This version of Verify allows optionally ignoring the
- // outcome of the expectation
- void Verify(CompletionQueue* cq, bool ignore_ok) {
- GPR_ASSERT(!expectations_.empty() || !maybe_expectations_.empty());
- while (!expectations_.empty()) {
- Next(cq, ignore_ok);
- }
- }
-
- // This version of Verify stops after a certain deadline and uses the
- // DoThenAsyncNext API to call the lambda.
- void Verify(CompletionQueue* cq,
- std::chrono::system_clock::time_point deadline,
- const std::function<void(void)>& lambda) {
- if (expectations_.empty()) {
- bool ok;
- void* got_tag;
- EXPECT_EQ(DoOnceThenAsyncNext(cq, &got_tag, &ok, deadline, lambda),
- CompletionQueue::TIMEOUT);
- } else {
- while (!expectations_.empty()) {
- bool ok;
- void* got_tag;
- EXPECT_EQ(DoOnceThenAsyncNext(cq, &got_tag, &ok, deadline, lambda),
- CompletionQueue::GOT_EVENT);
- GotTag(got_tag, ok, false);
- }
- }
- }
-
- private:
- void GotTag(void* got_tag, bool ok, bool ignore_ok) {
- auto it = expectations_.find(got_tag);
- if (it != expectations_.end()) {
- if (!ignore_ok) {
- EXPECT_EQ(it->second, ok);
- }
- expectations_.erase(it);
- } else {
- auto it2 = maybe_expectations_.find(got_tag);
- if (it2 != maybe_expectations_.end()) {
- if (it2->second.seen != nullptr) {
- EXPECT_FALSE(*it2->second.seen);
- *it2->second.seen = true;
- }
- if (!ignore_ok) {
- EXPECT_EQ(it2->second.ok, ok);
- }
- } else {
- gpr_log(GPR_ERROR, "Unexpected tag: %p", got_tag);
- abort();
- }
- }
- }
-
- struct MaybeExpect {
- bool ok;
- bool* seen;
- };
-
- std::map<void*, bool> expectations_;
- std::map<void*, MaybeExpect> maybe_expectations_;
- bool lambda_run_;
-};
-
-} // namespace testing
-} // namespace grpc
+ const string& key, const string& value);
+
+std::vector<std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
+CreateDummyClientInterceptors();
+
+inline void* tag(int i) { return (void*)static_cast<intptr_t>(i); }
+inline int detag(void* p) {
+ return static_cast<int>(reinterpret_cast<intptr_t>(p));
+}
+
+class Verifier {
+ public:
+ Verifier() : lambda_run_(false) {}
+ // Expect sets the expected ok value for a specific tag
+ Verifier& Expect(int i, bool expect_ok) {
+ return ExpectUnless(i, expect_ok, false);
+ }
+ // ExpectUnless sets the expected ok value for a specific tag
+ // unless the tag was already marked seen (as a result of ExpectMaybe)
+ Verifier& ExpectUnless(int i, bool expect_ok, bool seen) {
+ if (!seen) {
+ expectations_[tag(i)] = expect_ok;
+ }
+ return *this;
+ }
+ // ExpectMaybe sets the expected ok value for a specific tag, but does not
+ // require it to appear. If it does appear, *seen is set to true.
+ Verifier& ExpectMaybe(int i, bool expect_ok, bool* seen) {
+ if (!*seen) {
+ maybe_expectations_[tag(i)] = MaybeExpect{expect_ok, seen};
+ }
+ return *this;
+ }
+
+ // Next waits for 1 async tag to complete, checks its
+ // expectations, and returns the tag
+ int Next(CompletionQueue* cq, bool ignore_ok) {
+ bool ok;
+ void* got_tag;
+ EXPECT_TRUE(cq->Next(&got_tag, &ok));
+ GotTag(got_tag, ok, ignore_ok);
+ return detag(got_tag);
+ }
+
+ template <typename T>
+ CompletionQueue::NextStatus DoOnceThenAsyncNext(
+ CompletionQueue* cq, void** got_tag, bool* ok, T deadline,
+ std::function<void(void)> lambda) {
+ if (lambda_run_) {
+ return cq->AsyncNext(got_tag, ok, deadline);
+ } else {
+ lambda_run_ = true;
+ return cq->DoThenAsyncNext(lambda, got_tag, ok, deadline);
+ }
+ }
+
+ // Verify keeps calling Next until all currently set
+ // expected tags are complete
+ void Verify(CompletionQueue* cq) { Verify(cq, false); }
+
+ // This version of Verify allows optionally ignoring the
+ // outcome of the expectation
+ void Verify(CompletionQueue* cq, bool ignore_ok) {
+ GPR_ASSERT(!expectations_.empty() || !maybe_expectations_.empty());
+ while (!expectations_.empty()) {
+ Next(cq, ignore_ok);
+ }
+ }
+
+ // This version of Verify stops after a certain deadline and uses the
+ // DoThenAsyncNext API to call the lambda.
+ void Verify(CompletionQueue* cq,
+ std::chrono::system_clock::time_point deadline,
+ const std::function<void(void)>& lambda) {
+ if (expectations_.empty()) {
+ bool ok;
+ void* got_tag;
+ EXPECT_EQ(DoOnceThenAsyncNext(cq, &got_tag, &ok, deadline, lambda),
+ CompletionQueue::TIMEOUT);
+ } else {
+ while (!expectations_.empty()) {
+ bool ok;
+ void* got_tag;
+ EXPECT_EQ(DoOnceThenAsyncNext(cq, &got_tag, &ok, deadline, lambda),
+ CompletionQueue::GOT_EVENT);
+ GotTag(got_tag, ok, false);
+ }
+ }
+ }
+
+ private:
+ void GotTag(void* got_tag, bool ok, bool ignore_ok) {
+ auto it = expectations_.find(got_tag);
+ if (it != expectations_.end()) {
+ if (!ignore_ok) {
+ EXPECT_EQ(it->second, ok);
+ }
+ expectations_.erase(it);
+ } else {
+ auto it2 = maybe_expectations_.find(got_tag);
+ if (it2 != maybe_expectations_.end()) {
+ if (it2->second.seen != nullptr) {
+ EXPECT_FALSE(*it2->second.seen);
+ *it2->second.seen = true;
+ }
+ if (!ignore_ok) {
+ EXPECT_EQ(it2->second.ok, ok);
+ }
+ } else {
+ gpr_log(GPR_ERROR, "Unexpected tag: %p", got_tag);
+ abort();
+ }
+ }
+ }
+
+ struct MaybeExpect {
+ bool ok;
+ bool* seen;
+ };
+
+ std::map<void*, bool> expectations_;
+ std::map<void*, MaybeExpect> maybe_expectations_;
+ bool lambda_run_;
+};
+
+} // namespace testing
+} // namespace grpc
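The Verifier defined above encapsulates the tag/ok bookkeeping for completion-queue tests: Expect records what a tag should report, and Next/Verify drain the queue and compare. A small usage sketch, assuming a generated EchoTestService stub connected to a healthy echo server (neither is shown here):

#include <grpcpp/completion_queue.h>
#include "test/cpp/end2end/interceptors_util.h"

// Sketch only: the stub argument and a running echo server are assumed.
void VerifyAsyncEcho(grpc::testing::EchoTestService::Stub* stub) {
  grpc::CompletionQueue cq;
  grpc::ClientContext ctx;
  grpc::testing::EchoRequest req;
  grpc::testing::EchoResponse resp;
  grpc::Status status;
  req.set_message("Hello");
  auto rpc = stub->AsyncEcho(&ctx, req, &cq);  // the call starts immediately
  rpc->Finish(&resp, &status, grpc::testing::tag(1));
  // Block until tag 1 completes and require ok == true for it.
  grpc::testing::Verifier().Expect(1, true).Verify(&cq);
  EXPECT_TRUE(status.ok());
}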
diff --git a/contrib/libs/grpc/test/cpp/end2end/mock_test.cc b/contrib/libs/grpc/test/cpp/end2end/mock_test.cc
index 623fac0020..a3d61c4e98 100644
--- a/contrib/libs/grpc/test/cpp/end2end/mock_test.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/mock_test.cc
@@ -1,47 +1,47 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include <climits>
-
-#include <grpc/grpc.h>
-#include <grpc/support/log.h>
-#include <grpc/support/time.h>
-#include <grpcpp/channel.h>
-#include <grpcpp/client_context.h>
-#include <grpcpp/create_channel.h>
-#include <grpcpp/server.h>
-#include <grpcpp/server_builder.h>
-#include <grpcpp/server_context.h>
-#include <grpcpp/test/default_reactor_test_peer.h>
-
-#include "src/proto/grpc/testing/duplicate/echo_duplicate.grpc.pb.h"
-#include "src/proto/grpc/testing/echo.grpc.pb.h"
-#include "src/proto/grpc/testing/echo_mock.grpc.pb.h"
-#include "test/core/util/port.h"
-#include "test/core/util/test_config.h"
-
-#include <grpcpp/test/mock_stream.h>
-
-#include <gmock/gmock.h>
-#include <gtest/gtest.h>
-
-#include <iostream>
-
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <climits>
+
+#include <grpc/grpc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/time.h>
+#include <grpcpp/channel.h>
+#include <grpcpp/client_context.h>
+#include <grpcpp/create_channel.h>
+#include <grpcpp/server.h>
+#include <grpcpp/server_builder.h>
+#include <grpcpp/server_context.h>
+#include <grpcpp/test/default_reactor_test_peer.h>
+
+#include "src/proto/grpc/testing/duplicate/echo_duplicate.grpc.pb.h"
+#include "src/proto/grpc/testing/echo.grpc.pb.h"
+#include "src/proto/grpc/testing/echo_mock.grpc.pb.h"
+#include "test/core/util/port.h"
+#include "test/core/util/test_config.h"
+
+#include <grpcpp/test/mock_stream.h>
+
+#include <gmock/gmock.h>
+#include <gtest/gtest.h>
+
+#include <iostream>
+
using grpc::testing::DefaultReactorTestPeer;
using grpc::testing::EchoRequest;
using grpc::testing::EchoResponse;
@@ -50,385 +50,385 @@ using grpc::testing::MockClientReaderWriter;
using std::vector;
using std::chrono::system_clock;
using ::testing::_;
-using ::testing::AtLeast;
-using ::testing::DoAll;
-using ::testing::Invoke;
-using ::testing::Return;
-using ::testing::SaveArg;
-using ::testing::SetArgPointee;
-using ::testing::WithArg;
-
-namespace grpc {
-namespace testing {
-
-namespace {
-class FakeClient {
- public:
- explicit FakeClient(EchoTestService::StubInterface* stub) : stub_(stub) {}
-
- void DoEcho() {
- ClientContext context;
- EchoRequest request;
- EchoResponse response;
- request.set_message("hello world");
- Status s = stub_->Echo(&context, request, &response);
- EXPECT_EQ(request.message(), response.message());
- EXPECT_TRUE(s.ok());
- }
-
- void DoRequestStream() {
- EchoRequest request;
- EchoResponse response;
-
- ClientContext context;
+using ::testing::AtLeast;
+using ::testing::DoAll;
+using ::testing::Invoke;
+using ::testing::Return;
+using ::testing::SaveArg;
+using ::testing::SetArgPointee;
+using ::testing::WithArg;
+
+namespace grpc {
+namespace testing {
+
+namespace {
+class FakeClient {
+ public:
+ explicit FakeClient(EchoTestService::StubInterface* stub) : stub_(stub) {}
+
+ void DoEcho() {
+ ClientContext context;
+ EchoRequest request;
+ EchoResponse response;
+ request.set_message("hello world");
+ Status s = stub_->Echo(&context, request, &response);
+ EXPECT_EQ(request.message(), response.message());
+ EXPECT_TRUE(s.ok());
+ }
+
+ void DoRequestStream() {
+ EchoRequest request;
+ EchoResponse response;
+
+ ClientContext context;
TString msg("hello");
TString exp(msg);
-
- std::unique_ptr<ClientWriterInterface<EchoRequest>> cstream =
- stub_->RequestStream(&context, &response);
-
- request.set_message(msg);
- EXPECT_TRUE(cstream->Write(request));
-
- msg = ", world";
- request.set_message(msg);
- exp.append(msg);
- EXPECT_TRUE(cstream->Write(request));
-
- cstream->WritesDone();
- Status s = cstream->Finish();
-
- EXPECT_EQ(exp, response.message());
- EXPECT_TRUE(s.ok());
- }
-
- void DoResponseStream() {
- EchoRequest request;
- EchoResponse response;
- request.set_message("hello world");
-
- ClientContext context;
- std::unique_ptr<ClientReaderInterface<EchoResponse>> cstream =
- stub_->ResponseStream(&context, request);
-
+
+ std::unique_ptr<ClientWriterInterface<EchoRequest>> cstream =
+ stub_->RequestStream(&context, &response);
+
+ request.set_message(msg);
+ EXPECT_TRUE(cstream->Write(request));
+
+ msg = ", world";
+ request.set_message(msg);
+ exp.append(msg);
+ EXPECT_TRUE(cstream->Write(request));
+
+ cstream->WritesDone();
+ Status s = cstream->Finish();
+
+ EXPECT_EQ(exp, response.message());
+ EXPECT_TRUE(s.ok());
+ }
+
+ void DoResponseStream() {
+ EchoRequest request;
+ EchoResponse response;
+ request.set_message("hello world");
+
+ ClientContext context;
+ std::unique_ptr<ClientReaderInterface<EchoResponse>> cstream =
+ stub_->ResponseStream(&context, request);
+
TString exp = "";
- EXPECT_TRUE(cstream->Read(&response));
- exp.append(response.message() + " ");
-
- EXPECT_TRUE(cstream->Read(&response));
- exp.append(response.message());
-
- EXPECT_FALSE(cstream->Read(&response));
- EXPECT_EQ(request.message(), exp);
-
- Status s = cstream->Finish();
- EXPECT_TRUE(s.ok());
- }
-
- void DoBidiStream() {
- EchoRequest request;
- EchoResponse response;
- ClientContext context;
+ EXPECT_TRUE(cstream->Read(&response));
+ exp.append(response.message() + " ");
+
+ EXPECT_TRUE(cstream->Read(&response));
+ exp.append(response.message());
+
+ EXPECT_FALSE(cstream->Read(&response));
+ EXPECT_EQ(request.message(), exp);
+
+ Status s = cstream->Finish();
+ EXPECT_TRUE(s.ok());
+ }
+
+ void DoBidiStream() {
+ EchoRequest request;
+ EchoResponse response;
+ ClientContext context;
TString msg("hello");
-
- std::unique_ptr<ClientReaderWriterInterface<EchoRequest, EchoResponse>>
- stream = stub_->BidiStream(&context);
-
- request.set_message(msg + "0");
- EXPECT_TRUE(stream->Write(request));
- EXPECT_TRUE(stream->Read(&response));
- EXPECT_EQ(response.message(), request.message());
-
- request.set_message(msg + "1");
- EXPECT_TRUE(stream->Write(request));
- EXPECT_TRUE(stream->Read(&response));
- EXPECT_EQ(response.message(), request.message());
-
- request.set_message(msg + "2");
- EXPECT_TRUE(stream->Write(request));
- EXPECT_TRUE(stream->Read(&response));
- EXPECT_EQ(response.message(), request.message());
-
- stream->WritesDone();
- EXPECT_FALSE(stream->Read(&response));
-
- Status s = stream->Finish();
- EXPECT_TRUE(s.ok());
- }
-
- void ResetStub(EchoTestService::StubInterface* stub) { stub_ = stub; }
-
- private:
- EchoTestService::StubInterface* stub_;
-};
-
-class CallbackTestServiceImpl
- : public EchoTestService::ExperimentalCallbackService {
- public:
- experimental::ServerUnaryReactor* Echo(
- experimental::CallbackServerContext* context, const EchoRequest* request,
- EchoResponse* response) override {
- // Make the mock service explicitly treat empty input messages as invalid
- // arguments so that we can test various results of status. In general, a
- // mocked service should just use the original service methods, but we are
- // adding this variance in Status return value just to improve coverage in
- // this test.
- auto* reactor = context->DefaultReactor();
- if (request->message().length() > 0) {
- response->set_message(request->message());
- reactor->Finish(Status::OK);
- } else {
- reactor->Finish(Status(StatusCode::INVALID_ARGUMENT, "Invalid request"));
- }
- return reactor;
- }
-};
-
-class MockCallbackTest : public ::testing::Test {
- protected:
- CallbackTestServiceImpl service_;
- ServerContext context_;
-};
-
-TEST_F(MockCallbackTest, MockedCallSucceedsWithWait) {
- experimental::CallbackServerContext ctx;
- EchoRequest req;
- EchoResponse resp;
- grpc::internal::Mutex mu;
- grpc::internal::CondVar cv;
- grpc::Status status;
- bool status_set = false;
- DefaultReactorTestPeer peer(&ctx, [&](::grpc::Status s) {
- grpc::internal::MutexLock l(&mu);
- status_set = true;
- status = std::move(s);
- cv.Signal();
- });
-
- req.set_message("mock 1");
- auto* reactor = service_.Echo(&ctx, &req, &resp);
- cv.WaitUntil(&mu, [&] {
- grpc::internal::MutexLock l(&mu);
- return status_set;
- });
- EXPECT_EQ(reactor, peer.reactor());
- EXPECT_TRUE(peer.test_status_set());
- EXPECT_TRUE(peer.test_status().ok());
- EXPECT_TRUE(status_set);
- EXPECT_TRUE(status.ok());
- EXPECT_EQ(req.message(), resp.message());
-}
-
-TEST_F(MockCallbackTest, MockedCallSucceeds) {
- experimental::CallbackServerContext ctx;
- EchoRequest req;
- EchoResponse resp;
- DefaultReactorTestPeer peer(&ctx);
-
- req.set_message("ha ha, consider yourself mocked.");
- auto* reactor = service_.Echo(&ctx, &req, &resp);
- EXPECT_EQ(reactor, peer.reactor());
- EXPECT_TRUE(peer.test_status_set());
- EXPECT_TRUE(peer.test_status().ok());
-}
-
-TEST_F(MockCallbackTest, MockedCallFails) {
- experimental::CallbackServerContext ctx;
- EchoRequest req;
- EchoResponse resp;
- DefaultReactorTestPeer peer(&ctx);
-
- auto* reactor = service_.Echo(&ctx, &req, &resp);
- EXPECT_EQ(reactor, peer.reactor());
- EXPECT_TRUE(peer.test_status_set());
- EXPECT_EQ(peer.test_status().error_code(), StatusCode::INVALID_ARGUMENT);
-}
-
-class TestServiceImpl : public EchoTestService::Service {
- public:
- Status Echo(ServerContext* /*context*/, const EchoRequest* request,
- EchoResponse* response) override {
- response->set_message(request->message());
- return Status::OK;
- }
-
- Status RequestStream(ServerContext* /*context*/,
- ServerReader<EchoRequest>* reader,
- EchoResponse* response) override {
- EchoRequest request;
+
+ std::unique_ptr<ClientReaderWriterInterface<EchoRequest, EchoResponse>>
+ stream = stub_->BidiStream(&context);
+
+ request.set_message(msg + "0");
+ EXPECT_TRUE(stream->Write(request));
+ EXPECT_TRUE(stream->Read(&response));
+ EXPECT_EQ(response.message(), request.message());
+
+ request.set_message(msg + "1");
+ EXPECT_TRUE(stream->Write(request));
+ EXPECT_TRUE(stream->Read(&response));
+ EXPECT_EQ(response.message(), request.message());
+
+ request.set_message(msg + "2");
+ EXPECT_TRUE(stream->Write(request));
+ EXPECT_TRUE(stream->Read(&response));
+ EXPECT_EQ(response.message(), request.message());
+
+ stream->WritesDone();
+ EXPECT_FALSE(stream->Read(&response));
+
+ Status s = stream->Finish();
+ EXPECT_TRUE(s.ok());
+ }
+
+ void ResetStub(EchoTestService::StubInterface* stub) { stub_ = stub; }
+
+ private:
+ EchoTestService::StubInterface* stub_;
+};
+
+class CallbackTestServiceImpl
+ : public EchoTestService::ExperimentalCallbackService {
+ public:
+ experimental::ServerUnaryReactor* Echo(
+ experimental::CallbackServerContext* context, const EchoRequest* request,
+ EchoResponse* response) override {
+ // Make the mock service explicitly treat empty input messages as invalid
+ // arguments so that we can test various results of status. In general, a
+ // mocked service should just use the original service methods, but we are
+ // adding this variance in Status return value just to improve coverage in
+ // this test.
+ auto* reactor = context->DefaultReactor();
+ if (request->message().length() > 0) {
+ response->set_message(request->message());
+ reactor->Finish(Status::OK);
+ } else {
+ reactor->Finish(Status(StatusCode::INVALID_ARGUMENT, "Invalid request"));
+ }
+ return reactor;
+ }
+};
+
+class MockCallbackTest : public ::testing::Test {
+ protected:
+ CallbackTestServiceImpl service_;
+ ServerContext context_;
+};
+
+TEST_F(MockCallbackTest, MockedCallSucceedsWithWait) {
+ experimental::CallbackServerContext ctx;
+ EchoRequest req;
+ EchoResponse resp;
+ grpc::internal::Mutex mu;
+ grpc::internal::CondVar cv;
+ grpc::Status status;
+ bool status_set = false;
+ DefaultReactorTestPeer peer(&ctx, [&](::grpc::Status s) {
+ grpc::internal::MutexLock l(&mu);
+ status_set = true;
+ status = std::move(s);
+ cv.Signal();
+ });
+
+ req.set_message("mock 1");
+ auto* reactor = service_.Echo(&ctx, &req, &resp);
+ cv.WaitUntil(&mu, [&] {
+ grpc::internal::MutexLock l(&mu);
+ return status_set;
+ });
+ EXPECT_EQ(reactor, peer.reactor());
+ EXPECT_TRUE(peer.test_status_set());
+ EXPECT_TRUE(peer.test_status().ok());
+ EXPECT_TRUE(status_set);
+ EXPECT_TRUE(status.ok());
+ EXPECT_EQ(req.message(), resp.message());
+}
+
+TEST_F(MockCallbackTest, MockedCallSucceeds) {
+ experimental::CallbackServerContext ctx;
+ EchoRequest req;
+ EchoResponse resp;
+ DefaultReactorTestPeer peer(&ctx);
+
+ req.set_message("ha ha, consider yourself mocked.");
+ auto* reactor = service_.Echo(&ctx, &req, &resp);
+ EXPECT_EQ(reactor, peer.reactor());
+ EXPECT_TRUE(peer.test_status_set());
+ EXPECT_TRUE(peer.test_status().ok());
+}
+
+TEST_F(MockCallbackTest, MockedCallFails) {
+ experimental::CallbackServerContext ctx;
+ EchoRequest req;
+ EchoResponse resp;
+ DefaultReactorTestPeer peer(&ctx);
+
+ auto* reactor = service_.Echo(&ctx, &req, &resp);
+ EXPECT_EQ(reactor, peer.reactor());
+ EXPECT_TRUE(peer.test_status_set());
+ EXPECT_EQ(peer.test_status().error_code(), StatusCode::INVALID_ARGUMENT);
+}
+
+class TestServiceImpl : public EchoTestService::Service {
+ public:
+ Status Echo(ServerContext* /*context*/, const EchoRequest* request,
+ EchoResponse* response) override {
+ response->set_message(request->message());
+ return Status::OK;
+ }
+
+ Status RequestStream(ServerContext* /*context*/,
+ ServerReader<EchoRequest>* reader,
+ EchoResponse* response) override {
+ EchoRequest request;
TString resp("");
- while (reader->Read(&request)) {
- gpr_log(GPR_INFO, "recv msg %s", request.message().c_str());
- resp.append(request.message());
- }
- response->set_message(resp);
- return Status::OK;
- }
-
- Status ResponseStream(ServerContext* /*context*/, const EchoRequest* request,
- ServerWriter<EchoResponse>* writer) override {
- EchoResponse response;
+ while (reader->Read(&request)) {
+ gpr_log(GPR_INFO, "recv msg %s", request.message().c_str());
+ resp.append(request.message());
+ }
+ response->set_message(resp);
+ return Status::OK;
+ }
+
+ Status ResponseStream(ServerContext* /*context*/, const EchoRequest* request,
+ ServerWriter<EchoResponse>* writer) override {
+ EchoResponse response;
vector<TString> tokens = split(request->message());
for (const TString& token : tokens) {
- response.set_message(token);
- writer->Write(response);
- }
- return Status::OK;
- }
-
- Status BidiStream(
- ServerContext* /*context*/,
- ServerReaderWriter<EchoResponse, EchoRequest>* stream) override {
- EchoRequest request;
- EchoResponse response;
- while (stream->Read(&request)) {
- gpr_log(GPR_INFO, "recv msg %s", request.message().c_str());
- response.set_message(request.message());
- stream->Write(response);
- }
- return Status::OK;
- }
-
- private:
+ response.set_message(token);
+ writer->Write(response);
+ }
+ return Status::OK;
+ }
+
+ Status BidiStream(
+ ServerContext* /*context*/,
+ ServerReaderWriter<EchoResponse, EchoRequest>* stream) override {
+ EchoRequest request;
+ EchoResponse response;
+ while (stream->Read(&request)) {
+ gpr_log(GPR_INFO, "recv msg %s", request.message().c_str());
+ response.set_message(request.message());
+ stream->Write(response);
+ }
+ return Status::OK;
+ }
+
+ private:
const vector<TString> split(const TString& input) {
TString buff("");
vector<TString> result;
-
- for (auto n : input) {
- if (n != ' ') {
- buff += n;
- continue;
- }
- if (buff == "") continue;
- result.push_back(buff);
- buff = "";
- }
- if (buff != "") result.push_back(buff);
-
- return result;
- }
-};
-
-class MockTest : public ::testing::Test {
- protected:
- MockTest() {}
-
- void SetUp() override {
- int port = grpc_pick_unused_port_or_die();
- server_address_ << "localhost:" << port;
- // Set up the server.
- ServerBuilder builder;
- builder.AddListeningPort(server_address_.str(),
- InsecureServerCredentials());
- builder.RegisterService(&service_);
- server_ = builder.BuildAndStart();
- }
-
- void TearDown() override { server_->Shutdown(); }
-
- void ResetStub() {
- std::shared_ptr<Channel> channel = grpc::CreateChannel(
- server_address_.str(), InsecureChannelCredentials());
- stub_ = grpc::testing::EchoTestService::NewStub(channel);
- }
-
- std::unique_ptr<grpc::testing::EchoTestService::Stub> stub_;
- std::unique_ptr<Server> server_;
- std::ostringstream server_address_;
- TestServiceImpl service_;
-};
-
-// Do one real RPC and one mocked one
-TEST_F(MockTest, SimpleRpc) {
- ResetStub();
- FakeClient client(stub_.get());
- client.DoEcho();
- MockEchoTestServiceStub stub;
- EchoResponse resp;
- resp.set_message("hello world");
- EXPECT_CALL(stub, Echo(_, _, _))
- .Times(AtLeast(1))
- .WillOnce(DoAll(SetArgPointee<2>(resp), Return(Status::OK)));
- client.ResetStub(&stub);
- client.DoEcho();
-}
-
-TEST_F(MockTest, ClientStream) {
- ResetStub();
- FakeClient client(stub_.get());
- client.DoRequestStream();
-
- MockEchoTestServiceStub stub;
- auto w = new MockClientWriter<EchoRequest>();
- EchoResponse resp;
- resp.set_message("hello, world");
-
- EXPECT_CALL(*w, Write(_, _)).Times(2).WillRepeatedly(Return(true));
- EXPECT_CALL(*w, WritesDone());
- EXPECT_CALL(*w, Finish()).WillOnce(Return(Status::OK));
-
- EXPECT_CALL(stub, RequestStreamRaw(_, _))
- .WillOnce(DoAll(SetArgPointee<1>(resp), Return(w)));
- client.ResetStub(&stub);
- client.DoRequestStream();
-}
-
-TEST_F(MockTest, ServerStream) {
- ResetStub();
- FakeClient client(stub_.get());
- client.DoResponseStream();
-
- MockEchoTestServiceStub stub;
- auto r = new MockClientReader<EchoResponse>();
- EchoResponse resp1;
- resp1.set_message("hello");
- EchoResponse resp2;
- resp2.set_message("world");
-
- EXPECT_CALL(*r, Read(_))
- .WillOnce(DoAll(SetArgPointee<0>(resp1), Return(true)))
- .WillOnce(DoAll(SetArgPointee<0>(resp2), Return(true)))
- .WillOnce(Return(false));
- EXPECT_CALL(*r, Finish()).WillOnce(Return(Status::OK));
-
- EXPECT_CALL(stub, ResponseStreamRaw(_, _)).WillOnce(Return(r));
-
- client.ResetStub(&stub);
- client.DoResponseStream();
-}
-
-ACTION_P(copy, msg) { arg0->set_message(msg->message()); }
-
-TEST_F(MockTest, BidiStream) {
- ResetStub();
- FakeClient client(stub_.get());
- client.DoBidiStream();
- MockEchoTestServiceStub stub;
- auto rw = new MockClientReaderWriter<EchoRequest, EchoResponse>();
- EchoRequest msg;
-
- EXPECT_CALL(*rw, Write(_, _))
- .Times(3)
- .WillRepeatedly(DoAll(SaveArg<0>(&msg), Return(true)));
- EXPECT_CALL(*rw, Read(_))
- .WillOnce(DoAll(WithArg<0>(copy(&msg)), Return(true)))
- .WillOnce(DoAll(WithArg<0>(copy(&msg)), Return(true)))
- .WillOnce(DoAll(WithArg<0>(copy(&msg)), Return(true)))
- .WillOnce(Return(false));
- EXPECT_CALL(*rw, WritesDone());
- EXPECT_CALL(*rw, Finish()).WillOnce(Return(Status::OK));
-
- EXPECT_CALL(stub, BidiStreamRaw(_)).WillOnce(Return(rw));
- client.ResetStub(&stub);
- client.DoBidiStream();
-}
-
-} // namespace
-} // namespace testing
-} // namespace grpc
-
-int main(int argc, char** argv) {
- grpc::testing::TestEnvironment env(argc, argv);
- ::testing::InitGoogleTest(&argc, argv);
- return RUN_ALL_TESTS();
-}
+
+ for (auto n : input) {
+ if (n != ' ') {
+ buff += n;
+ continue;
+ }
+ if (buff == "") continue;
+ result.push_back(buff);
+ buff = "";
+ }
+ if (buff != "") result.push_back(buff);
+
+ return result;
+ }
+};
+
+class MockTest : public ::testing::Test {
+ protected:
+ MockTest() {}
+
+ void SetUp() override {
+ int port = grpc_pick_unused_port_or_die();
+ server_address_ << "localhost:" << port;
+ // Set up the server.
+ ServerBuilder builder;
+ builder.AddListeningPort(server_address_.str(),
+ InsecureServerCredentials());
+ builder.RegisterService(&service_);
+ server_ = builder.BuildAndStart();
+ }
+
+ void TearDown() override { server_->Shutdown(); }
+
+ void ResetStub() {
+ std::shared_ptr<Channel> channel = grpc::CreateChannel(
+ server_address_.str(), InsecureChannelCredentials());
+ stub_ = grpc::testing::EchoTestService::NewStub(channel);
+ }
+
+ std::unique_ptr<grpc::testing::EchoTestService::Stub> stub_;
+ std::unique_ptr<Server> server_;
+ std::ostringstream server_address_;
+ TestServiceImpl service_;
+};
+
+// Do one real RPC and one mocked one
+TEST_F(MockTest, SimpleRpc) {
+ ResetStub();
+ FakeClient client(stub_.get());
+ client.DoEcho();
+ MockEchoTestServiceStub stub;
+ EchoResponse resp;
+ resp.set_message("hello world");
+ EXPECT_CALL(stub, Echo(_, _, _))
+ .Times(AtLeast(1))
+ .WillOnce(DoAll(SetArgPointee<2>(resp), Return(Status::OK)));
+ client.ResetStub(&stub);
+ client.DoEcho();
+}
+
+TEST_F(MockTest, ClientStream) {
+ ResetStub();
+ FakeClient client(stub_.get());
+ client.DoRequestStream();
+
+ MockEchoTestServiceStub stub;
+ auto w = new MockClientWriter<EchoRequest>();
+ EchoResponse resp;
+ resp.set_message("hello, world");
+
+ EXPECT_CALL(*w, Write(_, _)).Times(2).WillRepeatedly(Return(true));
+ EXPECT_CALL(*w, WritesDone());
+ EXPECT_CALL(*w, Finish()).WillOnce(Return(Status::OK));
+
+ EXPECT_CALL(stub, RequestStreamRaw(_, _))
+ .WillOnce(DoAll(SetArgPointee<1>(resp), Return(w)));
+ client.ResetStub(&stub);
+ client.DoRequestStream();
+}
+
+TEST_F(MockTest, ServerStream) {
+ ResetStub();
+ FakeClient client(stub_.get());
+ client.DoResponseStream();
+
+ MockEchoTestServiceStub stub;
+ auto r = new MockClientReader<EchoResponse>();
+ EchoResponse resp1;
+ resp1.set_message("hello");
+ EchoResponse resp2;
+ resp2.set_message("world");
+
+ EXPECT_CALL(*r, Read(_))
+ .WillOnce(DoAll(SetArgPointee<0>(resp1), Return(true)))
+ .WillOnce(DoAll(SetArgPointee<0>(resp2), Return(true)))
+ .WillOnce(Return(false));
+ EXPECT_CALL(*r, Finish()).WillOnce(Return(Status::OK));
+
+ EXPECT_CALL(stub, ResponseStreamRaw(_, _)).WillOnce(Return(r));
+
+ client.ResetStub(&stub);
+ client.DoResponseStream();
+}
+
+ACTION_P(copy, msg) { arg0->set_message(msg->message()); }
+
+TEST_F(MockTest, BidiStream) {
+ ResetStub();
+ FakeClient client(stub_.get());
+ client.DoBidiStream();
+ MockEchoTestServiceStub stub;
+ auto rw = new MockClientReaderWriter<EchoRequest, EchoResponse>();
+ EchoRequest msg;
+
+ EXPECT_CALL(*rw, Write(_, _))
+ .Times(3)
+ .WillRepeatedly(DoAll(SaveArg<0>(&msg), Return(true)));
+ EXPECT_CALL(*rw, Read(_))
+ .WillOnce(DoAll(WithArg<0>(copy(&msg)), Return(true)))
+ .WillOnce(DoAll(WithArg<0>(copy(&msg)), Return(true)))
+ .WillOnce(DoAll(WithArg<0>(copy(&msg)), Return(true)))
+ .WillOnce(Return(false));
+ EXPECT_CALL(*rw, WritesDone());
+ EXPECT_CALL(*rw, Finish()).WillOnce(Return(Status::OK));
+
+ EXPECT_CALL(stub, BidiStreamRaw(_)).WillOnce(Return(rw));
+ client.ResetStub(&stub);
+ client.DoBidiStream();
+}
+
+} // namespace
+} // namespace testing
+} // namespace grpc
+
+int main(int argc, char** argv) {
+ grpc::testing::TestEnvironment env(argc, argv);
+ ::testing::InitGoogleTest(&argc, argv);
+ return RUN_ALL_TESTS();
+}
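
The MockTest cases above exercise a client against a generated mock stub by combining gMock actions: DoAll, SetArgPointee<N> to fill an output argument, SaveArg/WithArg to capture or transform one, and Return to supply the call's result. Below is a minimal, self-contained sketch of that fill-the-out-parameter pattern using only gtest/gmock; FakeStub, MockStub and CallEcho are hypothetical names standing in for the generated EchoTestService mocks.

// A self-contained sketch of the gMock pattern used above, assuming only
// gtest/gmock; FakeStub, MockStub and CallEcho are illustrative names, not
// the generated gRPC mocks.
#include <string>

#include <gmock/gmock.h>
#include <gtest/gtest.h>

using ::testing::_;
using ::testing::DoAll;
using ::testing::Return;
using ::testing::SetArgPointee;

// Shape of a unary RPC: request in, response out parameter, bool status.
struct FakeStub {
  virtual ~FakeStub() = default;
  virtual bool Echo(const std::string& request, std::string* response) = 0;
};

struct MockStub : FakeStub {
  MOCK_METHOD(bool, Echo, (const std::string&, std::string*), (override));
};

// Code under test: returns the response on success, empty string otherwise.
std::string CallEcho(FakeStub* stub, const std::string& msg) {
  std::string resp;
  return stub->Echo(msg, &resp) ? resp : "";
}

TEST(MockSketch, UnaryEcho) {
  MockStub stub;
  // Fill the output argument and report success, analogous to
  // DoAll(SetArgPointee<2>(resp), Return(Status::OK)) in MockTest.SimpleRpc.
  EXPECT_CALL(stub, Echo(_, _))
      .WillOnce(DoAll(SetArgPointee<1>(std::string("hello world")),
                      Return(true)));
  EXPECT_EQ(CallEcho(&stub, "hi"), "hello world");
}

int main(int argc, char** argv) {
  ::testing::InitGoogleMock(&argc, argv);
  return RUN_ALL_TESTS();
}
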
diff --git a/contrib/libs/grpc/test/cpp/end2end/nonblocking_test.cc b/contrib/libs/grpc/test/cpp/end2end/nonblocking_test.cc
index 37e696adaa..4be070ec71 100644
--- a/contrib/libs/grpc/test/cpp/end2end/nonblocking_test.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/nonblocking_test.cc
@@ -1,203 +1,203 @@
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include <memory>
-
-#include <grpcpp/channel.h>
-#include <grpcpp/client_context.h>
-#include <grpcpp/create_channel.h>
-#include <grpcpp/server.h>
-#include <grpcpp/server_builder.h>
-#include <grpcpp/server_context.h>
-
-#include "src/core/lib/gpr/tls.h"
-#include "src/core/lib/iomgr/port.h"
-#include "src/proto/grpc/testing/echo.grpc.pb.h"
-#include "test/core/util/port.h"
-#include "test/core/util/test_config.h"
-
-#ifdef GRPC_POSIX_SOCKET
-#include "src/core/lib/iomgr/ev_posix.h"
-#endif // GRPC_POSIX_SOCKET
-
-#include <gtest/gtest.h>
-
-#ifdef GRPC_POSIX_SOCKET
-// Thread-local variable so that only polls from this test assert
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <memory>
+
+#include <grpcpp/channel.h>
+#include <grpcpp/client_context.h>
+#include <grpcpp/create_channel.h>
+#include <grpcpp/server.h>
+#include <grpcpp/server_builder.h>
+#include <grpcpp/server_context.h>
+
+#include "src/core/lib/gpr/tls.h"
+#include "src/core/lib/iomgr/port.h"
+#include "src/proto/grpc/testing/echo.grpc.pb.h"
+#include "test/core/util/port.h"
+#include "test/core/util/test_config.h"
+
+#ifdef GRPC_POSIX_SOCKET
+#include "src/core/lib/iomgr/ev_posix.h"
+#endif // GRPC_POSIX_SOCKET
+
+#include <gtest/gtest.h>
+
+#ifdef GRPC_POSIX_SOCKET
+// Thread-local variable so that only polls from this test assert
// non-blocking (not polls from resolver, timer thread, etc), and only when the
// thread is waiting on polls caused by CompletionQueue::AsyncNext (not for
// picking a port or other reasons).
GPR_TLS_DECL(g_is_nonblocking_poll);
-
-namespace {
-
-int maybe_assert_non_blocking_poll(struct pollfd* pfds, nfds_t nfds,
- int timeout) {
+
+namespace {
+
+int maybe_assert_non_blocking_poll(struct pollfd* pfds, nfds_t nfds,
+ int timeout) {
// Only assert that this poll should have zero timeout if we're in the
// middle of a zero-timeout CQ Next.
if (gpr_tls_get(&g_is_nonblocking_poll)) {
- GPR_ASSERT(timeout == 0);
- }
- return poll(pfds, nfds, timeout);
-}
-
-} // namespace
-
-namespace grpc {
-namespace testing {
-namespace {
-
-void* tag(int i) { return reinterpret_cast<void*>(static_cast<intptr_t>(i)); }
-int detag(void* p) { return static_cast<int>(reinterpret_cast<intptr_t>(p)); }
-
-class NonblockingTest : public ::testing::Test {
- protected:
- NonblockingTest() {}
-
- void SetUp() override {
- port_ = grpc_pick_unused_port_or_die();
- server_address_ << "localhost:" << port_;
-
- // Setup server
- BuildAndStartServer();
- }
-
- bool LoopForTag(void** tag, bool* ok) {
+ GPR_ASSERT(timeout == 0);
+ }
+ return poll(pfds, nfds, timeout);
+}
+
+} // namespace
+
+namespace grpc {
+namespace testing {
+namespace {
+
+void* tag(int i) { return reinterpret_cast<void*>(static_cast<intptr_t>(i)); }
+int detag(void* p) { return static_cast<int>(reinterpret_cast<intptr_t>(p)); }
+
+class NonblockingTest : public ::testing::Test {
+ protected:
+ NonblockingTest() {}
+
+ void SetUp() override {
+ port_ = grpc_pick_unused_port_or_die();
+ server_address_ << "localhost:" << port_;
+
+ // Setup server
+ BuildAndStartServer();
+ }
+
+ bool LoopForTag(void** tag, bool* ok) {
// Temporarily set the thread-local nonblocking poll flag so that the polls
// caused by this loop are indeed sent by the library with zero timeout.
intptr_t orig_val = gpr_tls_get(&g_is_nonblocking_poll);
gpr_tls_set(&g_is_nonblocking_poll, static_cast<intptr_t>(true));
- for (;;) {
- auto r = cq_->AsyncNext(tag, ok, gpr_time_0(GPR_CLOCK_REALTIME));
- if (r == CompletionQueue::SHUTDOWN) {
+ for (;;) {
+ auto r = cq_->AsyncNext(tag, ok, gpr_time_0(GPR_CLOCK_REALTIME));
+ if (r == CompletionQueue::SHUTDOWN) {
gpr_tls_set(&g_is_nonblocking_poll, orig_val);
- return false;
- } else if (r == CompletionQueue::GOT_EVENT) {
+ return false;
+ } else if (r == CompletionQueue::GOT_EVENT) {
gpr_tls_set(&g_is_nonblocking_poll, orig_val);
- return true;
- }
- }
- }
-
- void TearDown() override {
- server_->Shutdown();
- void* ignored_tag;
- bool ignored_ok;
- cq_->Shutdown();
- while (LoopForTag(&ignored_tag, &ignored_ok))
- ;
- stub_.reset();
- grpc_recycle_unused_port(port_);
- }
-
- void BuildAndStartServer() {
- ServerBuilder builder;
- builder.AddListeningPort(server_address_.str(),
- grpc::InsecureServerCredentials());
- service_.reset(new grpc::testing::EchoTestService::AsyncService());
- builder.RegisterService(service_.get());
- cq_ = builder.AddCompletionQueue();
- server_ = builder.BuildAndStart();
- }
-
- void ResetStub() {
- std::shared_ptr<Channel> channel = grpc::CreateChannel(
- server_address_.str(), grpc::InsecureChannelCredentials());
- stub_ = grpc::testing::EchoTestService::NewStub(channel);
- }
-
- void SendRpc(int num_rpcs) {
- for (int i = 0; i < num_rpcs; i++) {
- EchoRequest send_request;
- EchoRequest recv_request;
- EchoResponse send_response;
- EchoResponse recv_response;
- Status recv_status;
-
- ClientContext cli_ctx;
- ServerContext srv_ctx;
- grpc::ServerAsyncResponseWriter<EchoResponse> response_writer(&srv_ctx);
-
- send_request.set_message("hello non-blocking world");
- std::unique_ptr<ClientAsyncResponseReader<EchoResponse>> response_reader(
- stub_->PrepareAsyncEcho(&cli_ctx, send_request, cq_.get()));
-
- response_reader->StartCall();
- response_reader->Finish(&recv_response, &recv_status, tag(4));
-
- service_->RequestEcho(&srv_ctx, &recv_request, &response_writer,
- cq_.get(), cq_.get(), tag(2));
-
- void* got_tag;
- bool ok;
- EXPECT_TRUE(LoopForTag(&got_tag, &ok));
- EXPECT_TRUE(ok);
- EXPECT_EQ(detag(got_tag), 2);
- EXPECT_EQ(send_request.message(), recv_request.message());
-
- send_response.set_message(recv_request.message());
- response_writer.Finish(send_response, Status::OK, tag(3));
-
- int tagsum = 0;
- int tagprod = 1;
- EXPECT_TRUE(LoopForTag(&got_tag, &ok));
- EXPECT_TRUE(ok);
- tagsum += detag(got_tag);
- tagprod *= detag(got_tag);
-
- EXPECT_TRUE(LoopForTag(&got_tag, &ok));
- EXPECT_TRUE(ok);
- tagsum += detag(got_tag);
- tagprod *= detag(got_tag);
-
- EXPECT_EQ(tagsum, 7);
- EXPECT_EQ(tagprod, 12);
- EXPECT_EQ(send_response.message(), recv_response.message());
- EXPECT_TRUE(recv_status.ok());
- }
- }
-
- std::unique_ptr<ServerCompletionQueue> cq_;
- std::unique_ptr<grpc::testing::EchoTestService::Stub> stub_;
- std::unique_ptr<Server> server_;
- std::unique_ptr<grpc::testing::EchoTestService::AsyncService> service_;
- std::ostringstream server_address_;
- int port_;
-};
-
-TEST_F(NonblockingTest, SimpleRpc) {
- ResetStub();
- SendRpc(10);
-}
-
-} // namespace
-} // namespace testing
-} // namespace grpc
-
-#endif // GRPC_POSIX_SOCKET
-
-int main(int argc, char** argv) {
-#ifdef GRPC_POSIX_SOCKET
- // Override the poll function before anything else can happen
- grpc_poll_function = maybe_assert_non_blocking_poll;
-
- grpc::testing::TestEnvironment env(argc, argv);
- ::testing::InitGoogleTest(&argc, argv);
+ return true;
+ }
+ }
+ }
+
+ void TearDown() override {
+ server_->Shutdown();
+ void* ignored_tag;
+ bool ignored_ok;
+ cq_->Shutdown();
+ while (LoopForTag(&ignored_tag, &ignored_ok))
+ ;
+ stub_.reset();
+ grpc_recycle_unused_port(port_);
+ }
+
+ void BuildAndStartServer() {
+ ServerBuilder builder;
+ builder.AddListeningPort(server_address_.str(),
+ grpc::InsecureServerCredentials());
+ service_.reset(new grpc::testing::EchoTestService::AsyncService());
+ builder.RegisterService(service_.get());
+ cq_ = builder.AddCompletionQueue();
+ server_ = builder.BuildAndStart();
+ }
+
+ void ResetStub() {
+ std::shared_ptr<Channel> channel = grpc::CreateChannel(
+ server_address_.str(), grpc::InsecureChannelCredentials());
+ stub_ = grpc::testing::EchoTestService::NewStub(channel);
+ }
+
+ void SendRpc(int num_rpcs) {
+ for (int i = 0; i < num_rpcs; i++) {
+ EchoRequest send_request;
+ EchoRequest recv_request;
+ EchoResponse send_response;
+ EchoResponse recv_response;
+ Status recv_status;
+
+ ClientContext cli_ctx;
+ ServerContext srv_ctx;
+ grpc::ServerAsyncResponseWriter<EchoResponse> response_writer(&srv_ctx);
+
+ send_request.set_message("hello non-blocking world");
+ std::unique_ptr<ClientAsyncResponseReader<EchoResponse>> response_reader(
+ stub_->PrepareAsyncEcho(&cli_ctx, send_request, cq_.get()));
+
+ response_reader->StartCall();
+ response_reader->Finish(&recv_response, &recv_status, tag(4));
+
+ service_->RequestEcho(&srv_ctx, &recv_request, &response_writer,
+ cq_.get(), cq_.get(), tag(2));
+
+ void* got_tag;
+ bool ok;
+ EXPECT_TRUE(LoopForTag(&got_tag, &ok));
+ EXPECT_TRUE(ok);
+ EXPECT_EQ(detag(got_tag), 2);
+ EXPECT_EQ(send_request.message(), recv_request.message());
+
+ send_response.set_message(recv_request.message());
+ response_writer.Finish(send_response, Status::OK, tag(3));
+
+ int tagsum = 0;
+ int tagprod = 1;
+ EXPECT_TRUE(LoopForTag(&got_tag, &ok));
+ EXPECT_TRUE(ok);
+ tagsum += detag(got_tag);
+ tagprod *= detag(got_tag);
+
+ EXPECT_TRUE(LoopForTag(&got_tag, &ok));
+ EXPECT_TRUE(ok);
+ tagsum += detag(got_tag);
+ tagprod *= detag(got_tag);
+
+ EXPECT_EQ(tagsum, 7);
+ EXPECT_EQ(tagprod, 12);
+ EXPECT_EQ(send_response.message(), recv_response.message());
+ EXPECT_TRUE(recv_status.ok());
+ }
+ }
+
+ std::unique_ptr<ServerCompletionQueue> cq_;
+ std::unique_ptr<grpc::testing::EchoTestService::Stub> stub_;
+ std::unique_ptr<Server> server_;
+ std::unique_ptr<grpc::testing::EchoTestService::AsyncService> service_;
+ std::ostringstream server_address_;
+ int port_;
+};
+
+TEST_F(NonblockingTest, SimpleRpc) {
+ ResetStub();
+ SendRpc(10);
+}
+
+} // namespace
+} // namespace testing
+} // namespace grpc
+
+#endif // GRPC_POSIX_SOCKET
+
+int main(int argc, char** argv) {
+#ifdef GRPC_POSIX_SOCKET
+ // Override the poll function before anything else can happen
+ grpc_poll_function = maybe_assert_non_blocking_poll;
+
+ grpc::testing::TestEnvironment env(argc, argv);
+ ::testing::InitGoogleTest(&argc, argv);
gpr_tls_init(&g_is_nonblocking_poll);
// Start the nonblocking poll thread-local variable as false because the
@@ -205,10 +205,10 @@ int main(int argc, char** argv) {
// timeout).
gpr_tls_set(&g_is_nonblocking_poll, static_cast<intptr_t>(false));
- int ret = RUN_ALL_TESTS();
+ int ret = RUN_ALL_TESTS();
gpr_tls_destroy(&g_is_nonblocking_poll);
- return ret;
+ return ret;
#else // GRPC_POSIX_SOCKET
return 0;
#endif // GRPC_POSIX_SOCKET
-}
+}
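
The nonblocking test above keeps its zero-timeout assertion scoped with a thread-local flag: the flag is set only while LoopForTag drives CompletionQueue::AsyncNext with a zero deadline, and the replacement grpc_poll_function checks it on every poll. The following is a rough standalone sketch of the same scoping idea in plain C++17; ScopedNonblocking and checked_wait are illustrative stand-ins, not gRPC APIs.

// A standalone sketch (plain C++17, illustrative names) of the scoping trick
// used above: a thread-local flag marks "non-blocking" sections, and the
// wait primitive asserts a zero timeout only while the flag is set.
#include <cassert>
#include <chrono>

thread_local bool g_expect_zero_timeout = false;

// RAII guard playing the role of the gpr_tls_set/restore pair in LoopForTag.
struct ScopedNonblocking {
  bool prev;
  ScopedNonblocking() : prev(g_expect_zero_timeout) {
    g_expect_zero_timeout = true;
  }
  ~ScopedNonblocking() { g_expect_zero_timeout = prev; }
};

// Stand-in for the wrapped poll function: when the flag is set, the caller
// must have asked for a zero timeout.
int checked_wait(std::chrono::milliseconds timeout) {
  if (g_expect_zero_timeout) {
    assert(timeout.count() == 0);
  }
  return 0;  // pretend nothing became ready
}

int main() {
  checked_wait(std::chrono::milliseconds(100));  // fine: flag not set
  {
    ScopedNonblocking guard;
    checked_wait(std::chrono::milliseconds(0));  // must be zero in this scope
  }
  return 0;
}
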
diff --git a/contrib/libs/grpc/test/cpp/end2end/proto_server_reflection_test.cc b/contrib/libs/grpc/test/cpp/end2end/proto_server_reflection_test.cc
index 3418cc9fd2..d79b33da70 100644
--- a/contrib/libs/grpc/test/cpp/end2end/proto_server_reflection_test.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/proto_server_reflection_test.cc
@@ -1,150 +1,150 @@
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include <grpc/grpc.h>
-#include <grpcpp/channel.h>
-#include <grpcpp/client_context.h>
-#include <grpcpp/create_channel.h>
-#include <grpcpp/ext/proto_server_reflection_plugin.h>
-#include <grpcpp/security/credentials.h>
-#include <grpcpp/security/server_credentials.h>
-#include <grpcpp/server.h>
-#include <grpcpp/server_builder.h>
-#include <grpcpp/server_context.h>
-
-#include "src/proto/grpc/testing/echo.grpc.pb.h"
-#include "test/core/util/port.h"
-#include "test/core/util/test_config.h"
-#include "test/cpp/end2end/test_service_impl.h"
-#include "test/cpp/util/proto_reflection_descriptor_database.h"
-
-#include <gtest/gtest.h>
-
-namespace grpc {
-namespace testing {
-
-class ProtoServerReflectionTest : public ::testing::Test {
- public:
- ProtoServerReflectionTest() {}
-
- void SetUp() override {
- port_ = grpc_pick_unused_port_or_die();
- ref_desc_pool_ = protobuf::DescriptorPool::generated_pool();
-
- ServerBuilder builder;
+/*
+ *
+ * Copyright 2016 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpc/grpc.h>
+#include <grpcpp/channel.h>
+#include <grpcpp/client_context.h>
+#include <grpcpp/create_channel.h>
+#include <grpcpp/ext/proto_server_reflection_plugin.h>
+#include <grpcpp/security/credentials.h>
+#include <grpcpp/security/server_credentials.h>
+#include <grpcpp/server.h>
+#include <grpcpp/server_builder.h>
+#include <grpcpp/server_context.h>
+
+#include "src/proto/grpc/testing/echo.grpc.pb.h"
+#include "test/core/util/port.h"
+#include "test/core/util/test_config.h"
+#include "test/cpp/end2end/test_service_impl.h"
+#include "test/cpp/util/proto_reflection_descriptor_database.h"
+
+#include <gtest/gtest.h>
+
+namespace grpc {
+namespace testing {
+
+class ProtoServerReflectionTest : public ::testing::Test {
+ public:
+ ProtoServerReflectionTest() {}
+
+ void SetUp() override {
+ port_ = grpc_pick_unused_port_or_die();
+ ref_desc_pool_ = protobuf::DescriptorPool::generated_pool();
+
+ ServerBuilder builder;
TString server_address = "localhost:" + to_string(port_);
- builder.AddListeningPort(server_address, InsecureServerCredentials());
- server_ = builder.BuildAndStart();
- }
-
- void ResetStub() {
- string target = "dns:localhost:" + to_string(port_);
- std::shared_ptr<Channel> channel =
- grpc::CreateChannel(target, InsecureChannelCredentials());
- stub_ = grpc::testing::EchoTestService::NewStub(channel);
- desc_db_.reset(new ProtoReflectionDescriptorDatabase(channel));
- desc_pool_.reset(new protobuf::DescriptorPool(desc_db_.get()));
- }
-
- string to_string(const int number) {
- std::stringstream strs;
- strs << number;
- return strs.str();
- }
-
+ builder.AddListeningPort(server_address, InsecureServerCredentials());
+ server_ = builder.BuildAndStart();
+ }
+
+ void ResetStub() {
+ string target = "dns:localhost:" + to_string(port_);
+ std::shared_ptr<Channel> channel =
+ grpc::CreateChannel(target, InsecureChannelCredentials());
+ stub_ = grpc::testing::EchoTestService::NewStub(channel);
+ desc_db_.reset(new ProtoReflectionDescriptorDatabase(channel));
+ desc_pool_.reset(new protobuf::DescriptorPool(desc_db_.get()));
+ }
+
+ string to_string(const int number) {
+ std::stringstream strs;
+ strs << number;
+ return strs.str();
+ }
+
void CompareService(const TString& service) {
- const protobuf::ServiceDescriptor* service_desc =
- desc_pool_->FindServiceByName(service);
- const protobuf::ServiceDescriptor* ref_service_desc =
- ref_desc_pool_->FindServiceByName(service);
- EXPECT_TRUE(service_desc != nullptr);
- EXPECT_TRUE(ref_service_desc != nullptr);
- EXPECT_EQ(service_desc->DebugString(), ref_service_desc->DebugString());
-
- const protobuf::FileDescriptor* file_desc = service_desc->file();
- if (known_files_.find(file_desc->package() + "/" + file_desc->name()) !=
- known_files_.end()) {
- EXPECT_EQ(file_desc->DebugString(),
- ref_service_desc->file()->DebugString());
- known_files_.insert(file_desc->package() + "/" + file_desc->name());
- }
-
- for (int i = 0; i < service_desc->method_count(); ++i) {
- CompareMethod(service_desc->method(i)->full_name());
- }
- }
-
+ const protobuf::ServiceDescriptor* service_desc =
+ desc_pool_->FindServiceByName(service);
+ const protobuf::ServiceDescriptor* ref_service_desc =
+ ref_desc_pool_->FindServiceByName(service);
+ EXPECT_TRUE(service_desc != nullptr);
+ EXPECT_TRUE(ref_service_desc != nullptr);
+ EXPECT_EQ(service_desc->DebugString(), ref_service_desc->DebugString());
+
+ const protobuf::FileDescriptor* file_desc = service_desc->file();
+ if (known_files_.find(file_desc->package() + "/" + file_desc->name()) !=
+ known_files_.end()) {
+ EXPECT_EQ(file_desc->DebugString(),
+ ref_service_desc->file()->DebugString());
+ known_files_.insert(file_desc->package() + "/" + file_desc->name());
+ }
+
+ for (int i = 0; i < service_desc->method_count(); ++i) {
+ CompareMethod(service_desc->method(i)->full_name());
+ }
+ }
+
void CompareMethod(const TString& method) {
- const protobuf::MethodDescriptor* method_desc =
- desc_pool_->FindMethodByName(method);
- const protobuf::MethodDescriptor* ref_method_desc =
- ref_desc_pool_->FindMethodByName(method);
- EXPECT_TRUE(method_desc != nullptr);
- EXPECT_TRUE(ref_method_desc != nullptr);
- EXPECT_EQ(method_desc->DebugString(), ref_method_desc->DebugString());
-
- CompareType(method_desc->input_type()->full_name());
- CompareType(method_desc->output_type()->full_name());
- }
-
+ const protobuf::MethodDescriptor* method_desc =
+ desc_pool_->FindMethodByName(method);
+ const protobuf::MethodDescriptor* ref_method_desc =
+ ref_desc_pool_->FindMethodByName(method);
+ EXPECT_TRUE(method_desc != nullptr);
+ EXPECT_TRUE(ref_method_desc != nullptr);
+ EXPECT_EQ(method_desc->DebugString(), ref_method_desc->DebugString());
+
+ CompareType(method_desc->input_type()->full_name());
+ CompareType(method_desc->output_type()->full_name());
+ }
+
void CompareType(const TString& type) {
- if (known_types_.find(type) != known_types_.end()) {
- return;
- }
-
- const protobuf::Descriptor* desc = desc_pool_->FindMessageTypeByName(type);
- const protobuf::Descriptor* ref_desc =
- ref_desc_pool_->FindMessageTypeByName(type);
- EXPECT_TRUE(desc != nullptr);
- EXPECT_TRUE(ref_desc != nullptr);
- EXPECT_EQ(desc->DebugString(), ref_desc->DebugString());
- }
-
- protected:
- std::unique_ptr<Server> server_;
- std::unique_ptr<grpc::testing::EchoTestService::Stub> stub_;
- std::unique_ptr<ProtoReflectionDescriptorDatabase> desc_db_;
- std::unique_ptr<protobuf::DescriptorPool> desc_pool_;
- std::unordered_set<string> known_files_;
- std::unordered_set<string> known_types_;
- const protobuf::DescriptorPool* ref_desc_pool_;
- int port_;
- reflection::ProtoServerReflectionPlugin plugin_;
-};
-
-TEST_F(ProtoServerReflectionTest, CheckResponseWithLocalDescriptorPool) {
- ResetStub();
-
+ if (known_types_.find(type) != known_types_.end()) {
+ return;
+ }
+
+ const protobuf::Descriptor* desc = desc_pool_->FindMessageTypeByName(type);
+ const protobuf::Descriptor* ref_desc =
+ ref_desc_pool_->FindMessageTypeByName(type);
+ EXPECT_TRUE(desc != nullptr);
+ EXPECT_TRUE(ref_desc != nullptr);
+ EXPECT_EQ(desc->DebugString(), ref_desc->DebugString());
+ }
+
+ protected:
+ std::unique_ptr<Server> server_;
+ std::unique_ptr<grpc::testing::EchoTestService::Stub> stub_;
+ std::unique_ptr<ProtoReflectionDescriptorDatabase> desc_db_;
+ std::unique_ptr<protobuf::DescriptorPool> desc_pool_;
+ std::unordered_set<string> known_files_;
+ std::unordered_set<string> known_types_;
+ const protobuf::DescriptorPool* ref_desc_pool_;
+ int port_;
+ reflection::ProtoServerReflectionPlugin plugin_;
+};
+
+TEST_F(ProtoServerReflectionTest, CheckResponseWithLocalDescriptorPool) {
+ ResetStub();
+
std::vector<TString> services;
- desc_db_->GetServices(&services);
- // The service list has at least one service (reflection service).
- EXPECT_TRUE(services.size() > 0);
-
- for (auto it = services.begin(); it != services.end(); ++it) {
- CompareService(*it);
- }
-}
-
-} // namespace testing
-} // namespace grpc
-
-int main(int argc, char** argv) {
- grpc::testing::TestEnvironment env(argc, argv);
- ::testing::InitGoogleTest(&argc, argv);
- return RUN_ALL_TESTS();
-}
+ desc_db_->GetServices(&services);
+ // The service list has at least one service (reflection service).
+ EXPECT_TRUE(services.size() > 0);
+
+ for (auto it = services.begin(); it != services.end(); ++it) {
+ CompareService(*it);
+ }
+}
+
+} // namespace testing
+} // namespace grpc
+
+int main(int argc, char** argv) {
+ grpc::testing::TestEnvironment env(argc, argv);
+ ::testing::InitGoogleTest(&argc, argv);
+ return RUN_ALL_TESTS();
+}
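
ProtoServerReflectionTest above verifies reflection by resolving each service, method and type through a pool backed by ProtoReflectionDescriptorDatabase and comparing DebugString() output against the locally generated pool. The sketch below shows only the lookup-and-DebugString half against the generated pool, using a well-known protobuf type; it assumes the full protobuf runtime (libprotobuf) is linked, and the over-the-wire side is omitted.

// A small sketch of the DescriptorPool lookup used by CompareType(), assuming
// the full protobuf runtime is linked so descriptor.proto's types are
// registered in the generated pool.
#include <cassert>
#include <iostream>

#include <google/protobuf/descriptor.h>

int main() {
  const google::protobuf::DescriptorPool* pool =
      google::protobuf::DescriptorPool::generated_pool();
  const google::protobuf::Descriptor* desc =
      pool->FindMessageTypeByName("google.protobuf.FileDescriptorProto");
  assert(desc != nullptr);
  // DebugString() renders the full message definition; two pools that agree
  // on a type produce identical strings, which is what the test compares.
  std::cout << desc->DebugString() << std::endl;
  return 0;
}
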
diff --git a/contrib/libs/grpc/test/cpp/end2end/raw_end2end_test.cc b/contrib/libs/grpc/test/cpp/end2end/raw_end2end_test.cc
index 42df83324b..184dc1e5f5 100644
--- a/contrib/libs/grpc/test/cpp/end2end/raw_end2end_test.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/raw_end2end_test.cc
@@ -1,370 +1,370 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include <cinttypes>
-#include <memory>
-#include <thread>
-
-#include <grpc/grpc.h>
-#include <grpc/support/alloc.h>
-#include <grpc/support/log.h>
-#include <grpc/support/time.h>
-#include <grpcpp/channel.h>
-#include <grpcpp/client_context.h>
-#include <grpcpp/create_channel.h>
-#include <grpcpp/server.h>
-#include <grpcpp/server_builder.h>
-#include <grpcpp/server_context.h>
-
-#include "src/core/lib/gpr/env.h"
-#include "src/core/lib/iomgr/port.h"
-#include "src/proto/grpc/testing/duplicate/echo_duplicate.grpc.pb.h"
-#include "src/proto/grpc/testing/echo.grpc.pb.h"
-#include "test/core/util/port.h"
-#include "test/core/util/test_config.h"
-#include "test/cpp/util/byte_buffer_proto_helper.h"
-#include "test/cpp/util/string_ref_helper.h"
-
-#include <gtest/gtest.h>
-
-using grpc::testing::EchoRequest;
-using grpc::testing::EchoResponse;
-
-namespace grpc {
-namespace testing {
-
-namespace {
-
-void* tag(int i) { return (void*)static_cast<intptr_t>(i); }
-int detag(void* p) { return static_cast<int>(reinterpret_cast<intptr_t>(p)); }
-
-class Verifier {
- public:
- Verifier() {}
-
- // Expect sets the expected ok value for a specific tag
- Verifier& Expect(int i, bool expect_ok) {
- expectations_[tag(i)] = expect_ok;
- return *this;
- }
-
- // Next waits for 1 async tag to complete, checks its
- // expectations, and returns the tag
- int Next(CompletionQueue* cq, bool ignore_ok) {
- bool ok;
- void* got_tag;
- EXPECT_TRUE(cq->Next(&got_tag, &ok));
- GotTag(got_tag, ok, ignore_ok);
- return detag(got_tag);
- }
-
- // Verify keeps calling Next until all currently set
- // expected tags are complete
- void Verify(CompletionQueue* cq) {
- GPR_ASSERT(!expectations_.empty());
- while (!expectations_.empty()) {
- Next(cq, false);
- }
- }
-
- private:
- void GotTag(void* got_tag, bool ok, bool ignore_ok) {
- auto it = expectations_.find(got_tag);
- if (it != expectations_.end()) {
- if (!ignore_ok) {
- EXPECT_EQ(it->second, ok);
- }
- expectations_.erase(it);
- }
- }
-
- std::map<void*, bool> expectations_;
-};
-
-class RawEnd2EndTest : public ::testing::Test {
- protected:
- RawEnd2EndTest() {}
-
- void SetUp() override {
- port_ = grpc_pick_unused_port_or_die();
- server_address_ << "localhost:" << port_;
- }
-
- void TearDown() override {
- server_->Shutdown();
- void* ignored_tag;
- bool ignored_ok;
- cq_->Shutdown();
- while (cq_->Next(&ignored_tag, &ignored_ok))
- ;
- stub_.reset();
- grpc_recycle_unused_port(port_);
- }
-
- template <typename ServerType>
- std::unique_ptr<ServerType> BuildAndStartServer() {
- ServerBuilder builder;
- builder.AddListeningPort(server_address_.str(),
- grpc::InsecureServerCredentials());
- std::unique_ptr<ServerType> service(new ServerType());
- builder.RegisterService(service.get());
- cq_ = builder.AddCompletionQueue();
- server_ = builder.BuildAndStart();
- return service;
- }
-
- void ResetStub() {
- ChannelArguments args;
- std::shared_ptr<Channel> channel = grpc::CreateChannel(
- server_address_.str(), grpc::InsecureChannelCredentials());
- stub_ = grpc::testing::EchoTestService::NewStub(channel);
- }
-
- std::unique_ptr<ServerCompletionQueue> cq_;
- std::unique_ptr<grpc::testing::EchoTestService::Stub> stub_;
- std::unique_ptr<Server> server_;
- std::ostringstream server_address_;
- int port_;
-
- // For the client application to populate and send to server.
- EchoRequest send_request_;
- ::grpc::ByteBuffer send_request_buffer_;
-
- // For the server to give to gRPC to be populated by incoming request
- // from client.
- EchoRequest recv_request_;
- ::grpc::ByteBuffer recv_request_buffer_;
-
- // For the server application to populate and send back to client.
- EchoResponse send_response_;
- ::grpc::ByteBuffer send_response_buffer_;
-
- // For the client to give to gRPC to be populated by incoming response
- // from server.
- EchoResponse recv_response_;
- ::grpc::ByteBuffer recv_response_buffer_;
- Status recv_status_;
-
- // Both sides need contexts
- ClientContext cli_ctx_;
- ServerContext srv_ctx_;
-};
-
-// Regular Async, both peers use proto
-TEST_F(RawEnd2EndTest, PureAsyncService) {
- typedef grpc::testing::EchoTestService::AsyncService SType;
- ResetStub();
- auto service = BuildAndStartServer<SType>();
- grpc::ServerAsyncResponseWriter<EchoResponse> response_writer(&srv_ctx_);
-
- send_request_.set_message("hello");
- std::unique_ptr<ClientAsyncResponseReader<EchoResponse>> response_reader(
- stub_->AsyncEcho(&cli_ctx_, send_request_, cq_.get()));
- service->RequestEcho(&srv_ctx_, &recv_request_, &response_writer, cq_.get(),
- cq_.get(), tag(2));
- response_reader->Finish(&recv_response_, &recv_status_, tag(4));
- Verifier().Expect(2, true).Verify(cq_.get());
- EXPECT_EQ(send_request_.message(), recv_request_.message());
- send_response_.set_message(recv_request_.message());
- response_writer.Finish(send_response_, Status::OK, tag(3));
- Verifier().Expect(3, true).Expect(4, true).Verify(cq_.get());
-
- EXPECT_EQ(send_response_.message(), recv_response_.message());
- EXPECT_TRUE(recv_status_.ok());
-}
-
-// Client uses proto, server uses generic codegen, unary
-TEST_F(RawEnd2EndTest, RawServerUnary) {
- typedef grpc::testing::EchoTestService::WithRawMethod_Echo<
- grpc::testing::EchoTestService::Service>
- SType;
- ResetStub();
- auto service = BuildAndStartServer<SType>();
- grpc::GenericServerAsyncResponseWriter response_writer(&srv_ctx_);
-
- send_request_.set_message("hello unary");
- std::unique_ptr<ClientAsyncResponseReader<EchoResponse>> response_reader(
- stub_->AsyncEcho(&cli_ctx_, send_request_, cq_.get()));
- service->RequestEcho(&srv_ctx_, &recv_request_buffer_, &response_writer,
- cq_.get(), cq_.get(), tag(2));
- response_reader->Finish(&recv_response_, &recv_status_, tag(4));
- Verifier().Expect(2, true).Verify(cq_.get());
- EXPECT_TRUE(ParseFromByteBuffer(&recv_request_buffer_, &recv_request_));
- EXPECT_EQ(send_request_.message(), recv_request_.message());
- send_response_.set_message(recv_request_.message());
- EXPECT_TRUE(
- SerializeToByteBufferInPlace(&send_response_, &send_response_buffer_));
- response_writer.Finish(send_response_buffer_, Status::OK, tag(3));
- Verifier().Expect(3, true).Expect(4, true).Verify(cq_.get());
-
- EXPECT_EQ(send_response_.message(), recv_response_.message());
- EXPECT_TRUE(recv_status_.ok());
-}
-
-// Client uses proto, server uses generic codegen, client streaming
-TEST_F(RawEnd2EndTest, RawServerClientStreaming) {
- typedef grpc::testing::EchoTestService::WithRawMethod_RequestStream<
- grpc::testing::EchoTestService::Service>
- SType;
- ResetStub();
- auto service = BuildAndStartServer<SType>();
-
- grpc::GenericServerAsyncReader srv_stream(&srv_ctx_);
-
- send_request_.set_message("hello client streaming");
- std::unique_ptr<ClientAsyncWriter<EchoRequest>> cli_stream(
- stub_->AsyncRequestStream(&cli_ctx_, &recv_response_, cq_.get(), tag(1)));
-
- service->RequestRequestStream(&srv_ctx_, &srv_stream, cq_.get(), cq_.get(),
- tag(2));
-
- Verifier().Expect(2, true).Expect(1, true).Verify(cq_.get());
-
- cli_stream->Write(send_request_, tag(3));
- srv_stream.Read(&recv_request_buffer_, tag(4));
- Verifier().Expect(3, true).Expect(4, true).Verify(cq_.get());
- ParseFromByteBuffer(&recv_request_buffer_, &recv_request_);
- EXPECT_EQ(send_request_.message(), recv_request_.message());
-
- cli_stream->Write(send_request_, tag(5));
- srv_stream.Read(&recv_request_buffer_, tag(6));
- Verifier().Expect(5, true).Expect(6, true).Verify(cq_.get());
-
- ParseFromByteBuffer(&recv_request_buffer_, &recv_request_);
- EXPECT_EQ(send_request_.message(), recv_request_.message());
- cli_stream->WritesDone(tag(7));
- srv_stream.Read(&recv_request_buffer_, tag(8));
- Verifier().Expect(7, true).Expect(8, false).Verify(cq_.get());
-
- ParseFromByteBuffer(&recv_request_buffer_, &recv_request_);
- send_response_.set_message(recv_request_.message());
- SerializeToByteBufferInPlace(&send_response_, &send_response_buffer_);
- srv_stream.Finish(send_response_buffer_, Status::OK, tag(9));
- cli_stream->Finish(&recv_status_, tag(10));
- Verifier().Expect(9, true).Expect(10, true).Verify(cq_.get());
-
- EXPECT_EQ(send_response_.message(), recv_response_.message());
- EXPECT_TRUE(recv_status_.ok());
-}
-
-// Client uses proto, server uses generic codegen, server streaming
-TEST_F(RawEnd2EndTest, RawServerServerStreaming) {
- typedef grpc::testing::EchoTestService::WithRawMethod_ResponseStream<
- grpc::testing::EchoTestService::Service>
- SType;
- ResetStub();
- auto service = BuildAndStartServer<SType>();
- grpc::GenericServerAsyncWriter srv_stream(&srv_ctx_);
-
- send_request_.set_message("hello server streaming");
- std::unique_ptr<ClientAsyncReader<EchoResponse>> cli_stream(
- stub_->AsyncResponseStream(&cli_ctx_, send_request_, cq_.get(), tag(1)));
-
- service->RequestResponseStream(&srv_ctx_, &recv_request_buffer_, &srv_stream,
- cq_.get(), cq_.get(), tag(2));
-
- Verifier().Expect(1, true).Expect(2, true).Verify(cq_.get());
- ParseFromByteBuffer(&recv_request_buffer_, &recv_request_);
- EXPECT_EQ(send_request_.message(), recv_request_.message());
-
- send_response_.set_message(recv_request_.message());
- SerializeToByteBufferInPlace(&send_response_, &send_response_buffer_);
- srv_stream.Write(send_response_buffer_, tag(3));
- cli_stream->Read(&recv_response_, tag(4));
- Verifier().Expect(3, true).Expect(4, true).Verify(cq_.get());
- EXPECT_EQ(send_response_.message(), recv_response_.message());
-
- srv_stream.Write(send_response_buffer_, tag(5));
- cli_stream->Read(&recv_response_, tag(6));
- Verifier().Expect(5, true).Expect(6, true).Verify(cq_.get());
- EXPECT_EQ(send_response_.message(), recv_response_.message());
-
- srv_stream.Finish(Status::OK, tag(7));
- cli_stream->Read(&recv_response_, tag(8));
- Verifier().Expect(7, true).Expect(8, false).Verify(cq_.get());
-
- cli_stream->Finish(&recv_status_, tag(9));
- Verifier().Expect(9, true).Verify(cq_.get());
-
- EXPECT_TRUE(recv_status_.ok());
-}
-
-// Client uses proto, server uses generic codegen, bidi streaming
-TEST_F(RawEnd2EndTest, RawServerBidiStreaming) {
- typedef grpc::testing::EchoTestService::WithRawMethod_BidiStream<
- grpc::testing::EchoTestService::Service>
- SType;
- ResetStub();
- auto service = BuildAndStartServer<SType>();
-
- grpc::GenericServerAsyncReaderWriter srv_stream(&srv_ctx_);
-
- send_request_.set_message("hello bidi streaming");
- std::unique_ptr<ClientAsyncReaderWriter<EchoRequest, EchoResponse>>
- cli_stream(stub_->AsyncBidiStream(&cli_ctx_, cq_.get(), tag(1)));
-
- service->RequestBidiStream(&srv_ctx_, &srv_stream, cq_.get(), cq_.get(),
- tag(2));
-
- Verifier().Expect(1, true).Expect(2, true).Verify(cq_.get());
-
- cli_stream->Write(send_request_, tag(3));
- srv_stream.Read(&recv_request_buffer_, tag(4));
- Verifier().Expect(3, true).Expect(4, true).Verify(cq_.get());
- ParseFromByteBuffer(&recv_request_buffer_, &recv_request_);
- EXPECT_EQ(send_request_.message(), recv_request_.message());
-
- send_response_.set_message(recv_request_.message());
- SerializeToByteBufferInPlace(&send_response_, &send_response_buffer_);
- srv_stream.Write(send_response_buffer_, tag(5));
- cli_stream->Read(&recv_response_, tag(6));
- Verifier().Expect(5, true).Expect(6, true).Verify(cq_.get());
- EXPECT_EQ(send_response_.message(), recv_response_.message());
-
- cli_stream->WritesDone(tag(7));
- srv_stream.Read(&recv_request_buffer_, tag(8));
- Verifier().Expect(7, true).Expect(8, false).Verify(cq_.get());
-
- srv_stream.Finish(Status::OK, tag(9));
- cli_stream->Finish(&recv_status_, tag(10));
- Verifier().Expect(9, true).Expect(10, true).Verify(cq_.get());
-
- EXPECT_TRUE(recv_status_.ok());
-}
-
-// Testing that this pattern compiles
-TEST_F(RawEnd2EndTest, CompileTest) {
- typedef grpc::testing::EchoTestService::WithRawMethod_Echo<
- grpc::testing::EchoTestService::AsyncService>
- SType;
- ResetStub();
- auto service = BuildAndStartServer<SType>();
-}
-
-} // namespace
-} // namespace testing
-} // namespace grpc
-
-int main(int argc, char** argv) {
- // Change the backup poll interval from 5s to 100ms to speed up the
- // ReconnectChannel test
- grpc::testing::TestEnvironment env(argc, argv);
- ::testing::InitGoogleTest(&argc, argv);
- int ret = RUN_ALL_TESTS();
- return ret;
-}
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <cinttypes>
+#include <memory>
+#include <thread>
+
+#include <grpc/grpc.h>
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/time.h>
+#include <grpcpp/channel.h>
+#include <grpcpp/client_context.h>
+#include <grpcpp/create_channel.h>
+#include <grpcpp/server.h>
+#include <grpcpp/server_builder.h>
+#include <grpcpp/server_context.h>
+
+#include "src/core/lib/gpr/env.h"
+#include "src/core/lib/iomgr/port.h"
+#include "src/proto/grpc/testing/duplicate/echo_duplicate.grpc.pb.h"
+#include "src/proto/grpc/testing/echo.grpc.pb.h"
+#include "test/core/util/port.h"
+#include "test/core/util/test_config.h"
+#include "test/cpp/util/byte_buffer_proto_helper.h"
+#include "test/cpp/util/string_ref_helper.h"
+
+#include <gtest/gtest.h>
+
+using grpc::testing::EchoRequest;
+using grpc::testing::EchoResponse;
+
+namespace grpc {
+namespace testing {
+
+namespace {
+
+void* tag(int i) { return (void*)static_cast<intptr_t>(i); }
+int detag(void* p) { return static_cast<int>(reinterpret_cast<intptr_t>(p)); }
+
+class Verifier {
+ public:
+ Verifier() {}
+
+ // Expect sets the expected ok value for a specific tag
+ Verifier& Expect(int i, bool expect_ok) {
+ expectations_[tag(i)] = expect_ok;
+ return *this;
+ }
+
+ // Next waits for 1 async tag to complete, checks its
+ // expectations, and returns the tag
+ int Next(CompletionQueue* cq, bool ignore_ok) {
+ bool ok;
+ void* got_tag;
+ EXPECT_TRUE(cq->Next(&got_tag, &ok));
+ GotTag(got_tag, ok, ignore_ok);
+ return detag(got_tag);
+ }
+
+ // Verify keeps calling Next until all currently set
+ // expected tags are complete
+ void Verify(CompletionQueue* cq) {
+ GPR_ASSERT(!expectations_.empty());
+ while (!expectations_.empty()) {
+ Next(cq, false);
+ }
+ }
+
+ private:
+ void GotTag(void* got_tag, bool ok, bool ignore_ok) {
+ auto it = expectations_.find(got_tag);
+ if (it != expectations_.end()) {
+ if (!ignore_ok) {
+ EXPECT_EQ(it->second, ok);
+ }
+ expectations_.erase(it);
+ }
+ }
+
+ std::map<void*, bool> expectations_;
+};
+
+class RawEnd2EndTest : public ::testing::Test {
+ protected:
+ RawEnd2EndTest() {}
+
+ void SetUp() override {
+ port_ = grpc_pick_unused_port_or_die();
+ server_address_ << "localhost:" << port_;
+ }
+
+ void TearDown() override {
+ server_->Shutdown();
+ void* ignored_tag;
+ bool ignored_ok;
+ cq_->Shutdown();
+ while (cq_->Next(&ignored_tag, &ignored_ok))
+ ;
+ stub_.reset();
+ grpc_recycle_unused_port(port_);
+ }
+
+ template <typename ServerType>
+ std::unique_ptr<ServerType> BuildAndStartServer() {
+ ServerBuilder builder;
+ builder.AddListeningPort(server_address_.str(),
+ grpc::InsecureServerCredentials());
+ std::unique_ptr<ServerType> service(new ServerType());
+ builder.RegisterService(service.get());
+ cq_ = builder.AddCompletionQueue();
+ server_ = builder.BuildAndStart();
+ return service;
+ }
+
+ void ResetStub() {
+ ChannelArguments args;
+ std::shared_ptr<Channel> channel = grpc::CreateChannel(
+ server_address_.str(), grpc::InsecureChannelCredentials());
+ stub_ = grpc::testing::EchoTestService::NewStub(channel);
+ }
+
+ std::unique_ptr<ServerCompletionQueue> cq_;
+ std::unique_ptr<grpc::testing::EchoTestService::Stub> stub_;
+ std::unique_ptr<Server> server_;
+ std::ostringstream server_address_;
+ int port_;
+
+ // For the client application to populate and send to server.
+ EchoRequest send_request_;
+ ::grpc::ByteBuffer send_request_buffer_;
+
+ // For the server to give to gRPC to be populated by incoming request
+ // from client.
+ EchoRequest recv_request_;
+ ::grpc::ByteBuffer recv_request_buffer_;
+
+ // For the server application to populate and send back to client.
+ EchoResponse send_response_;
+ ::grpc::ByteBuffer send_response_buffer_;
+
+ // For the client to give to gRPC to be populated by incoming response
+ // from server.
+ EchoResponse recv_response_;
+ ::grpc::ByteBuffer recv_response_buffer_;
+ Status recv_status_;
+
+ // Both sides need contexts
+ ClientContext cli_ctx_;
+ ServerContext srv_ctx_;
+};
+
+// Regular Async, both peers use proto
+TEST_F(RawEnd2EndTest, PureAsyncService) {
+ typedef grpc::testing::EchoTestService::AsyncService SType;
+ ResetStub();
+ auto service = BuildAndStartServer<SType>();
+ grpc::ServerAsyncResponseWriter<EchoResponse> response_writer(&srv_ctx_);
+
+ send_request_.set_message("hello");
+ std::unique_ptr<ClientAsyncResponseReader<EchoResponse>> response_reader(
+ stub_->AsyncEcho(&cli_ctx_, send_request_, cq_.get()));
+ service->RequestEcho(&srv_ctx_, &recv_request_, &response_writer, cq_.get(),
+ cq_.get(), tag(2));
+ response_reader->Finish(&recv_response_, &recv_status_, tag(4));
+ Verifier().Expect(2, true).Verify(cq_.get());
+ EXPECT_EQ(send_request_.message(), recv_request_.message());
+ send_response_.set_message(recv_request_.message());
+ response_writer.Finish(send_response_, Status::OK, tag(3));
+ Verifier().Expect(3, true).Expect(4, true).Verify(cq_.get());
+
+ EXPECT_EQ(send_response_.message(), recv_response_.message());
+ EXPECT_TRUE(recv_status_.ok());
+}
+
+// Client uses proto, server uses generic codegen, unary
+TEST_F(RawEnd2EndTest, RawServerUnary) {
+ typedef grpc::testing::EchoTestService::WithRawMethod_Echo<
+ grpc::testing::EchoTestService::Service>
+ SType;
+ ResetStub();
+ auto service = BuildAndStartServer<SType>();
+ grpc::GenericServerAsyncResponseWriter response_writer(&srv_ctx_);
+
+ send_request_.set_message("hello unary");
+ std::unique_ptr<ClientAsyncResponseReader<EchoResponse>> response_reader(
+ stub_->AsyncEcho(&cli_ctx_, send_request_, cq_.get()));
+ service->RequestEcho(&srv_ctx_, &recv_request_buffer_, &response_writer,
+ cq_.get(), cq_.get(), tag(2));
+ response_reader->Finish(&recv_response_, &recv_status_, tag(4));
+ Verifier().Expect(2, true).Verify(cq_.get());
+ EXPECT_TRUE(ParseFromByteBuffer(&recv_request_buffer_, &recv_request_));
+ EXPECT_EQ(send_request_.message(), recv_request_.message());
+ send_response_.set_message(recv_request_.message());
+ EXPECT_TRUE(
+ SerializeToByteBufferInPlace(&send_response_, &send_response_buffer_));
+ response_writer.Finish(send_response_buffer_, Status::OK, tag(3));
+ Verifier().Expect(3, true).Expect(4, true).Verify(cq_.get());
+
+ EXPECT_EQ(send_response_.message(), recv_response_.message());
+ EXPECT_TRUE(recv_status_.ok());
+}
+
+// Client uses proto, server uses generic codegen, client streaming
+TEST_F(RawEnd2EndTest, RawServerClientStreaming) {
+ typedef grpc::testing::EchoTestService::WithRawMethod_RequestStream<
+ grpc::testing::EchoTestService::Service>
+ SType;
+ ResetStub();
+ auto service = BuildAndStartServer<SType>();
+
+ grpc::GenericServerAsyncReader srv_stream(&srv_ctx_);
+
+ send_request_.set_message("hello client streaming");
+ std::unique_ptr<ClientAsyncWriter<EchoRequest>> cli_stream(
+ stub_->AsyncRequestStream(&cli_ctx_, &recv_response_, cq_.get(), tag(1)));
+
+ service->RequestRequestStream(&srv_ctx_, &srv_stream, cq_.get(), cq_.get(),
+ tag(2));
+
+ Verifier().Expect(2, true).Expect(1, true).Verify(cq_.get());
+
+ cli_stream->Write(send_request_, tag(3));
+ srv_stream.Read(&recv_request_buffer_, tag(4));
+ Verifier().Expect(3, true).Expect(4, true).Verify(cq_.get());
+ ParseFromByteBuffer(&recv_request_buffer_, &recv_request_);
+ EXPECT_EQ(send_request_.message(), recv_request_.message());
+
+ cli_stream->Write(send_request_, tag(5));
+ srv_stream.Read(&recv_request_buffer_, tag(6));
+ Verifier().Expect(5, true).Expect(6, true).Verify(cq_.get());
+
+ ParseFromByteBuffer(&recv_request_buffer_, &recv_request_);
+ EXPECT_EQ(send_request_.message(), recv_request_.message());
+ cli_stream->WritesDone(tag(7));
+ srv_stream.Read(&recv_request_buffer_, tag(8));
+ Verifier().Expect(7, true).Expect(8, false).Verify(cq_.get());
+
+ ParseFromByteBuffer(&recv_request_buffer_, &recv_request_);
+ send_response_.set_message(recv_request_.message());
+ SerializeToByteBufferInPlace(&send_response_, &send_response_buffer_);
+ srv_stream.Finish(send_response_buffer_, Status::OK, tag(9));
+ cli_stream->Finish(&recv_status_, tag(10));
+ Verifier().Expect(9, true).Expect(10, true).Verify(cq_.get());
+
+ EXPECT_EQ(send_response_.message(), recv_response_.message());
+ EXPECT_TRUE(recv_status_.ok());
+}
+
+// Client uses proto, server uses generic codegen, server streaming
+TEST_F(RawEnd2EndTest, RawServerServerStreaming) {
+ typedef grpc::testing::EchoTestService::WithRawMethod_ResponseStream<
+ grpc::testing::EchoTestService::Service>
+ SType;
+ ResetStub();
+ auto service = BuildAndStartServer<SType>();
+ grpc::GenericServerAsyncWriter srv_stream(&srv_ctx_);
+
+ send_request_.set_message("hello server streaming");
+ std::unique_ptr<ClientAsyncReader<EchoResponse>> cli_stream(
+ stub_->AsyncResponseStream(&cli_ctx_, send_request_, cq_.get(), tag(1)));
+
+ service->RequestResponseStream(&srv_ctx_, &recv_request_buffer_, &srv_stream,
+ cq_.get(), cq_.get(), tag(2));
+
+ Verifier().Expect(1, true).Expect(2, true).Verify(cq_.get());
+ ParseFromByteBuffer(&recv_request_buffer_, &recv_request_);
+ EXPECT_EQ(send_request_.message(), recv_request_.message());
+
+ send_response_.set_message(recv_request_.message());
+ SerializeToByteBufferInPlace(&send_response_, &send_response_buffer_);
+ srv_stream.Write(send_response_buffer_, tag(3));
+ cli_stream->Read(&recv_response_, tag(4));
+ Verifier().Expect(3, true).Expect(4, true).Verify(cq_.get());
+ EXPECT_EQ(send_response_.message(), recv_response_.message());
+
+ srv_stream.Write(send_response_buffer_, tag(5));
+ cli_stream->Read(&recv_response_, tag(6));
+ Verifier().Expect(5, true).Expect(6, true).Verify(cq_.get());
+ EXPECT_EQ(send_response_.message(), recv_response_.message());
+
+ srv_stream.Finish(Status::OK, tag(7));
+ cli_stream->Read(&recv_response_, tag(8));
+ Verifier().Expect(7, true).Expect(8, false).Verify(cq_.get());
+
+ cli_stream->Finish(&recv_status_, tag(9));
+ Verifier().Expect(9, true).Verify(cq_.get());
+
+ EXPECT_TRUE(recv_status_.ok());
+}
+
+// Client uses proto, server uses generic codegen, bidi streaming
+TEST_F(RawEnd2EndTest, RawServerBidiStreaming) {
+ typedef grpc::testing::EchoTestService::WithRawMethod_BidiStream<
+ grpc::testing::EchoTestService::Service>
+ SType;
+ ResetStub();
+ auto service = BuildAndStartServer<SType>();
+
+ grpc::GenericServerAsyncReaderWriter srv_stream(&srv_ctx_);
+
+ send_request_.set_message("hello bidi streaming");
+ std::unique_ptr<ClientAsyncReaderWriter<EchoRequest, EchoResponse>>
+ cli_stream(stub_->AsyncBidiStream(&cli_ctx_, cq_.get(), tag(1)));
+
+ service->RequestBidiStream(&srv_ctx_, &srv_stream, cq_.get(), cq_.get(),
+ tag(2));
+
+ Verifier().Expect(1, true).Expect(2, true).Verify(cq_.get());
+
+ cli_stream->Write(send_request_, tag(3));
+ srv_stream.Read(&recv_request_buffer_, tag(4));
+ Verifier().Expect(3, true).Expect(4, true).Verify(cq_.get());
+ ParseFromByteBuffer(&recv_request_buffer_, &recv_request_);
+ EXPECT_EQ(send_request_.message(), recv_request_.message());
+
+ send_response_.set_message(recv_request_.message());
+ SerializeToByteBufferInPlace(&send_response_, &send_response_buffer_);
+ srv_stream.Write(send_response_buffer_, tag(5));
+ cli_stream->Read(&recv_response_, tag(6));
+ Verifier().Expect(5, true).Expect(6, true).Verify(cq_.get());
+ EXPECT_EQ(send_response_.message(), recv_response_.message());
+
+ cli_stream->WritesDone(tag(7));
+ srv_stream.Read(&recv_request_buffer_, tag(8));
+ Verifier().Expect(7, true).Expect(8, false).Verify(cq_.get());
+
+ srv_stream.Finish(Status::OK, tag(9));
+ cli_stream->Finish(&recv_status_, tag(10));
+ Verifier().Expect(9, true).Expect(10, true).Verify(cq_.get());
+
+ EXPECT_TRUE(recv_status_.ok());
+}
+
+// Testing that this pattern compiles
+TEST_F(RawEnd2EndTest, CompileTest) {
+ typedef grpc::testing::EchoTestService::WithRawMethod_Echo<
+ grpc::testing::EchoTestService::AsyncService>
+ SType;
+ ResetStub();
+ auto service = BuildAndStartServer<SType>();
+}
+
+} // namespace
+} // namespace testing
+} // namespace grpc
+
+int main(int argc, char** argv) {
+ // Change the backup poll interval from 5s to 100ms to speed up the
+ // ReconnectChannel test
+ grpc::testing::TestEnvironment env(argc, argv);
+ ::testing::InitGoogleTest(&argc, argv);
+ int ret = RUN_ALL_TESTS();
+ return ret;
+}
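
raw_end2end_test.cc above threads every asynchronous operation through small integer tags packed into void* completion-queue tags, then drains a map of (tag, ok) expectations in Verifier as events complete. The sketch below reproduces that bookkeeping with a local event queue standing in for CompletionQueue::Next; it is illustrative only, not part of the test code.

// A compact sketch of the tag/Verifier bookkeeping, with a local queue of
// (tag, ok) pairs standing in for CompletionQueue::Next.
#include <cassert>
#include <cstdint>
#include <map>
#include <queue>
#include <utility>

void* tag(int i) { return reinterpret_cast<void*>(static_cast<intptr_t>(i)); }
int detag(void* p) { return static_cast<int>(reinterpret_cast<intptr_t>(p)); }

int main() {
  // Expected (tag, ok) pairs, keyed the same way Verifier::Expect stores them.
  std::map<void*, bool> expectations = {{tag(3), true}, {tag(4), true}};

  // Pretend completion-queue events, delivered in arbitrary order.
  std::queue<std::pair<void*, bool>> events;
  events.push({tag(4), true});
  events.push({tag(3), true});

  // Verifier::Verify: keep consuming events until every expectation is met.
  while (!expectations.empty()) {
    std::pair<void*, bool> ev = events.front();
    events.pop();
    auto it = expectations.find(ev.first);
    assert(it != expectations.end());
    assert(it->second == ev.second);  // mirrors the EXPECT_EQ in GotTag()
    expectations.erase(it);
  }
  return 0;
}
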
diff --git a/contrib/libs/grpc/test/cpp/end2end/server_builder_plugin_test.cc b/contrib/libs/grpc/test/cpp/end2end/server_builder_plugin_test.cc
index cc368bbb90..004902cad3 100644
--- a/contrib/libs/grpc/test/cpp/end2end/server_builder_plugin_test.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/server_builder_plugin_test.cc
@@ -1,265 +1,265 @@
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include <thread>
-
-#include <grpc/grpc.h>
-#include <grpcpp/channel.h>
-#include <grpcpp/client_context.h>
-#include <grpcpp/create_channel.h>
-#include <grpcpp/impl/server_builder_option.h>
-#include <grpcpp/impl/server_builder_plugin.h>
-#include <grpcpp/impl/server_initializer.h>
-#include <grpcpp/security/credentials.h>
-#include <grpcpp/security/server_credentials.h>
-#include <grpcpp/server.h>
-#include <grpcpp/server_builder.h>
-#include <grpcpp/server_context.h>
-
-#include "src/proto/grpc/testing/echo.grpc.pb.h"
-#include "test/core/util/port.h"
-#include "test/core/util/test_config.h"
-#include "test/cpp/end2end/test_service_impl.h"
-
-#include <gtest/gtest.h>
-
-#define PLUGIN_NAME "TestServerBuilderPlugin"
-
-namespace grpc {
-namespace testing {
-
-class TestServerBuilderPlugin : public ServerBuilderPlugin {
- public:
- TestServerBuilderPlugin() : service_(new TestServiceImpl()) {
- init_server_is_called_ = false;
- finish_is_called_ = false;
- change_arguments_is_called_ = false;
- register_service_ = false;
- }
-
+/*
+ *
+ * Copyright 2016 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <thread>
+
+#include <grpc/grpc.h>
+#include <grpcpp/channel.h>
+#include <grpcpp/client_context.h>
+#include <grpcpp/create_channel.h>
+#include <grpcpp/impl/server_builder_option.h>
+#include <grpcpp/impl/server_builder_plugin.h>
+#include <grpcpp/impl/server_initializer.h>
+#include <grpcpp/security/credentials.h>
+#include <grpcpp/security/server_credentials.h>
+#include <grpcpp/server.h>
+#include <grpcpp/server_builder.h>
+#include <grpcpp/server_context.h>
+
+#include "src/proto/grpc/testing/echo.grpc.pb.h"
+#include "test/core/util/port.h"
+#include "test/core/util/test_config.h"
+#include "test/cpp/end2end/test_service_impl.h"
+
+#include <gtest/gtest.h>
+
+#define PLUGIN_NAME "TestServerBuilderPlugin"
+
+namespace grpc {
+namespace testing {
+
+class TestServerBuilderPlugin : public ServerBuilderPlugin {
+ public:
+ TestServerBuilderPlugin() : service_(new TestServiceImpl()) {
+ init_server_is_called_ = false;
+ finish_is_called_ = false;
+ change_arguments_is_called_ = false;
+ register_service_ = false;
+ }
+
TString name() override { return PLUGIN_NAME; }
-
- void InitServer(ServerInitializer* si) override {
- init_server_is_called_ = true;
- if (register_service_) {
- si->RegisterService(service_);
- }
- }
-
- void Finish(ServerInitializer* /*si*/) override { finish_is_called_ = true; }
-
+
+ void InitServer(ServerInitializer* si) override {
+ init_server_is_called_ = true;
+ if (register_service_) {
+ si->RegisterService(service_);
+ }
+ }
+
+ void Finish(ServerInitializer* /*si*/) override { finish_is_called_ = true; }
+
void ChangeArguments(const TString& /*name*/, void* /*value*/) override {
- change_arguments_is_called_ = true;
- }
-
- bool has_async_methods() const override {
- if (register_service_) {
- return service_->has_async_methods();
- }
- return false;
- }
-
- bool has_sync_methods() const override {
- if (register_service_) {
- return service_->has_synchronous_methods();
- }
- return false;
- }
-
- void SetRegisterService() { register_service_ = true; }
-
- bool init_server_is_called() { return init_server_is_called_; }
- bool finish_is_called() { return finish_is_called_; }
- bool change_arguments_is_called() { return change_arguments_is_called_; }
-
- private:
- bool init_server_is_called_;
- bool finish_is_called_;
- bool change_arguments_is_called_;
- bool register_service_;
- std::shared_ptr<TestServiceImpl> service_;
-};
-
-class InsertPluginServerBuilderOption : public ServerBuilderOption {
- public:
- InsertPluginServerBuilderOption() { register_service_ = false; }
-
- void UpdateArguments(ChannelArguments* /*arg*/) override {}
-
- void UpdatePlugins(
- std::vector<std::unique_ptr<ServerBuilderPlugin>>* plugins) override {
- plugins->clear();
-
- std::unique_ptr<TestServerBuilderPlugin> plugin(
- new TestServerBuilderPlugin());
- if (register_service_) plugin->SetRegisterService();
- plugins->emplace_back(std::move(plugin));
- }
-
- void SetRegisterService() { register_service_ = true; }
-
- private:
- bool register_service_;
-};
-
-std::unique_ptr<ServerBuilderPlugin> CreateTestServerBuilderPlugin() {
- return std::unique_ptr<ServerBuilderPlugin>(new TestServerBuilderPlugin());
-}
-
-// Force AddServerBuilderPlugin() to be called at static initialization time.
-struct StaticTestPluginInitializer {
+ change_arguments_is_called_ = true;
+ }
+
+ bool has_async_methods() const override {
+ if (register_service_) {
+ return service_->has_async_methods();
+ }
+ return false;
+ }
+
+ bool has_sync_methods() const override {
+ if (register_service_) {
+ return service_->has_synchronous_methods();
+ }
+ return false;
+ }
+
+ void SetRegisterService() { register_service_ = true; }
+
+ bool init_server_is_called() { return init_server_is_called_; }
+ bool finish_is_called() { return finish_is_called_; }
+ bool change_arguments_is_called() { return change_arguments_is_called_; }
+
+ private:
+ bool init_server_is_called_;
+ bool finish_is_called_;
+ bool change_arguments_is_called_;
+ bool register_service_;
+ std::shared_ptr<TestServiceImpl> service_;
+};
+
+class InsertPluginServerBuilderOption : public ServerBuilderOption {
+ public:
+ InsertPluginServerBuilderOption() { register_service_ = false; }
+
+ void UpdateArguments(ChannelArguments* /*arg*/) override {}
+
+ void UpdatePlugins(
+ std::vector<std::unique_ptr<ServerBuilderPlugin>>* plugins) override {
+ plugins->clear();
+
+ std::unique_ptr<TestServerBuilderPlugin> plugin(
+ new TestServerBuilderPlugin());
+ if (register_service_) plugin->SetRegisterService();
+ plugins->emplace_back(std::move(plugin));
+ }
+
+ void SetRegisterService() { register_service_ = true; }
+
+ private:
+ bool register_service_;
+};
+
+std::unique_ptr<ServerBuilderPlugin> CreateTestServerBuilderPlugin() {
+ return std::unique_ptr<ServerBuilderPlugin>(new TestServerBuilderPlugin());
+}
+
+// Force AddServerBuilderPlugin() to be called at static initialization time.
+struct StaticTestPluginInitializer {
StaticTestPluginInitializer() {
::grpc::ServerBuilder::InternalAddPluginFactory(
&CreateTestServerBuilderPlugin);
}
-} static_plugin_initializer_test_;
-
-// When the param boolean is true, the ServerBuilder plugin will be added at the
-// time of static initialization. When it's false, the ServerBuilder plugin will
-// be added using ServerBuilder::SetOption().
-class ServerBuilderPluginTest : public ::testing::TestWithParam<bool> {
- public:
- ServerBuilderPluginTest() {}
-
- void SetUp() override {
- port_ = grpc_pick_unused_port_or_die();
- builder_.reset(new ServerBuilder());
- }
-
- void InsertPlugin() {
- if (GetParam()) {
- // Add ServerBuilder plugin in static initialization
- CheckPresent();
- } else {
- // Add ServerBuilder plugin using ServerBuilder::SetOption()
- builder_->SetOption(std::unique_ptr<ServerBuilderOption>(
- new InsertPluginServerBuilderOption()));
- }
- }
-
- void InsertPluginWithTestService() {
- if (GetParam()) {
- // Add ServerBuilder plugin in static initialization
- auto plugin = CheckPresent();
- EXPECT_TRUE(plugin);
- plugin->SetRegisterService();
- } else {
- // Add ServerBuilder plugin using ServerBuilder::SetOption()
- std::unique_ptr<InsertPluginServerBuilderOption> option(
- new InsertPluginServerBuilderOption());
- option->SetRegisterService();
- builder_->SetOption(std::move(option));
- }
- }
-
- void StartServer() {
+} static_plugin_initializer_test_;
+
+// When the param boolean is true, the ServerBuilder plugin will be added at the
+// time of static initialization. When it's false, the ServerBuilder plugin will
+// be added using ServerBuilder::SetOption().
+class ServerBuilderPluginTest : public ::testing::TestWithParam<bool> {
+ public:
+ ServerBuilderPluginTest() {}
+
+ void SetUp() override {
+ port_ = grpc_pick_unused_port_or_die();
+ builder_.reset(new ServerBuilder());
+ }
+
+ void InsertPlugin() {
+ if (GetParam()) {
+ // Add ServerBuilder plugin in static initialization
+ CheckPresent();
+ } else {
+ // Add ServerBuilder plugin using ServerBuilder::SetOption()
+ builder_->SetOption(std::unique_ptr<ServerBuilderOption>(
+ new InsertPluginServerBuilderOption()));
+ }
+ }
+
+ void InsertPluginWithTestService() {
+ if (GetParam()) {
+ // Add ServerBuilder plugin in static initialization
+ auto plugin = CheckPresent();
+ EXPECT_TRUE(plugin);
+ plugin->SetRegisterService();
+ } else {
+ // Add ServerBuilder plugin using ServerBuilder::SetOption()
+ std::unique_ptr<InsertPluginServerBuilderOption> option(
+ new InsertPluginServerBuilderOption());
+ option->SetRegisterService();
+ builder_->SetOption(std::move(option));
+ }
+ }
+
+ void StartServer() {
TString server_address = "localhost:" + to_string(port_);
- builder_->AddListeningPort(server_address, InsecureServerCredentials());
- // we run some tests without a service, and for those we need to supply a
- // frequently polled completion queue
- cq_ = builder_->AddCompletionQueue();
- cq_thread_ = new std::thread(&ServerBuilderPluginTest::RunCQ, this);
- server_ = builder_->BuildAndStart();
- EXPECT_TRUE(CheckPresent());
- }
-
- void ResetStub() {
- string target = "dns:localhost:" + to_string(port_);
- channel_ = grpc::CreateChannel(target, InsecureChannelCredentials());
- stub_ = grpc::testing::EchoTestService::NewStub(channel_);
- }
-
- void TearDown() override {
- auto plugin = CheckPresent();
- EXPECT_TRUE(plugin);
- EXPECT_TRUE(plugin->init_server_is_called());
- EXPECT_TRUE(plugin->finish_is_called());
- server_->Shutdown();
- cq_->Shutdown();
- cq_thread_->join();
- delete cq_thread_;
- }
-
- string to_string(const int number) {
- std::stringstream strs;
- strs << number;
- return strs.str();
- }
-
- protected:
- std::shared_ptr<Channel> channel_;
- std::unique_ptr<ServerBuilder> builder_;
- std::unique_ptr<grpc::testing::EchoTestService::Stub> stub_;
- std::unique_ptr<ServerCompletionQueue> cq_;
- std::unique_ptr<Server> server_;
- std::thread* cq_thread_;
- TestServiceImpl service_;
- int port_;
-
- private:
- TestServerBuilderPlugin* CheckPresent() {
- auto it = builder_->plugins_.begin();
- for (; it != builder_->plugins_.end(); it++) {
- if ((*it)->name() == PLUGIN_NAME) break;
- }
- if (it != builder_->plugins_.end()) {
- return static_cast<TestServerBuilderPlugin*>(it->get());
- } else {
- return nullptr;
- }
- }
-
- void RunCQ() {
- void* tag;
- bool ok;
- while (cq_->Next(&tag, &ok))
- ;
- }
-};
-
-TEST_P(ServerBuilderPluginTest, PluginWithoutServiceTest) {
- InsertPlugin();
- StartServer();
-}
-
-TEST_P(ServerBuilderPluginTest, PluginWithServiceTest) {
- InsertPluginWithTestService();
- StartServer();
- ResetStub();
-
- EchoRequest request;
- EchoResponse response;
- request.set_message("Hello hello hello hello");
- ClientContext context;
- context.set_compression_algorithm(GRPC_COMPRESS_GZIP);
- Status s = stub_->Echo(&context, request, &response);
- EXPECT_EQ(response.message(), request.message());
- EXPECT_TRUE(s.ok());
-}
-
-INSTANTIATE_TEST_SUITE_P(ServerBuilderPluginTest, ServerBuilderPluginTest,
- ::testing::Values(false, true));
-
-} // namespace testing
-} // namespace grpc
-
-int main(int argc, char** argv) {
- grpc::testing::TestEnvironment env(argc, argv);
- ::testing::InitGoogleTest(&argc, argv);
- return RUN_ALL_TESTS();
-}
+ builder_->AddListeningPort(server_address, InsecureServerCredentials());
+ // we run some tests without a service, and for those we need to supply a
+ // frequently polled completion queue
+ cq_ = builder_->AddCompletionQueue();
+ cq_thread_ = new std::thread(&ServerBuilderPluginTest::RunCQ, this);
+ server_ = builder_->BuildAndStart();
+ EXPECT_TRUE(CheckPresent());
+ }
+
+ void ResetStub() {
+ string target = "dns:localhost:" + to_string(port_);
+ channel_ = grpc::CreateChannel(target, InsecureChannelCredentials());
+ stub_ = grpc::testing::EchoTestService::NewStub(channel_);
+ }
+
+ void TearDown() override {
+ auto plugin = CheckPresent();
+ EXPECT_TRUE(plugin);
+ EXPECT_TRUE(plugin->init_server_is_called());
+ EXPECT_TRUE(plugin->finish_is_called());
+ server_->Shutdown();
+ cq_->Shutdown();
+ cq_thread_->join();
+ delete cq_thread_;
+ }
+
+ string to_string(const int number) {
+ std::stringstream strs;
+ strs << number;
+ return strs.str();
+ }
+
+ protected:
+ std::shared_ptr<Channel> channel_;
+ std::unique_ptr<ServerBuilder> builder_;
+ std::unique_ptr<grpc::testing::EchoTestService::Stub> stub_;
+ std::unique_ptr<ServerCompletionQueue> cq_;
+ std::unique_ptr<Server> server_;
+ std::thread* cq_thread_;
+ TestServiceImpl service_;
+ int port_;
+
+ private:
+ TestServerBuilderPlugin* CheckPresent() {
+ auto it = builder_->plugins_.begin();
+ for (; it != builder_->plugins_.end(); it++) {
+ if ((*it)->name() == PLUGIN_NAME) break;
+ }
+ if (it != builder_->plugins_.end()) {
+ return static_cast<TestServerBuilderPlugin*>(it->get());
+ } else {
+ return nullptr;
+ }
+ }
+
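+  // Drains the completion queue on a dedicated thread until cq_->Shutdown()
+  // in TearDown() makes Next() return false. Tests that register no service
+  // still need the queue polled, hence the extra thread.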
+ void RunCQ() {
+ void* tag;
+ bool ok;
+ while (cq_->Next(&tag, &ok))
+ ;
+ }
+};
+
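+// Each TEST_P below runs twice (see INSTANTIATE_TEST_SUITE_P at the bottom):
+// with param = true the plugin under test is the one registered at static
+// initialization time, with param = false it is injected via
+// ServerBuilder::SetOption().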
+TEST_P(ServerBuilderPluginTest, PluginWithoutServiceTest) {
+ InsertPlugin();
+ StartServer();
+}
+
+TEST_P(ServerBuilderPluginTest, PluginWithServiceTest) {
+ InsertPluginWithTestService();
+ StartServer();
+ ResetStub();
+
+ EchoRequest request;
+ EchoResponse response;
+ request.set_message("Hello hello hello hello");
+ ClientContext context;
+ context.set_compression_algorithm(GRPC_COMPRESS_GZIP);
+ Status s = stub_->Echo(&context, request, &response);
+ EXPECT_EQ(response.message(), request.message());
+ EXPECT_TRUE(s.ok());
+}
+
+INSTANTIATE_TEST_SUITE_P(ServerBuilderPluginTest, ServerBuilderPluginTest,
+ ::testing::Values(false, true));
+
+} // namespace testing
+} // namespace grpc
+
+int main(int argc, char** argv) {
+ grpc::testing::TestEnvironment env(argc, argv);
+ ::testing::InitGoogleTest(&argc, argv);
+ return RUN_ALL_TESTS();
+}
diff --git a/contrib/libs/grpc/test/cpp/end2end/server_crash_test.cc b/contrib/libs/grpc/test/cpp/end2end/server_crash_test.cc
index f8e4ea8eae..3616d680f9 100644
--- a/contrib/libs/grpc/test/cpp/end2end/server_crash_test.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/server_crash_test.cc
@@ -1,160 +1,160 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include <grpc/grpc.h>
-#include <grpc/support/log.h>
-#include <grpc/support/time.h>
-#include <grpcpp/channel.h>
-#include <grpcpp/client_context.h>
-#include <grpcpp/create_channel.h>
-#include <grpcpp/server.h>
-#include <grpcpp/server_builder.h>
-#include <grpcpp/server_context.h>
-
-#include "src/proto/grpc/testing/duplicate/echo_duplicate.grpc.pb.h"
-#include "src/proto/grpc/testing/echo.grpc.pb.h"
-#include "test/core/util/port.h"
-#include "test/core/util/test_config.h"
-#include "test/cpp/util/subprocess.h"
-
-#include <gtest/gtest.h>
-
-using grpc::testing::EchoRequest;
-using grpc::testing::EchoResponse;
-using std::chrono::system_clock;
-
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpc/grpc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/time.h>
+#include <grpcpp/channel.h>
+#include <grpcpp/client_context.h>
+#include <grpcpp/create_channel.h>
+#include <grpcpp/server.h>
+#include <grpcpp/server_builder.h>
+#include <grpcpp/server_context.h>
+
+#include "src/proto/grpc/testing/duplicate/echo_duplicate.grpc.pb.h"
+#include "src/proto/grpc/testing/echo.grpc.pb.h"
+#include "test/core/util/port.h"
+#include "test/core/util/test_config.h"
+#include "test/cpp/util/subprocess.h"
+
+#include <gtest/gtest.h>
+
+using grpc::testing::EchoRequest;
+using grpc::testing::EchoResponse;
+using std::chrono::system_clock;
+
static TString g_root;
-
-namespace grpc {
-namespace testing {
-
-namespace {
-
-class ServiceImpl final : public ::grpc::testing::EchoTestService::Service {
- public:
- ServiceImpl() : bidi_stream_count_(0), response_stream_count_(0) {}
-
- Status BidiStream(
- ServerContext* /*context*/,
- ServerReaderWriter<EchoResponse, EchoRequest>* stream) override {
- bidi_stream_count_++;
- EchoRequest request;
- EchoResponse response;
- while (stream->Read(&request)) {
- gpr_log(GPR_INFO, "recv msg %s", request.message().c_str());
- response.set_message(request.message());
- stream->Write(response);
- gpr_sleep_until(gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
- gpr_time_from_seconds(1, GPR_TIMESPAN)));
- }
- return Status::OK;
- }
-
- Status ResponseStream(ServerContext* /*context*/,
- const EchoRequest* /*request*/,
- ServerWriter<EchoResponse>* writer) override {
- EchoResponse response;
- response_stream_count_++;
- for (int i = 0;; i++) {
- std::ostringstream msg;
- msg << "Hello " << i;
- response.set_message(msg.str());
- if (!writer->Write(response)) break;
- gpr_sleep_until(gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
- gpr_time_from_seconds(1, GPR_TIMESPAN)));
- }
- return Status::OK;
- }
-
- int bidi_stream_count() { return bidi_stream_count_; }
-
- int response_stream_count() { return response_stream_count_; }
-
- private:
- int bidi_stream_count_;
- int response_stream_count_;
-};
-
-class CrashTest : public ::testing::Test {
- protected:
- CrashTest() {}
-
+
+namespace grpc {
+namespace testing {
+
+namespace {
+
+class ServiceImpl final : public ::grpc::testing::EchoTestService::Service {
+ public:
+ ServiceImpl() : bidi_stream_count_(0), response_stream_count_(0) {}
+
+ Status BidiStream(
+ ServerContext* /*context*/,
+ ServerReaderWriter<EchoResponse, EchoRequest>* stream) override {
+ bidi_stream_count_++;
+ EchoRequest request;
+ EchoResponse response;
+ while (stream->Read(&request)) {
+ gpr_log(GPR_INFO, "recv msg %s", request.message().c_str());
+ response.set_message(request.message());
+ stream->Write(response);
+ gpr_sleep_until(gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
+ gpr_time_from_seconds(1, GPR_TIMESPAN)));
+ }
+ return Status::OK;
+ }
+
+ Status ResponseStream(ServerContext* /*context*/,
+ const EchoRequest* /*request*/,
+ ServerWriter<EchoResponse>* writer) override {
+ EchoResponse response;
+ response_stream_count_++;
+ for (int i = 0;; i++) {
+ std::ostringstream msg;
+ msg << "Hello " << i;
+ response.set_message(msg.str());
+ if (!writer->Write(response)) break;
+ gpr_sleep_until(gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
+ gpr_time_from_seconds(1, GPR_TIMESPAN)));
+ }
+ return Status::OK;
+ }
+
+ int bidi_stream_count() { return bidi_stream_count_; }
+
+ int response_stream_count() { return response_stream_count_; }
+
+ private:
+ int bidi_stream_count_;
+ int response_stream_count_;
+};
+
+class CrashTest : public ::testing::Test {
+ protected:
+ CrashTest() {}
+
std::unique_ptr<Server> CreateServerAndClient(const TString& mode) {
- auto port = grpc_pick_unused_port_or_die();
- std::ostringstream addr_stream;
- addr_stream << "localhost:" << port;
- auto addr = addr_stream.str();
- client_.reset(new SubProcess({g_root + "/server_crash_test_client",
- "--address=" + addr, "--mode=" + mode}));
- GPR_ASSERT(client_);
-
- ServerBuilder builder;
- builder.AddListeningPort(addr, grpc::InsecureServerCredentials());
- builder.RegisterService(&service_);
- return builder.BuildAndStart();
- }
-
- void KillClient() { client_.reset(); }
-
- bool HadOneBidiStream() { return service_.bidi_stream_count() == 1; }
-
- bool HadOneResponseStream() { return service_.response_stream_count() == 1; }
-
- private:
- std::unique_ptr<SubProcess> client_;
- ServiceImpl service_;
-};
-
-TEST_F(CrashTest, ResponseStream) {
- auto server = CreateServerAndClient("response");
-
- gpr_sleep_until(gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
- gpr_time_from_seconds(60, GPR_TIMESPAN)));
- KillClient();
- server->Shutdown();
- GPR_ASSERT(HadOneResponseStream());
-}
-
-TEST_F(CrashTest, BidiStream) {
- auto server = CreateServerAndClient("bidi");
-
- gpr_sleep_until(gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
- gpr_time_from_seconds(60, GPR_TIMESPAN)));
- KillClient();
- server->Shutdown();
- GPR_ASSERT(HadOneBidiStream());
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace grpc
-
-int main(int argc, char** argv) {
+ auto port = grpc_pick_unused_port_or_die();
+ std::ostringstream addr_stream;
+ addr_stream << "localhost:" << port;
+ auto addr = addr_stream.str();
+ client_.reset(new SubProcess({g_root + "/server_crash_test_client",
+ "--address=" + addr, "--mode=" + mode}));
+ GPR_ASSERT(client_);
+
+ ServerBuilder builder;
+ builder.AddListeningPort(addr, grpc::InsecureServerCredentials());
+ builder.RegisterService(&service_);
+ return builder.BuildAndStart();
+ }
+
+ void KillClient() { client_.reset(); }
+
+ bool HadOneBidiStream() { return service_.bidi_stream_count() == 1; }
+
+ bool HadOneResponseStream() { return service_.response_stream_count() == 1; }
+
+ private:
+ std::unique_ptr<SubProcess> client_;
+ ServiceImpl service_;
+};
+
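+// Each test starts the server, launches server_crash_test_client as a
+// subprocess in the requested mode, lets it stream for 60 seconds, then
+// destroys the client abruptly and checks that the server observed exactly
+// one stream of that kind.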
+TEST_F(CrashTest, ResponseStream) {
+ auto server = CreateServerAndClient("response");
+
+ gpr_sleep_until(gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
+ gpr_time_from_seconds(60, GPR_TIMESPAN)));
+ KillClient();
+ server->Shutdown();
+ GPR_ASSERT(HadOneResponseStream());
+}
+
+TEST_F(CrashTest, BidiStream) {
+ auto server = CreateServerAndClient("bidi");
+
+ gpr_sleep_until(gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
+ gpr_time_from_seconds(60, GPR_TIMESPAN)));
+ KillClient();
+ server->Shutdown();
+ GPR_ASSERT(HadOneBidiStream());
+}
+
+} // namespace
+
+} // namespace testing
+} // namespace grpc
+
+int main(int argc, char** argv) {
TString me = argv[0];
- auto lslash = me.rfind('/');
+ auto lslash = me.rfind('/');
if (lslash != TString::npos) {
- g_root = me.substr(0, lslash);
- } else {
- g_root = ".";
- }
-
- grpc::testing::TestEnvironment env(argc, argv);
- ::testing::InitGoogleTest(&argc, argv);
- return RUN_ALL_TESTS();
-}
+ g_root = me.substr(0, lslash);
+ } else {
+ g_root = ".";
+ }
+
+ grpc::testing::TestEnvironment env(argc, argv);
+ ::testing::InitGoogleTest(&argc, argv);
+ return RUN_ALL_TESTS();
+}
diff --git a/contrib/libs/grpc/test/cpp/end2end/server_crash_test_client.cc b/contrib/libs/grpc/test/cpp/end2end/server_crash_test_client.cc
index 99e8259aa9..202fb2836c 100644
--- a/contrib/libs/grpc/test/cpp/end2end/server_crash_test_client.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/server_crash_test_client.cc
@@ -1,72 +1,72 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include <gflags/gflags.h>
-#include <iostream>
-#include <memory>
-#include <sstream>
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <gflags/gflags.h>
+#include <iostream>
+#include <memory>
+#include <sstream>
#include <util/generic/string.h>
-
-#include <grpc/support/log.h>
-#include <grpcpp/channel.h>
-#include <grpcpp/client_context.h>
-#include <grpcpp/create_channel.h>
-
-#include "src/proto/grpc/testing/echo.grpc.pb.h"
-#include "test/cpp/util/test_config.h"
-
-DEFINE_string(address, "", "Address to connect to");
-DEFINE_string(mode, "", "Test mode to use");
-
-using grpc::testing::EchoRequest;
-using grpc::testing::EchoResponse;
-
-int main(int argc, char** argv) {
- grpc::testing::InitTest(&argc, &argv, true);
- auto stub = grpc::testing::EchoTestService::NewStub(
- grpc::CreateChannel(FLAGS_address, grpc::InsecureChannelCredentials()));
-
- EchoRequest request;
- EchoResponse response;
- grpc::ClientContext context;
- context.set_wait_for_ready(true);
-
- if (FLAGS_mode == "bidi") {
- auto stream = stub->BidiStream(&context);
- for (int i = 0;; i++) {
- std::ostringstream msg;
- msg << "Hello " << i;
- request.set_message(msg.str());
- GPR_ASSERT(stream->Write(request));
- GPR_ASSERT(stream->Read(&response));
- GPR_ASSERT(response.message() == request.message());
- }
- } else if (FLAGS_mode == "response") {
- EchoRequest request;
- request.set_message("Hello");
- auto stream = stub->ResponseStream(&context, request);
- for (;;) {
- GPR_ASSERT(stream->Read(&response));
- }
- } else {
- gpr_log(GPR_ERROR, "invalid test mode '%s'", FLAGS_mode.c_str());
- return 1;
- }
-
- return 0;
-}
+
+#include <grpc/support/log.h>
+#include <grpcpp/channel.h>
+#include <grpcpp/client_context.h>
+#include <grpcpp/create_channel.h>
+
+#include "src/proto/grpc/testing/echo.grpc.pb.h"
+#include "test/cpp/util/test_config.h"
+
+DEFINE_string(address, "", "Address to connect to");
+DEFINE_string(mode, "", "Test mode to use");
+
+using grpc::testing::EchoRequest;
+using grpc::testing::EchoResponse;
+
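+// In either mode the client streams indefinitely; it terminates only when the
+// parent test destroys the subprocess or a GPR_ASSERT fails after the stream
+// breaks.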
+int main(int argc, char** argv) {
+ grpc::testing::InitTest(&argc, &argv, true);
+ auto stub = grpc::testing::EchoTestService::NewStub(
+ grpc::CreateChannel(FLAGS_address, grpc::InsecureChannelCredentials()));
+
+ EchoRequest request;
+ EchoResponse response;
+ grpc::ClientContext context;
+ context.set_wait_for_ready(true);
+
+ if (FLAGS_mode == "bidi") {
+ auto stream = stub->BidiStream(&context);
+ for (int i = 0;; i++) {
+ std::ostringstream msg;
+ msg << "Hello " << i;
+ request.set_message(msg.str());
+ GPR_ASSERT(stream->Write(request));
+ GPR_ASSERT(stream->Read(&response));
+ GPR_ASSERT(response.message() == request.message());
+ }
+ } else if (FLAGS_mode == "response") {
+ EchoRequest request;
+ request.set_message("Hello");
+ auto stream = stub->ResponseStream(&context, request);
+ for (;;) {
+ GPR_ASSERT(stream->Read(&response));
+ }
+ } else {
+ gpr_log(GPR_ERROR, "invalid test mode '%s'", FLAGS_mode.c_str());
+ return 1;
+ }
+
+ return 0;
+}
diff --git a/contrib/libs/grpc/test/cpp/end2end/server_early_return_test.cc b/contrib/libs/grpc/test/cpp/end2end/server_early_return_test.cc
index 973168411c..0f340516b0 100644
--- a/contrib/libs/grpc/test/cpp/end2end/server_early_return_test.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/server_early_return_test.cc
@@ -1,232 +1,232 @@
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include <grpc/grpc.h>
-#include <grpc/support/alloc.h>
-#include <grpc/support/log.h>
-#include <grpc/support/time.h>
-#include <grpcpp/channel.h>
-#include <grpcpp/client_context.h>
-#include <grpcpp/create_channel.h>
-#include <grpcpp/security/credentials.h>
-#include <grpcpp/security/server_credentials.h>
-#include <grpcpp/server.h>
-#include <grpcpp/server_builder.h>
-#include <grpcpp/server_context.h>
-
-#include "src/proto/grpc/testing/echo.grpc.pb.h"
-#include "test/core/util/port.h"
-#include "test/core/util/test_config.h"
-#include "test/cpp/util/string_ref_helper.h"
-
-#include <gtest/gtest.h>
-
-namespace grpc {
-namespace testing {
-namespace {
-
-const char kServerReturnStatusCode[] = "server_return_status_code";
-const char kServerDelayBeforeReturnUs[] = "server_delay_before_return_us";
-const char kServerReturnAfterNReads[] = "server_return_after_n_reads";
-
-class TestServiceImpl : public ::grpc::testing::EchoTestService::Service {
- public:
- // Unused methods are not implemented.
-
- Status RequestStream(ServerContext* context,
- ServerReader<EchoRequest>* reader,
- EchoResponse* response) override {
- int server_return_status_code =
- GetIntValueFromMetadata(context, kServerReturnStatusCode, 0);
- int server_delay_before_return_us =
- GetIntValueFromMetadata(context, kServerDelayBeforeReturnUs, 0);
- int server_return_after_n_reads =
- GetIntValueFromMetadata(context, kServerReturnAfterNReads, 0);
-
- EchoRequest request;
- while (server_return_after_n_reads--) {
- EXPECT_TRUE(reader->Read(&request));
- }
-
- response->set_message("response msg");
-
- gpr_sleep_until(gpr_time_add(
- gpr_now(GPR_CLOCK_MONOTONIC),
- gpr_time_from_micros(server_delay_before_return_us, GPR_TIMESPAN)));
-
- return Status(static_cast<StatusCode>(server_return_status_code), "");
- }
-
- Status BidiStream(
- ServerContext* context,
- ServerReaderWriter<EchoResponse, EchoRequest>* stream) override {
- int server_return_status_code =
- GetIntValueFromMetadata(context, kServerReturnStatusCode, 0);
- int server_delay_before_return_us =
- GetIntValueFromMetadata(context, kServerDelayBeforeReturnUs, 0);
- int server_return_after_n_reads =
- GetIntValueFromMetadata(context, kServerReturnAfterNReads, 0);
-
- EchoRequest request;
- EchoResponse response;
- while (server_return_after_n_reads--) {
- EXPECT_TRUE(stream->Read(&request));
- response.set_message(request.message());
- EXPECT_TRUE(stream->Write(response));
- }
-
- gpr_sleep_until(gpr_time_add(
- gpr_now(GPR_CLOCK_MONOTONIC),
- gpr_time_from_micros(server_delay_before_return_us, GPR_TIMESPAN)));
-
- return Status(static_cast<StatusCode>(server_return_status_code), "");
- }
-
- int GetIntValueFromMetadata(ServerContext* context, const char* key,
- int default_value) {
- auto metadata = context->client_metadata();
- if (metadata.find(key) != metadata.end()) {
- std::istringstream iss(ToString(metadata.find(key)->second));
- iss >> default_value;
- }
- return default_value;
- }
-};
-
-class ServerEarlyReturnTest : public ::testing::Test {
- protected:
- ServerEarlyReturnTest() : picked_port_(0) {}
-
- void SetUp() override {
- int port = grpc_pick_unused_port_or_die();
- picked_port_ = port;
- server_address_ << "127.0.0.1:" << port;
- ServerBuilder builder;
- builder.AddListeningPort(server_address_.str(),
- InsecureServerCredentials());
- builder.RegisterService(&service_);
- server_ = builder.BuildAndStart();
-
- channel_ = grpc::CreateChannel(server_address_.str(),
- InsecureChannelCredentials());
- stub_ = grpc::testing::EchoTestService::NewStub(channel_);
- }
-
- void TearDown() override {
- server_->Shutdown();
- if (picked_port_ > 0) {
- grpc_recycle_unused_port(picked_port_);
- }
- }
-
- // Client sends 20 requests and the server returns after reading 10 requests.
- // If return_cancel is true, server returns CANCELLED status. Otherwise it
- // returns OK.
- void DoBidiStream(bool return_cancelled) {
- EchoRequest request;
- EchoResponse response;
- ClientContext context;
-
- context.AddMetadata(kServerReturnAfterNReads, "10");
- if (return_cancelled) {
- // "1" means CANCELLED
- context.AddMetadata(kServerReturnStatusCode, "1");
- }
- context.AddMetadata(kServerDelayBeforeReturnUs, "10000");
-
- auto stream = stub_->BidiStream(&context);
-
- for (int i = 0; i < 20; i++) {
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpc/grpc.h>
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/time.h>
+#include <grpcpp/channel.h>
+#include <grpcpp/client_context.h>
+#include <grpcpp/create_channel.h>
+#include <grpcpp/security/credentials.h>
+#include <grpcpp/security/server_credentials.h>
+#include <grpcpp/server.h>
+#include <grpcpp/server_builder.h>
+#include <grpcpp/server_context.h>
+
+#include "src/proto/grpc/testing/echo.grpc.pb.h"
+#include "test/core/util/port.h"
+#include "test/core/util/test_config.h"
+#include "test/cpp/util/string_ref_helper.h"
+
+#include <gtest/gtest.h>
+
+namespace grpc {
+namespace testing {
+namespace {
+
+const char kServerReturnStatusCode[] = "server_return_status_code";
+const char kServerDelayBeforeReturnUs[] = "server_delay_before_return_us";
+const char kServerReturnAfterNReads[] = "server_return_after_n_reads";
+
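+// The client drives the server's behavior through request metadata: the keys
+// above carry the status code to return, a delay in microseconds before
+// returning, and the number of messages to read before returning early.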
+class TestServiceImpl : public ::grpc::testing::EchoTestService::Service {
+ public:
+ // Unused methods are not implemented.
+
+ Status RequestStream(ServerContext* context,
+ ServerReader<EchoRequest>* reader,
+ EchoResponse* response) override {
+ int server_return_status_code =
+ GetIntValueFromMetadata(context, kServerReturnStatusCode, 0);
+ int server_delay_before_return_us =
+ GetIntValueFromMetadata(context, kServerDelayBeforeReturnUs, 0);
+ int server_return_after_n_reads =
+ GetIntValueFromMetadata(context, kServerReturnAfterNReads, 0);
+
+ EchoRequest request;
+ while (server_return_after_n_reads--) {
+ EXPECT_TRUE(reader->Read(&request));
+ }
+
+ response->set_message("response msg");
+
+ gpr_sleep_until(gpr_time_add(
+ gpr_now(GPR_CLOCK_MONOTONIC),
+ gpr_time_from_micros(server_delay_before_return_us, GPR_TIMESPAN)));
+
+ return Status(static_cast<StatusCode>(server_return_status_code), "");
+ }
+
+ Status BidiStream(
+ ServerContext* context,
+ ServerReaderWriter<EchoResponse, EchoRequest>* stream) override {
+ int server_return_status_code =
+ GetIntValueFromMetadata(context, kServerReturnStatusCode, 0);
+ int server_delay_before_return_us =
+ GetIntValueFromMetadata(context, kServerDelayBeforeReturnUs, 0);
+ int server_return_after_n_reads =
+ GetIntValueFromMetadata(context, kServerReturnAfterNReads, 0);
+
+ EchoRequest request;
+ EchoResponse response;
+ while (server_return_after_n_reads--) {
+ EXPECT_TRUE(stream->Read(&request));
+ response.set_message(request.message());
+ EXPECT_TRUE(stream->Write(response));
+ }
+
+ gpr_sleep_until(gpr_time_add(
+ gpr_now(GPR_CLOCK_MONOTONIC),
+ gpr_time_from_micros(server_delay_before_return_us, GPR_TIMESPAN)));
+
+ return Status(static_cast<StatusCode>(server_return_status_code), "");
+ }
+
+ int GetIntValueFromMetadata(ServerContext* context, const char* key,
+ int default_value) {
+ auto metadata = context->client_metadata();
+ if (metadata.find(key) != metadata.end()) {
+ std::istringstream iss(ToString(metadata.find(key)->second));
+ iss >> default_value;
+ }
+ return default_value;
+ }
+};
+
+class ServerEarlyReturnTest : public ::testing::Test {
+ protected:
+ ServerEarlyReturnTest() : picked_port_(0) {}
+
+ void SetUp() override {
+ int port = grpc_pick_unused_port_or_die();
+ picked_port_ = port;
+ server_address_ << "127.0.0.1:" << port;
+ ServerBuilder builder;
+ builder.AddListeningPort(server_address_.str(),
+ InsecureServerCredentials());
+ builder.RegisterService(&service_);
+ server_ = builder.BuildAndStart();
+
+ channel_ = grpc::CreateChannel(server_address_.str(),
+ InsecureChannelCredentials());
+ stub_ = grpc::testing::EchoTestService::NewStub(channel_);
+ }
+
+ void TearDown() override {
+ server_->Shutdown();
+ if (picked_port_ > 0) {
+ grpc_recycle_unused_port(picked_port_);
+ }
+ }
+
+ // Client sends 20 requests and the server returns after reading 10 requests.
+  // If return_cancelled is true, the server returns CANCELLED status;
+  // otherwise it returns OK.
+ void DoBidiStream(bool return_cancelled) {
+ EchoRequest request;
+ EchoResponse response;
+ ClientContext context;
+
+ context.AddMetadata(kServerReturnAfterNReads, "10");
+ if (return_cancelled) {
+ // "1" means CANCELLED
+ context.AddMetadata(kServerReturnStatusCode, "1");
+ }
+ context.AddMetadata(kServerDelayBeforeReturnUs, "10000");
+
+ auto stream = stub_->BidiStream(&context);
+
+ for (int i = 0; i < 20; i++) {
request.set_message(TString("hello") + ToString(i));
- bool write_ok = stream->Write(request);
- bool read_ok = stream->Read(&response);
- if (i < 10) {
- EXPECT_TRUE(write_ok);
- EXPECT_TRUE(read_ok);
- EXPECT_EQ(response.message(), request.message());
- } else {
- EXPECT_FALSE(read_ok);
- }
- }
-
- stream->WritesDone();
- EXPECT_FALSE(stream->Read(&response));
-
- Status s = stream->Finish();
- if (return_cancelled) {
- EXPECT_EQ(s.error_code(), StatusCode::CANCELLED);
- } else {
- EXPECT_TRUE(s.ok());
- }
- }
-
- void DoRequestStream(bool return_cancelled) {
- EchoRequest request;
- EchoResponse response;
- ClientContext context;
-
- context.AddMetadata(kServerReturnAfterNReads, "10");
- if (return_cancelled) {
- // "1" means CANCELLED
- context.AddMetadata(kServerReturnStatusCode, "1");
- }
- context.AddMetadata(kServerDelayBeforeReturnUs, "10000");
-
- auto stream = stub_->RequestStream(&context, &response);
- for (int i = 0; i < 20; i++) {
+ bool write_ok = stream->Write(request);
+ bool read_ok = stream->Read(&response);
+ if (i < 10) {
+ EXPECT_TRUE(write_ok);
+ EXPECT_TRUE(read_ok);
+ EXPECT_EQ(response.message(), request.message());
+ } else {
+ EXPECT_FALSE(read_ok);
+ }
+ }
+
+ stream->WritesDone();
+ EXPECT_FALSE(stream->Read(&response));
+
+ Status s = stream->Finish();
+ if (return_cancelled) {
+ EXPECT_EQ(s.error_code(), StatusCode::CANCELLED);
+ } else {
+ EXPECT_TRUE(s.ok());
+ }
+ }
+
+ void DoRequestStream(bool return_cancelled) {
+ EchoRequest request;
+ EchoResponse response;
+ ClientContext context;
+
+ context.AddMetadata(kServerReturnAfterNReads, "10");
+ if (return_cancelled) {
+ // "1" means CANCELLED
+ context.AddMetadata(kServerReturnStatusCode, "1");
+ }
+ context.AddMetadata(kServerDelayBeforeReturnUs, "10000");
+
+ auto stream = stub_->RequestStream(&context, &response);
+ for (int i = 0; i < 20; i++) {
request.set_message(TString("hello") + ToString(i));
- bool written = stream->Write(request);
- if (i < 10) {
- EXPECT_TRUE(written);
- }
- }
- stream->WritesDone();
- Status s = stream->Finish();
- if (return_cancelled) {
- EXPECT_EQ(s.error_code(), StatusCode::CANCELLED);
- } else {
- EXPECT_TRUE(s.ok());
- }
- }
-
- std::shared_ptr<Channel> channel_;
- std::unique_ptr<grpc::testing::EchoTestService::Stub> stub_;
- std::unique_ptr<Server> server_;
- std::ostringstream server_address_;
- TestServiceImpl service_;
- int picked_port_;
-};
-
-TEST_F(ServerEarlyReturnTest, BidiStreamEarlyOk) { DoBidiStream(false); }
-
-TEST_F(ServerEarlyReturnTest, BidiStreamEarlyCancel) { DoBidiStream(true); }
-
-TEST_F(ServerEarlyReturnTest, RequestStreamEarlyOK) { DoRequestStream(false); }
-TEST_F(ServerEarlyReturnTest, RequestStreamEarlyCancel) {
- DoRequestStream(true);
-}
-
-} // namespace
-} // namespace testing
-} // namespace grpc
-
-int main(int argc, char** argv) {
- grpc::testing::TestEnvironment env(argc, argv);
- ::testing::InitGoogleTest(&argc, argv);
- return RUN_ALL_TESTS();
-}
+ bool written = stream->Write(request);
+ if (i < 10) {
+ EXPECT_TRUE(written);
+ }
+ }
+ stream->WritesDone();
+ Status s = stream->Finish();
+ if (return_cancelled) {
+ EXPECT_EQ(s.error_code(), StatusCode::CANCELLED);
+ } else {
+ EXPECT_TRUE(s.ok());
+ }
+ }
+
+ std::shared_ptr<Channel> channel_;
+ std::unique_ptr<grpc::testing::EchoTestService::Stub> stub_;
+ std::unique_ptr<Server> server_;
+ std::ostringstream server_address_;
+ TestServiceImpl service_;
+ int picked_port_;
+};
+
+TEST_F(ServerEarlyReturnTest, BidiStreamEarlyOk) { DoBidiStream(false); }
+
+TEST_F(ServerEarlyReturnTest, BidiStreamEarlyCancel) { DoBidiStream(true); }
+
+TEST_F(ServerEarlyReturnTest, RequestStreamEarlyOK) { DoRequestStream(false); }
+TEST_F(ServerEarlyReturnTest, RequestStreamEarlyCancel) {
+ DoRequestStream(true);
+}
+
+} // namespace
+} // namespace testing
+} // namespace grpc
+
+int main(int argc, char** argv) {
+ grpc::testing::TestEnvironment env(argc, argv);
+ ::testing::InitGoogleTest(&argc, argv);
+ return RUN_ALL_TESTS();
+}
diff --git a/contrib/libs/grpc/test/cpp/end2end/server_interceptors/ya.make b/contrib/libs/grpc/test/cpp/end2end/server_interceptors/ya.make
index 4405208f4c..161176f141 100644
--- a/contrib/libs/grpc/test/cpp/end2end/server_interceptors/ya.make
+++ b/contrib/libs/grpc/test/cpp/end2end/server_interceptors/ya.make
@@ -1,32 +1,32 @@
GTEST_UGLY()
-
-OWNER(
- dvshkurko
- g:ymake
-)
-
-ADDINCL(
+
+OWNER(
+ dvshkurko
+ g:ymake
+)
+
+ADDINCL(
${ARCADIA_BUILD_ROOT}/contrib/libs/grpc
- ${ARCADIA_ROOT}/contrib/libs/grpc
-)
-
-PEERDIR(
- contrib/libs/grpc/src/proto/grpc/core
- contrib/libs/grpc/src/proto/grpc/testing
- contrib/libs/grpc/src/proto/grpc/testing/duplicate
- contrib/libs/grpc/test/core/util
- contrib/libs/grpc/test/cpp/end2end
- contrib/libs/grpc/test/cpp/util
-)
-
-NO_COMPILER_WARNINGS()
-
-SRCDIR(
- contrib/libs/grpc/test/cpp/end2end
-)
-
-SRCS(
- server_interceptors_end2end_test.cc
-)
-
-END()
+ ${ARCADIA_ROOT}/contrib/libs/grpc
+)
+
+PEERDIR(
+ contrib/libs/grpc/src/proto/grpc/core
+ contrib/libs/grpc/src/proto/grpc/testing
+ contrib/libs/grpc/src/proto/grpc/testing/duplicate
+ contrib/libs/grpc/test/core/util
+ contrib/libs/grpc/test/cpp/end2end
+ contrib/libs/grpc/test/cpp/util
+)
+
+NO_COMPILER_WARNINGS()
+
+SRCDIR(
+ contrib/libs/grpc/test/cpp/end2end
+)
+
+SRCS(
+ server_interceptors_end2end_test.cc
+)
+
+END()
diff --git a/contrib/libs/grpc/test/cpp/end2end/server_interceptors_end2end_test.cc b/contrib/libs/grpc/test/cpp/end2end/server_interceptors_end2end_test.cc
index 2f6a71ebd4..6d2dc772ef 100644
--- a/contrib/libs/grpc/test/cpp/end2end/server_interceptors_end2end_test.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/server_interceptors_end2end_test.cc
@@ -1,708 +1,708 @@
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include <memory>
-#include <vector>
-
-#include <grpcpp/channel.h>
-#include <grpcpp/client_context.h>
-#include <grpcpp/create_channel.h>
-#include <grpcpp/generic/generic_stub.h>
-#include <grpcpp/impl/codegen/proto_utils.h>
-#include <grpcpp/server.h>
-#include <grpcpp/server_builder.h>
-#include <grpcpp/server_context.h>
-#include <grpcpp/support/server_interceptor.h>
-
-#include "src/proto/grpc/testing/echo.grpc.pb.h"
-#include "test/core/util/port.h"
-#include "test/core/util/test_config.h"
-#include "test/cpp/end2end/interceptors_util.h"
-#include "test/cpp/end2end/test_service_impl.h"
-#include "test/cpp/util/byte_buffer_proto_helper.h"
-
-#include <gtest/gtest.h>
-
-namespace grpc {
-namespace testing {
-namespace {
-
-class LoggingInterceptor : public experimental::Interceptor {
- public:
- LoggingInterceptor(experimental::ServerRpcInfo* info) {
- info_ = info;
-
- // Check the method name and compare to the type
- const char* method = info->method();
- experimental::ServerRpcInfo::Type type = info->type();
-
- // Check that we use one of our standard methods with expected type.
- // Also allow the health checking service.
- // We accept BIDI_STREAMING for Echo in case it's an AsyncGenericService
- // being tested (the GenericRpc test).
- // The empty method is for the Unimplemented requests that arise
- // when draining the CQ.
- EXPECT_TRUE(
- strstr(method, "/grpc.health") == method ||
- (strcmp(method, "/grpc.testing.EchoTestService/Echo") == 0 &&
- (type == experimental::ServerRpcInfo::Type::UNARY ||
- type == experimental::ServerRpcInfo::Type::BIDI_STREAMING)) ||
- (strcmp(method, "/grpc.testing.EchoTestService/RequestStream") == 0 &&
- type == experimental::ServerRpcInfo::Type::CLIENT_STREAMING) ||
- (strcmp(method, "/grpc.testing.EchoTestService/ResponseStream") == 0 &&
- type == experimental::ServerRpcInfo::Type::SERVER_STREAMING) ||
- (strcmp(method, "/grpc.testing.EchoTestService/BidiStream") == 0 &&
- type == experimental::ServerRpcInfo::Type::BIDI_STREAMING) ||
- strcmp(method, "/grpc.testing.EchoTestService/Unimplemented") == 0 ||
- (strcmp(method, "") == 0 &&
- type == experimental::ServerRpcInfo::Type::BIDI_STREAMING));
- }
-
- void Intercept(experimental::InterceptorBatchMethods* methods) override {
- if (methods->QueryInterceptionHookPoint(
- experimental::InterceptionHookPoints::PRE_SEND_INITIAL_METADATA)) {
- auto* map = methods->GetSendInitialMetadata();
- // Got nothing better to do here for now
- EXPECT_EQ(map->size(), static_cast<unsigned>(0));
- }
- if (methods->QueryInterceptionHookPoint(
- experimental::InterceptionHookPoints::PRE_SEND_MESSAGE)) {
- EchoRequest req;
- auto* buffer = methods->GetSerializedSendMessage();
- auto copied_buffer = *buffer;
- EXPECT_TRUE(
- SerializationTraits<EchoRequest>::Deserialize(&copied_buffer, &req)
- .ok());
- EXPECT_TRUE(req.message().find("Hello") == 0);
- }
- if (methods->QueryInterceptionHookPoint(
- experimental::InterceptionHookPoints::PRE_SEND_STATUS)) {
- auto* map = methods->GetSendTrailingMetadata();
- bool found = false;
- // Check that we received the metadata as an echo
- for (const auto& pair : *map) {
- found = pair.first.find("testkey") == 0 &&
- pair.second.find("testvalue") == 0;
- if (found) break;
- }
- EXPECT_EQ(found, true);
- auto status = methods->GetSendStatus();
- EXPECT_EQ(status.ok(), true);
- }
- if (methods->QueryInterceptionHookPoint(
- experimental::InterceptionHookPoints::POST_RECV_INITIAL_METADATA)) {
- auto* map = methods->GetRecvInitialMetadata();
- bool found = false;
- // Check that we received the metadata as an echo
- for (const auto& pair : *map) {
- found = pair.first.find("testkey") == 0 &&
- pair.second.find("testvalue") == 0;
- if (found) break;
- }
- EXPECT_EQ(found, true);
- }
- if (methods->QueryInterceptionHookPoint(
- experimental::InterceptionHookPoints::POST_RECV_MESSAGE)) {
- EchoResponse* resp =
- static_cast<EchoResponse*>(methods->GetRecvMessage());
- if (resp != nullptr) {
- EXPECT_TRUE(resp->message().find("Hello") == 0);
- }
- }
- if (methods->QueryInterceptionHookPoint(
- experimental::InterceptionHookPoints::POST_RECV_CLOSE)) {
- // Got nothing interesting to do here
- }
- methods->Proceed();
- }
-
- private:
- experimental::ServerRpcInfo* info_;
-};
-
-class LoggingInterceptorFactory
- : public experimental::ServerInterceptorFactoryInterface {
- public:
- virtual experimental::Interceptor* CreateServerInterceptor(
- experimental::ServerRpcInfo* info) override {
- return new LoggingInterceptor(info);
- }
-};
-
-// Test if SendMessage function family works as expected for sync/callback apis
-class SyncSendMessageTester : public experimental::Interceptor {
- public:
- SyncSendMessageTester(experimental::ServerRpcInfo* /*info*/) {}
-
- void Intercept(experimental::InterceptorBatchMethods* methods) override {
- if (methods->QueryInterceptionHookPoint(
- experimental::InterceptionHookPoints::PRE_SEND_MESSAGE)) {
- string old_msg =
- static_cast<const EchoRequest*>(methods->GetSendMessage())->message();
- EXPECT_EQ(old_msg.find("Hello"), 0u);
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <memory>
+#include <vector>
+
+#include <grpcpp/channel.h>
+#include <grpcpp/client_context.h>
+#include <grpcpp/create_channel.h>
+#include <grpcpp/generic/generic_stub.h>
+#include <grpcpp/impl/codegen/proto_utils.h>
+#include <grpcpp/server.h>
+#include <grpcpp/server_builder.h>
+#include <grpcpp/server_context.h>
+#include <grpcpp/support/server_interceptor.h>
+
+#include "src/proto/grpc/testing/echo.grpc.pb.h"
+#include "test/core/util/port.h"
+#include "test/core/util/test_config.h"
+#include "test/cpp/end2end/interceptors_util.h"
+#include "test/cpp/end2end/test_service_impl.h"
+#include "test/cpp/util/byte_buffer_proto_helper.h"
+
+#include <gtest/gtest.h>
+
+namespace grpc {
+namespace testing {
+namespace {
+
+class LoggingInterceptor : public experimental::Interceptor {
+ public:
+ LoggingInterceptor(experimental::ServerRpcInfo* info) {
+ info_ = info;
+
+ // Check the method name and compare to the type
+ const char* method = info->method();
+ experimental::ServerRpcInfo::Type type = info->type();
+
+ // Check that we use one of our standard methods with expected type.
+ // Also allow the health checking service.
+ // We accept BIDI_STREAMING for Echo in case it's an AsyncGenericService
+ // being tested (the GenericRpc test).
+ // The empty method is for the Unimplemented requests that arise
+ // when draining the CQ.
+ EXPECT_TRUE(
+ strstr(method, "/grpc.health") == method ||
+ (strcmp(method, "/grpc.testing.EchoTestService/Echo") == 0 &&
+ (type == experimental::ServerRpcInfo::Type::UNARY ||
+ type == experimental::ServerRpcInfo::Type::BIDI_STREAMING)) ||
+ (strcmp(method, "/grpc.testing.EchoTestService/RequestStream") == 0 &&
+ type == experimental::ServerRpcInfo::Type::CLIENT_STREAMING) ||
+ (strcmp(method, "/grpc.testing.EchoTestService/ResponseStream") == 0 &&
+ type == experimental::ServerRpcInfo::Type::SERVER_STREAMING) ||
+ (strcmp(method, "/grpc.testing.EchoTestService/BidiStream") == 0 &&
+ type == experimental::ServerRpcInfo::Type::BIDI_STREAMING) ||
+ strcmp(method, "/grpc.testing.EchoTestService/Unimplemented") == 0 ||
+ (strcmp(method, "") == 0 &&
+ type == experimental::ServerRpcInfo::Type::BIDI_STREAMING));
+ }
+
+ void Intercept(experimental::InterceptorBatchMethods* methods) override {
+ if (methods->QueryInterceptionHookPoint(
+ experimental::InterceptionHookPoints::PRE_SEND_INITIAL_METADATA)) {
+ auto* map = methods->GetSendInitialMetadata();
+ // Got nothing better to do here for now
+ EXPECT_EQ(map->size(), static_cast<unsigned>(0));
+ }
+ if (methods->QueryInterceptionHookPoint(
+ experimental::InterceptionHookPoints::PRE_SEND_MESSAGE)) {
+ EchoRequest req;
+ auto* buffer = methods->GetSerializedSendMessage();
+ auto copied_buffer = *buffer;
+ EXPECT_TRUE(
+ SerializationTraits<EchoRequest>::Deserialize(&copied_buffer, &req)
+ .ok());
+ EXPECT_TRUE(req.message().find("Hello") == 0);
+ }
+ if (methods->QueryInterceptionHookPoint(
+ experimental::InterceptionHookPoints::PRE_SEND_STATUS)) {
+ auto* map = methods->GetSendTrailingMetadata();
+ bool found = false;
+ // Check that we received the metadata as an echo
+ for (const auto& pair : *map) {
+ found = pair.first.find("testkey") == 0 &&
+ pair.second.find("testvalue") == 0;
+ if (found) break;
+ }
+ EXPECT_EQ(found, true);
+ auto status = methods->GetSendStatus();
+ EXPECT_EQ(status.ok(), true);
+ }
+ if (methods->QueryInterceptionHookPoint(
+ experimental::InterceptionHookPoints::POST_RECV_INITIAL_METADATA)) {
+ auto* map = methods->GetRecvInitialMetadata();
+ bool found = false;
+ // Check that we received the metadata as an echo
+ for (const auto& pair : *map) {
+ found = pair.first.find("testkey") == 0 &&
+ pair.second.find("testvalue") == 0;
+ if (found) break;
+ }
+ EXPECT_EQ(found, true);
+ }
+ if (methods->QueryInterceptionHookPoint(
+ experimental::InterceptionHookPoints::POST_RECV_MESSAGE)) {
+ EchoResponse* resp =
+ static_cast<EchoResponse*>(methods->GetRecvMessage());
+ if (resp != nullptr) {
+ EXPECT_TRUE(resp->message().find("Hello") == 0);
+ }
+ }
+ if (methods->QueryInterceptionHookPoint(
+ experimental::InterceptionHookPoints::POST_RECV_CLOSE)) {
+ // Got nothing interesting to do here
+ }
+ methods->Proceed();
+ }
+
+ private:
+ experimental::ServerRpcInfo* info_;
+};
+
+class LoggingInterceptorFactory
+ : public experimental::ServerInterceptorFactoryInterface {
+ public:
+ virtual experimental::Interceptor* CreateServerInterceptor(
+ experimental::ServerRpcInfo* info) override {
+ return new LoggingInterceptor(info);
+ }
+};
+
+// Test if SendMessage function family works as expected for sync/callback apis
+class SyncSendMessageTester : public experimental::Interceptor {
+ public:
+ SyncSendMessageTester(experimental::ServerRpcInfo* /*info*/) {}
+
+ void Intercept(experimental::InterceptorBatchMethods* methods) override {
+ if (methods->QueryInterceptionHookPoint(
+ experimental::InterceptionHookPoints::PRE_SEND_MESSAGE)) {
+ string old_msg =
+ static_cast<const EchoRequest*>(methods->GetSendMessage())->message();
+ EXPECT_EQ(old_msg.find("Hello"), 0u);
new_msg_.set_message(TString("World" + old_msg).c_str());
- methods->ModifySendMessage(&new_msg_);
- }
- methods->Proceed();
- }
-
- private:
- EchoRequest new_msg_;
-};
-
-class SyncSendMessageTesterFactory
- : public experimental::ServerInterceptorFactoryInterface {
- public:
- virtual experimental::Interceptor* CreateServerInterceptor(
- experimental::ServerRpcInfo* info) override {
- return new SyncSendMessageTester(info);
- }
-};
-
-// Test if SendMessage function family works as expected for sync/callback apis
-class SyncSendMessageVerifier : public experimental::Interceptor {
- public:
- SyncSendMessageVerifier(experimental::ServerRpcInfo* /*info*/) {}
-
- void Intercept(experimental::InterceptorBatchMethods* methods) override {
- if (methods->QueryInterceptionHookPoint(
- experimental::InterceptionHookPoints::PRE_SEND_MESSAGE)) {
- // Make sure that the changes made in SyncSendMessageTester persisted
+ methods->ModifySendMessage(&new_msg_);
+ }
+ methods->Proceed();
+ }
+
+ private:
+ EchoRequest new_msg_;
+};
+
+class SyncSendMessageTesterFactory
+ : public experimental::ServerInterceptorFactoryInterface {
+ public:
+ virtual experimental::Interceptor* CreateServerInterceptor(
+ experimental::ServerRpcInfo* info) override {
+ return new SyncSendMessageTester(info);
+ }
+};
+
+// Test if SendMessage function family works as expected for sync/callback apis
+class SyncSendMessageVerifier : public experimental::Interceptor {
+ public:
+ SyncSendMessageVerifier(experimental::ServerRpcInfo* /*info*/) {}
+
+ void Intercept(experimental::InterceptorBatchMethods* methods) override {
+ if (methods->QueryInterceptionHookPoint(
+ experimental::InterceptionHookPoints::PRE_SEND_MESSAGE)) {
+ // Make sure that the changes made in SyncSendMessageTester persisted
string old_msg =
- static_cast<const EchoRequest*>(methods->GetSendMessage())->message();
- EXPECT_EQ(old_msg.find("World"), 0u);
-
- // Remove the "World" part of the string that we added earlier
- new_msg_.set_message(old_msg.erase(0, 5));
- methods->ModifySendMessage(&new_msg_);
-
- // LoggingInterceptor verifies that changes got reverted
- }
- methods->Proceed();
- }
-
- private:
- EchoRequest new_msg_;
-};
-
-class SyncSendMessageVerifierFactory
- : public experimental::ServerInterceptorFactoryInterface {
- public:
- virtual experimental::Interceptor* CreateServerInterceptor(
- experimental::ServerRpcInfo* info) override {
- return new SyncSendMessageVerifier(info);
- }
-};
-
-void MakeBidiStreamingCall(const std::shared_ptr<Channel>& channel) {
- auto stub = grpc::testing::EchoTestService::NewStub(channel);
- ClientContext ctx;
- EchoRequest req;
- EchoResponse resp;
- ctx.AddMetadata("testkey", "testvalue");
- auto stream = stub->BidiStream(&ctx);
- for (auto i = 0; i < 10; i++) {
+ static_cast<const EchoRequest*>(methods->GetSendMessage())->message();
+ EXPECT_EQ(old_msg.find("World"), 0u);
+
+ // Remove the "World" part of the string that we added earlier
+ new_msg_.set_message(old_msg.erase(0, 5));
+ methods->ModifySendMessage(&new_msg_);
+
+ // LoggingInterceptor verifies that changes got reverted
+ }
+ methods->Proceed();
+ }
+
+ private:
+ EchoRequest new_msg_;
+};
+
+class SyncSendMessageVerifierFactory
+ : public experimental::ServerInterceptorFactoryInterface {
+ public:
+ virtual experimental::Interceptor* CreateServerInterceptor(
+ experimental::ServerRpcInfo* info) override {
+ return new SyncSendMessageVerifier(info);
+ }
+};
+
+void MakeBidiStreamingCall(const std::shared_ptr<Channel>& channel) {
+ auto stub = grpc::testing::EchoTestService::NewStub(channel);
+ ClientContext ctx;
+ EchoRequest req;
+ EchoResponse resp;
+ ctx.AddMetadata("testkey", "testvalue");
+ auto stream = stub->BidiStream(&ctx);
+ for (auto i = 0; i < 10; i++) {
req.set_message("Hello" + ::ToString(i));
- stream->Write(req);
- stream->Read(&resp);
- EXPECT_EQ(req.message(), resp.message());
- }
- ASSERT_TRUE(stream->WritesDone());
- Status s = stream->Finish();
- EXPECT_EQ(s.ok(), true);
-}
-
-class ServerInterceptorsEnd2endSyncUnaryTest : public ::testing::Test {
- protected:
- ServerInterceptorsEnd2endSyncUnaryTest() {
- int port = 5004; // grpc_pick_unused_port_or_die();
-
- ServerBuilder builder;
+ stream->Write(req);
+ stream->Read(&resp);
+ EXPECT_EQ(req.message(), resp.message());
+ }
+ ASSERT_TRUE(stream->WritesDone());
+ Status s = stream->Finish();
+ EXPECT_EQ(s.ok(), true);
+}
+
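+// The sync test fixtures below install SyncSendMessageTester,
+// SyncSendMessageVerifier and LoggingInterceptor on every RPC: the tester
+// prepends "World" to the message at PRE_SEND_MESSAGE, the verifier strips it
+// again, and the logging interceptor then sees the original "Hello..."
+// payload, confirming that ModifySendMessage changes are visible to later
+// interceptors and can be reverted.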
+class ServerInterceptorsEnd2endSyncUnaryTest : public ::testing::Test {
+ protected:
+ ServerInterceptorsEnd2endSyncUnaryTest() {
+ int port = 5004; // grpc_pick_unused_port_or_die();
+
+ ServerBuilder builder;
server_address_ = "localhost:" + ::ToString(port);
- builder.AddListeningPort(server_address_, InsecureServerCredentials());
- builder.RegisterService(&service_);
-
- std::vector<
- std::unique_ptr<experimental::ServerInterceptorFactoryInterface>>
- creators;
- creators.push_back(
- std::unique_ptr<experimental::ServerInterceptorFactoryInterface>(
- new SyncSendMessageTesterFactory()));
- creators.push_back(
- std::unique_ptr<experimental::ServerInterceptorFactoryInterface>(
- new SyncSendMessageVerifierFactory()));
- creators.push_back(
- std::unique_ptr<experimental::ServerInterceptorFactoryInterface>(
- new LoggingInterceptorFactory()));
- // Add 20 dummy interceptor factories and null interceptor factories
- for (auto i = 0; i < 20; i++) {
- creators.push_back(std::unique_ptr<DummyInterceptorFactory>(
- new DummyInterceptorFactory()));
- creators.push_back(std::unique_ptr<NullInterceptorFactory>(
- new NullInterceptorFactory()));
- }
- builder.experimental().SetInterceptorCreators(std::move(creators));
- server_ = builder.BuildAndStart();
- }
+ builder.AddListeningPort(server_address_, InsecureServerCredentials());
+ builder.RegisterService(&service_);
+
+ std::vector<
+ std::unique_ptr<experimental::ServerInterceptorFactoryInterface>>
+ creators;
+ creators.push_back(
+ std::unique_ptr<experimental::ServerInterceptorFactoryInterface>(
+ new SyncSendMessageTesterFactory()));
+ creators.push_back(
+ std::unique_ptr<experimental::ServerInterceptorFactoryInterface>(
+ new SyncSendMessageVerifierFactory()));
+ creators.push_back(
+ std::unique_ptr<experimental::ServerInterceptorFactoryInterface>(
+ new LoggingInterceptorFactory()));
+ // Add 20 dummy interceptor factories and null interceptor factories
+ for (auto i = 0; i < 20; i++) {
+ creators.push_back(std::unique_ptr<DummyInterceptorFactory>(
+ new DummyInterceptorFactory()));
+ creators.push_back(std::unique_ptr<NullInterceptorFactory>(
+ new NullInterceptorFactory()));
+ }
+ builder.experimental().SetInterceptorCreators(std::move(creators));
+ server_ = builder.BuildAndStart();
+ }
TString server_address_;
- TestServiceImpl service_;
- std::unique_ptr<Server> server_;
-};
-
-TEST_F(ServerInterceptorsEnd2endSyncUnaryTest, UnaryTest) {
- ChannelArguments args;
- DummyInterceptor::Reset();
- auto channel =
- grpc::CreateChannel(server_address_, InsecureChannelCredentials());
- MakeCall(channel);
- // Make sure all 20 dummy interceptors were run
- EXPECT_EQ(DummyInterceptor::GetNumTimesRun(), 20);
-}
-
-class ServerInterceptorsEnd2endSyncStreamingTest : public ::testing::Test {
- protected:
- ServerInterceptorsEnd2endSyncStreamingTest() {
- int port = 5005; // grpc_pick_unused_port_or_die();
-
- ServerBuilder builder;
+ TestServiceImpl service_;
+ std::unique_ptr<Server> server_;
+};
+
+TEST_F(ServerInterceptorsEnd2endSyncUnaryTest, UnaryTest) {
+ ChannelArguments args;
+ DummyInterceptor::Reset();
+ auto channel =
+ grpc::CreateChannel(server_address_, InsecureChannelCredentials());
+ MakeCall(channel);
+ // Make sure all 20 dummy interceptors were run
+ EXPECT_EQ(DummyInterceptor::GetNumTimesRun(), 20);
+}
+
+class ServerInterceptorsEnd2endSyncStreamingTest : public ::testing::Test {
+ protected:
+ ServerInterceptorsEnd2endSyncStreamingTest() {
+ int port = 5005; // grpc_pick_unused_port_or_die();
+
+ ServerBuilder builder;
server_address_ = "localhost:" + ::ToString(port);
- builder.AddListeningPort(server_address_, InsecureServerCredentials());
- builder.RegisterService(&service_);
-
- std::vector<
- std::unique_ptr<experimental::ServerInterceptorFactoryInterface>>
- creators;
- creators.push_back(
- std::unique_ptr<experimental::ServerInterceptorFactoryInterface>(
- new SyncSendMessageTesterFactory()));
- creators.push_back(
- std::unique_ptr<experimental::ServerInterceptorFactoryInterface>(
- new SyncSendMessageVerifierFactory()));
- creators.push_back(
- std::unique_ptr<experimental::ServerInterceptorFactoryInterface>(
- new LoggingInterceptorFactory()));
- for (auto i = 0; i < 20; i++) {
- creators.push_back(std::unique_ptr<DummyInterceptorFactory>(
- new DummyInterceptorFactory()));
- }
- builder.experimental().SetInterceptorCreators(std::move(creators));
- server_ = builder.BuildAndStart();
- }
+ builder.AddListeningPort(server_address_, InsecureServerCredentials());
+ builder.RegisterService(&service_);
+
+ std::vector<
+ std::unique_ptr<experimental::ServerInterceptorFactoryInterface>>
+ creators;
+ creators.push_back(
+ std::unique_ptr<experimental::ServerInterceptorFactoryInterface>(
+ new SyncSendMessageTesterFactory()));
+ creators.push_back(
+ std::unique_ptr<experimental::ServerInterceptorFactoryInterface>(
+ new SyncSendMessageVerifierFactory()));
+ creators.push_back(
+ std::unique_ptr<experimental::ServerInterceptorFactoryInterface>(
+ new LoggingInterceptorFactory()));
+ for (auto i = 0; i < 20; i++) {
+ creators.push_back(std::unique_ptr<DummyInterceptorFactory>(
+ new DummyInterceptorFactory()));
+ }
+ builder.experimental().SetInterceptorCreators(std::move(creators));
+ server_ = builder.BuildAndStart();
+ }
TString server_address_;
- EchoTestServiceStreamingImpl service_;
- std::unique_ptr<Server> server_;
-};
-
-TEST_F(ServerInterceptorsEnd2endSyncStreamingTest, ClientStreamingTest) {
- ChannelArguments args;
- DummyInterceptor::Reset();
- auto channel =
- grpc::CreateChannel(server_address_, InsecureChannelCredentials());
- MakeClientStreamingCall(channel);
- // Make sure all 20 dummy interceptors were run
- EXPECT_EQ(DummyInterceptor::GetNumTimesRun(), 20);
-}
-
-TEST_F(ServerInterceptorsEnd2endSyncStreamingTest, ServerStreamingTest) {
- ChannelArguments args;
- DummyInterceptor::Reset();
- auto channel =
- grpc::CreateChannel(server_address_, InsecureChannelCredentials());
- MakeServerStreamingCall(channel);
- // Make sure all 20 dummy interceptors were run
- EXPECT_EQ(DummyInterceptor::GetNumTimesRun(), 20);
-}
-
-TEST_F(ServerInterceptorsEnd2endSyncStreamingTest, BidiStreamingTest) {
- ChannelArguments args;
- DummyInterceptor::Reset();
- auto channel =
- grpc::CreateChannel(server_address_, InsecureChannelCredentials());
- MakeBidiStreamingCall(channel);
- // Make sure all 20 dummy interceptors were run
- EXPECT_EQ(DummyInterceptor::GetNumTimesRun(), 20);
-}
-
-class ServerInterceptorsAsyncEnd2endTest : public ::testing::Test {};
-
-TEST_F(ServerInterceptorsAsyncEnd2endTest, UnaryTest) {
- DummyInterceptor::Reset();
- int port = 5006; // grpc_pick_unused_port_or_die();
+ EchoTestServiceStreamingImpl service_;
+ std::unique_ptr<Server> server_;
+};
+
+TEST_F(ServerInterceptorsEnd2endSyncStreamingTest, ClientStreamingTest) {
+ ChannelArguments args;
+ DummyInterceptor::Reset();
+ auto channel =
+ grpc::CreateChannel(server_address_, InsecureChannelCredentials());
+ MakeClientStreamingCall(channel);
+ // Make sure all 20 dummy interceptors were run
+ EXPECT_EQ(DummyInterceptor::GetNumTimesRun(), 20);
+}
+
+TEST_F(ServerInterceptorsEnd2endSyncStreamingTest, ServerStreamingTest) {
+ ChannelArguments args;
+ DummyInterceptor::Reset();
+ auto channel =
+ grpc::CreateChannel(server_address_, InsecureChannelCredentials());
+ MakeServerStreamingCall(channel);
+ // Make sure all 20 dummy interceptors were run
+ EXPECT_EQ(DummyInterceptor::GetNumTimesRun(), 20);
+}
+
+TEST_F(ServerInterceptorsEnd2endSyncStreamingTest, BidiStreamingTest) {
+ ChannelArguments args;
+ DummyInterceptor::Reset();
+ auto channel =
+ grpc::CreateChannel(server_address_, InsecureChannelCredentials());
+ MakeBidiStreamingCall(channel);
+ // Make sure all 20 dummy interceptors were run
+ EXPECT_EQ(DummyInterceptor::GetNumTimesRun(), 20);
+}
+
+class ServerInterceptorsAsyncEnd2endTest : public ::testing::Test {};
+
+TEST_F(ServerInterceptorsAsyncEnd2endTest, UnaryTest) {
+ DummyInterceptor::Reset();
+ int port = 5006; // grpc_pick_unused_port_or_die();
string server_address = "localhost:" + ::ToString(port);
- ServerBuilder builder;
- EchoTestService::AsyncService service;
- builder.AddListeningPort(server_address, InsecureServerCredentials());
- builder.RegisterService(&service);
- std::vector<std::unique_ptr<experimental::ServerInterceptorFactoryInterface>>
- creators;
- creators.push_back(
- std::unique_ptr<experimental::ServerInterceptorFactoryInterface>(
- new LoggingInterceptorFactory()));
- for (auto i = 0; i < 20; i++) {
- creators.push_back(std::unique_ptr<DummyInterceptorFactory>(
- new DummyInterceptorFactory()));
- }
- builder.experimental().SetInterceptorCreators(std::move(creators));
- auto cq = builder.AddCompletionQueue();
- auto server = builder.BuildAndStart();
-
- ChannelArguments args;
- auto channel =
- grpc::CreateChannel(server_address, InsecureChannelCredentials());
- auto stub = grpc::testing::EchoTestService::NewStub(channel);
-
- EchoRequest send_request;
- EchoRequest recv_request;
- EchoResponse send_response;
- EchoResponse recv_response;
- Status recv_status;
-
- ClientContext cli_ctx;
- ServerContext srv_ctx;
- grpc::ServerAsyncResponseWriter<EchoResponse> response_writer(&srv_ctx);
-
- send_request.set_message("Hello");
- cli_ctx.AddMetadata("testkey", "testvalue");
- std::unique_ptr<ClientAsyncResponseReader<EchoResponse>> response_reader(
- stub->AsyncEcho(&cli_ctx, send_request, cq.get()));
-
- service.RequestEcho(&srv_ctx, &recv_request, &response_writer, cq.get(),
- cq.get(), tag(2));
-
- response_reader->Finish(&recv_response, &recv_status, tag(4));
-
- Verifier().Expect(2, true).Verify(cq.get());
- EXPECT_EQ(send_request.message(), recv_request.message());
-
- EXPECT_TRUE(CheckMetadata(srv_ctx.client_metadata(), "testkey", "testvalue"));
- srv_ctx.AddTrailingMetadata("testkey", "testvalue");
-
- send_response.set_message(recv_request.message());
- response_writer.Finish(send_response, Status::OK, tag(3));
- Verifier().Expect(3, true).Expect(4, true).Verify(cq.get());
-
- EXPECT_EQ(send_response.message(), recv_response.message());
- EXPECT_TRUE(recv_status.ok());
- EXPECT_TRUE(CheckMetadata(cli_ctx.GetServerTrailingMetadata(), "testkey",
- "testvalue"));
-
- // Make sure all 20 dummy interceptors were run
- EXPECT_EQ(DummyInterceptor::GetNumTimesRun(), 20);
-
- server->Shutdown();
- cq->Shutdown();
- void* ignored_tag;
- bool ignored_ok;
- while (cq->Next(&ignored_tag, &ignored_ok))
- ;
- // grpc_recycle_unused_port(port);
-}
-
-TEST_F(ServerInterceptorsAsyncEnd2endTest, BidiStreamingTest) {
- DummyInterceptor::Reset();
- int port = 5007; // grpc_pick_unused_port_or_die();
+ ServerBuilder builder;
+ EchoTestService::AsyncService service;
+ builder.AddListeningPort(server_address, InsecureServerCredentials());
+ builder.RegisterService(&service);
+ std::vector<std::unique_ptr<experimental::ServerInterceptorFactoryInterface>>
+ creators;
+ creators.push_back(
+ std::unique_ptr<experimental::ServerInterceptorFactoryInterface>(
+ new LoggingInterceptorFactory()));
+ for (auto i = 0; i < 20; i++) {
+ creators.push_back(std::unique_ptr<DummyInterceptorFactory>(
+ new DummyInterceptorFactory()));
+ }
+ builder.experimental().SetInterceptorCreators(std::move(creators));
+ auto cq = builder.AddCompletionQueue();
+ auto server = builder.BuildAndStart();
+
+ ChannelArguments args;
+ auto channel =
+ grpc::CreateChannel(server_address, InsecureChannelCredentials());
+ auto stub = grpc::testing::EchoTestService::NewStub(channel);
+
+ EchoRequest send_request;
+ EchoRequest recv_request;
+ EchoResponse send_response;
+ EchoResponse recv_response;
+ Status recv_status;
+
+ ClientContext cli_ctx;
+ ServerContext srv_ctx;
+ grpc::ServerAsyncResponseWriter<EchoResponse> response_writer(&srv_ctx);
+
+ send_request.set_message("Hello");
+ cli_ctx.AddMetadata("testkey", "testvalue");
+ std::unique_ptr<ClientAsyncResponseReader<EchoResponse>> response_reader(
+ stub->AsyncEcho(&cli_ctx, send_request, cq.get()));
+
+ service.RequestEcho(&srv_ctx, &recv_request, &response_writer, cq.get(),
+ cq.get(), tag(2));
+
+ response_reader->Finish(&recv_response, &recv_status, tag(4));
+
+ Verifier().Expect(2, true).Verify(cq.get());
+ EXPECT_EQ(send_request.message(), recv_request.message());
+
+ EXPECT_TRUE(CheckMetadata(srv_ctx.client_metadata(), "testkey", "testvalue"));
+ srv_ctx.AddTrailingMetadata("testkey", "testvalue");
+
+ send_response.set_message(recv_request.message());
+ response_writer.Finish(send_response, Status::OK, tag(3));
+ Verifier().Expect(3, true).Expect(4, true).Verify(cq.get());
+
+ EXPECT_EQ(send_response.message(), recv_response.message());
+ EXPECT_TRUE(recv_status.ok());
+ EXPECT_TRUE(CheckMetadata(cli_ctx.GetServerTrailingMetadata(), "testkey",
+ "testvalue"));
+
+ // Make sure all 20 dummy interceptors were run
+ EXPECT_EQ(DummyInterceptor::GetNumTimesRun(), 20);
+
+ server->Shutdown();
+ cq->Shutdown();
+ void* ignored_tag;
+ bool ignored_ok;
+ while (cq->Next(&ignored_tag, &ignored_ok))
+ ;
+ // grpc_recycle_unused_port(port);
+}
+
+TEST_F(ServerInterceptorsAsyncEnd2endTest, BidiStreamingTest) {
+ DummyInterceptor::Reset();
+ int port = 5007; // grpc_pick_unused_port_or_die();
string server_address = "localhost:" + ::ToString(port);
- ServerBuilder builder;
- EchoTestService::AsyncService service;
- builder.AddListeningPort(server_address, InsecureServerCredentials());
- builder.RegisterService(&service);
- std::vector<std::unique_ptr<experimental::ServerInterceptorFactoryInterface>>
- creators;
- creators.push_back(
- std::unique_ptr<experimental::ServerInterceptorFactoryInterface>(
- new LoggingInterceptorFactory()));
- for (auto i = 0; i < 20; i++) {
- creators.push_back(std::unique_ptr<DummyInterceptorFactory>(
- new DummyInterceptorFactory()));
- }
- builder.experimental().SetInterceptorCreators(std::move(creators));
- auto cq = builder.AddCompletionQueue();
- auto server = builder.BuildAndStart();
-
- ChannelArguments args;
- auto channel =
- grpc::CreateChannel(server_address, InsecureChannelCredentials());
- auto stub = grpc::testing::EchoTestService::NewStub(channel);
-
- EchoRequest send_request;
- EchoRequest recv_request;
- EchoResponse send_response;
- EchoResponse recv_response;
- Status recv_status;
-
- ClientContext cli_ctx;
- ServerContext srv_ctx;
- grpc::ServerAsyncReaderWriter<EchoResponse, EchoRequest> srv_stream(&srv_ctx);
-
- send_request.set_message("Hello");
- cli_ctx.AddMetadata("testkey", "testvalue");
- std::unique_ptr<ClientAsyncReaderWriter<EchoRequest, EchoResponse>>
- cli_stream(stub->AsyncBidiStream(&cli_ctx, cq.get(), tag(1)));
-
- service.RequestBidiStream(&srv_ctx, &srv_stream, cq.get(), cq.get(), tag(2));
-
- Verifier().Expect(1, true).Expect(2, true).Verify(cq.get());
-
- EXPECT_TRUE(CheckMetadata(srv_ctx.client_metadata(), "testkey", "testvalue"));
- srv_ctx.AddTrailingMetadata("testkey", "testvalue");
-
- cli_stream->Write(send_request, tag(3));
- srv_stream.Read(&recv_request, tag(4));
- Verifier().Expect(3, true).Expect(4, true).Verify(cq.get());
- EXPECT_EQ(send_request.message(), recv_request.message());
-
- send_response.set_message(recv_request.message());
- srv_stream.Write(send_response, tag(5));
- cli_stream->Read(&recv_response, tag(6));
- Verifier().Expect(5, true).Expect(6, true).Verify(cq.get());
- EXPECT_EQ(send_response.message(), recv_response.message());
-
- cli_stream->WritesDone(tag(7));
- srv_stream.Read(&recv_request, tag(8));
- Verifier().Expect(7, true).Expect(8, false).Verify(cq.get());
-
- srv_stream.Finish(Status::OK, tag(9));
- cli_stream->Finish(&recv_status, tag(10));
- Verifier().Expect(9, true).Expect(10, true).Verify(cq.get());
-
- EXPECT_TRUE(recv_status.ok());
- EXPECT_TRUE(CheckMetadata(cli_ctx.GetServerTrailingMetadata(), "testkey",
- "testvalue"));
-
- // Make sure all 20 dummy interceptors were run
- EXPECT_EQ(DummyInterceptor::GetNumTimesRun(), 20);
-
- server->Shutdown();
- cq->Shutdown();
- void* ignored_tag;
- bool ignored_ok;
- while (cq->Next(&ignored_tag, &ignored_ok))
- ;
- // grpc_recycle_unused_port(port);
-}
-
-TEST_F(ServerInterceptorsAsyncEnd2endTest, GenericRPCTest) {
- DummyInterceptor::Reset();
- int port = 5008; // grpc_pick_unused_port_or_die();
+ ServerBuilder builder;
+ EchoTestService::AsyncService service;
+ builder.AddListeningPort(server_address, InsecureServerCredentials());
+ builder.RegisterService(&service);
+ std::vector<std::unique_ptr<experimental::ServerInterceptorFactoryInterface>>
+ creators;
+ creators.push_back(
+ std::unique_ptr<experimental::ServerInterceptorFactoryInterface>(
+ new LoggingInterceptorFactory()));
+ for (auto i = 0; i < 20; i++) {
+ creators.push_back(std::unique_ptr<DummyInterceptorFactory>(
+ new DummyInterceptorFactory()));
+ }
+ builder.experimental().SetInterceptorCreators(std::move(creators));
+ auto cq = builder.AddCompletionQueue();
+ auto server = builder.BuildAndStart();
+
+ ChannelArguments args;
+ auto channel =
+ grpc::CreateChannel(server_address, InsecureChannelCredentials());
+ auto stub = grpc::testing::EchoTestService::NewStub(channel);
+
+ EchoRequest send_request;
+ EchoRequest recv_request;
+ EchoResponse send_response;
+ EchoResponse recv_response;
+ Status recv_status;
+
+ ClientContext cli_ctx;
+ ServerContext srv_ctx;
+ grpc::ServerAsyncReaderWriter<EchoResponse, EchoRequest> srv_stream(&srv_ctx);
+
+ send_request.set_message("Hello");
+ cli_ctx.AddMetadata("testkey", "testvalue");
+ std::unique_ptr<ClientAsyncReaderWriter<EchoRequest, EchoResponse>>
+ cli_stream(stub->AsyncBidiStream(&cli_ctx, cq.get(), tag(1)));
+
+ service.RequestBidiStream(&srv_ctx, &srv_stream, cq.get(), cq.get(), tag(2));
+
+ Verifier().Expect(1, true).Expect(2, true).Verify(cq.get());
+
+ EXPECT_TRUE(CheckMetadata(srv_ctx.client_metadata(), "testkey", "testvalue"));
+ srv_ctx.AddTrailingMetadata("testkey", "testvalue");
+
+ cli_stream->Write(send_request, tag(3));
+ srv_stream.Read(&recv_request, tag(4));
+ Verifier().Expect(3, true).Expect(4, true).Verify(cq.get());
+ EXPECT_EQ(send_request.message(), recv_request.message());
+
+ send_response.set_message(recv_request.message());
+ srv_stream.Write(send_response, tag(5));
+ cli_stream->Read(&recv_response, tag(6));
+ Verifier().Expect(5, true).Expect(6, true).Verify(cq.get());
+ EXPECT_EQ(send_response.message(), recv_response.message());
+
+ cli_stream->WritesDone(tag(7));
+ srv_stream.Read(&recv_request, tag(8));
+ Verifier().Expect(7, true).Expect(8, false).Verify(cq.get());
+
+ srv_stream.Finish(Status::OK, tag(9));
+ cli_stream->Finish(&recv_status, tag(10));
+ Verifier().Expect(9, true).Expect(10, true).Verify(cq.get());
+
+ EXPECT_TRUE(recv_status.ok());
+ EXPECT_TRUE(CheckMetadata(cli_ctx.GetServerTrailingMetadata(), "testkey",
+ "testvalue"));
+
+ // Make sure all 20 dummy interceptors were run
+ EXPECT_EQ(DummyInterceptor::GetNumTimesRun(), 20);
+
+ server->Shutdown();
+ cq->Shutdown();
+ void* ignored_tag;
+ bool ignored_ok;
+ while (cq->Next(&ignored_tag, &ignored_ok))
+ ;
+ // grpc_recycle_unused_port(port);
+}
+
+TEST_F(ServerInterceptorsAsyncEnd2endTest, GenericRPCTest) {
+ DummyInterceptor::Reset();
+ int port = 5008; // grpc_pick_unused_port_or_die();
string server_address = "localhost:" + ::ToString(port);
- ServerBuilder builder;
- AsyncGenericService service;
- builder.AddListeningPort(server_address, InsecureServerCredentials());
- builder.RegisterAsyncGenericService(&service);
- std::vector<std::unique_ptr<experimental::ServerInterceptorFactoryInterface>>
- creators;
- creators.reserve(20);
- for (auto i = 0; i < 20; i++) {
- creators.push_back(std::unique_ptr<DummyInterceptorFactory>(
- new DummyInterceptorFactory()));
- }
- builder.experimental().SetInterceptorCreators(std::move(creators));
- auto srv_cq = builder.AddCompletionQueue();
- CompletionQueue cli_cq;
- auto server = builder.BuildAndStart();
-
- ChannelArguments args;
- auto channel =
- grpc::CreateChannel(server_address, InsecureChannelCredentials());
- GenericStub generic_stub(channel);
-
+ ServerBuilder builder;
+ AsyncGenericService service;
+ builder.AddListeningPort(server_address, InsecureServerCredentials());
+ builder.RegisterAsyncGenericService(&service);
+ std::vector<std::unique_ptr<experimental::ServerInterceptorFactoryInterface>>
+ creators;
+ creators.reserve(20);
+ for (auto i = 0; i < 20; i++) {
+ creators.push_back(std::unique_ptr<DummyInterceptorFactory>(
+ new DummyInterceptorFactory()));
+ }
+ builder.experimental().SetInterceptorCreators(std::move(creators));
+ auto srv_cq = builder.AddCompletionQueue();
+ CompletionQueue cli_cq;
+ auto server = builder.BuildAndStart();
+
+ ChannelArguments args;
+ auto channel =
+ grpc::CreateChannel(server_address, InsecureChannelCredentials());
+ GenericStub generic_stub(channel);
+
const TString kMethodName("/grpc.cpp.test.util.EchoTestService/Echo");
- EchoRequest send_request;
- EchoRequest recv_request;
- EchoResponse send_response;
- EchoResponse recv_response;
- Status recv_status;
-
- ClientContext cli_ctx;
- GenericServerContext srv_ctx;
- GenericServerAsyncReaderWriter stream(&srv_ctx);
-
- // The string needs to be long enough to test heap-based slice.
- send_request.set_message("Hello");
- cli_ctx.AddMetadata("testkey", "testvalue");
-
+ EchoRequest send_request;
+ EchoRequest recv_request;
+ EchoResponse send_response;
+ EchoResponse recv_response;
+ Status recv_status;
+
+ ClientContext cli_ctx;
+ GenericServerContext srv_ctx;
+ GenericServerAsyncReaderWriter stream(&srv_ctx);
+
+  // The string needs to be long enough to exercise the heap-based slice path.
+ send_request.set_message("Hello");
+ cli_ctx.AddMetadata("testkey", "testvalue");
+
CompletionQueue* cq = srv_cq.get();
std::thread request_call([cq]() { Verifier().Expect(4, true).Verify(cq); });
- std::unique_ptr<GenericClientAsyncReaderWriter> call =
- generic_stub.PrepareCall(&cli_ctx, kMethodName, &cli_cq);
- call->StartCall(tag(1));
- Verifier().Expect(1, true).Verify(&cli_cq);
- std::unique_ptr<ByteBuffer> send_buffer =
- SerializeToByteBuffer(&send_request);
- call->Write(*send_buffer, tag(2));
- // Send ByteBuffer can be destroyed after calling Write.
- send_buffer.reset();
- Verifier().Expect(2, true).Verify(&cli_cq);
- call->WritesDone(tag(3));
- Verifier().Expect(3, true).Verify(&cli_cq);
-
- service.RequestCall(&srv_ctx, &stream, srv_cq.get(), srv_cq.get(), tag(4));
-
+ std::unique_ptr<GenericClientAsyncReaderWriter> call =
+ generic_stub.PrepareCall(&cli_ctx, kMethodName, &cli_cq);
+ call->StartCall(tag(1));
+ Verifier().Expect(1, true).Verify(&cli_cq);
+ std::unique_ptr<ByteBuffer> send_buffer =
+ SerializeToByteBuffer(&send_request);
+ call->Write(*send_buffer, tag(2));
+  // The send ByteBuffer can be destroyed after calling Write().
+ send_buffer.reset();
+ Verifier().Expect(2, true).Verify(&cli_cq);
+ call->WritesDone(tag(3));
+ Verifier().Expect(3, true).Verify(&cli_cq);
+
+ service.RequestCall(&srv_ctx, &stream, srv_cq.get(), srv_cq.get(), tag(4));
+
request_call.join();
- EXPECT_EQ(kMethodName, srv_ctx.method());
- EXPECT_TRUE(CheckMetadata(srv_ctx.client_metadata(), "testkey", "testvalue"));
- srv_ctx.AddTrailingMetadata("testkey", "testvalue");
-
- ByteBuffer recv_buffer;
- stream.Read(&recv_buffer, tag(5));
- Verifier().Expect(5, true).Verify(srv_cq.get());
- EXPECT_TRUE(ParseFromByteBuffer(&recv_buffer, &recv_request));
- EXPECT_EQ(send_request.message(), recv_request.message());
-
- send_response.set_message(recv_request.message());
- send_buffer = SerializeToByteBuffer(&send_response);
- stream.Write(*send_buffer, tag(6));
- send_buffer.reset();
- Verifier().Expect(6, true).Verify(srv_cq.get());
-
- stream.Finish(Status::OK, tag(7));
- // Shutdown srv_cq before we try to get the tag back, to verify that the
- // interception API handles completion queue shutdowns that take place before
- // all the tags are returned
- srv_cq->Shutdown();
- Verifier().Expect(7, true).Verify(srv_cq.get());
-
- recv_buffer.Clear();
- call->Read(&recv_buffer, tag(8));
- Verifier().Expect(8, true).Verify(&cli_cq);
- EXPECT_TRUE(ParseFromByteBuffer(&recv_buffer, &recv_response));
-
- call->Finish(&recv_status, tag(9));
- cli_cq.Shutdown();
- Verifier().Expect(9, true).Verify(&cli_cq);
-
- EXPECT_EQ(send_response.message(), recv_response.message());
- EXPECT_TRUE(recv_status.ok());
- EXPECT_TRUE(CheckMetadata(cli_ctx.GetServerTrailingMetadata(), "testkey",
- "testvalue"));
-
- // Make sure all 20 dummy interceptors were run
- EXPECT_EQ(DummyInterceptor::GetNumTimesRun(), 20);
-
- server->Shutdown();
- void* ignored_tag;
- bool ignored_ok;
- while (cli_cq.Next(&ignored_tag, &ignored_ok))
- ;
- while (srv_cq->Next(&ignored_tag, &ignored_ok))
- ;
- // grpc_recycle_unused_port(port);
-}
-
-TEST_F(ServerInterceptorsAsyncEnd2endTest, UnimplementedRpcTest) {
- DummyInterceptor::Reset();
- int port = 5009; // grpc_pick_unused_port_or_die();
+ EXPECT_EQ(kMethodName, srv_ctx.method());
+ EXPECT_TRUE(CheckMetadata(srv_ctx.client_metadata(), "testkey", "testvalue"));
+ srv_ctx.AddTrailingMetadata("testkey", "testvalue");
+
+ ByteBuffer recv_buffer;
+ stream.Read(&recv_buffer, tag(5));
+ Verifier().Expect(5, true).Verify(srv_cq.get());
+ EXPECT_TRUE(ParseFromByteBuffer(&recv_buffer, &recv_request));
+ EXPECT_EQ(send_request.message(), recv_request.message());
+
+ send_response.set_message(recv_request.message());
+ send_buffer = SerializeToByteBuffer(&send_response);
+ stream.Write(*send_buffer, tag(6));
+ send_buffer.reset();
+ Verifier().Expect(6, true).Verify(srv_cq.get());
+
+ stream.Finish(Status::OK, tag(7));
+  // Shut down srv_cq before we try to get the tag back, to verify that the
+  // interception API handles completion queue shutdowns that take place
+  // before all the tags have been returned.
+ srv_cq->Shutdown();
+ Verifier().Expect(7, true).Verify(srv_cq.get());
+
+ recv_buffer.Clear();
+ call->Read(&recv_buffer, tag(8));
+ Verifier().Expect(8, true).Verify(&cli_cq);
+ EXPECT_TRUE(ParseFromByteBuffer(&recv_buffer, &recv_response));
+
+ call->Finish(&recv_status, tag(9));
+ cli_cq.Shutdown();
+ Verifier().Expect(9, true).Verify(&cli_cq);
+
+ EXPECT_EQ(send_response.message(), recv_response.message());
+ EXPECT_TRUE(recv_status.ok());
+ EXPECT_TRUE(CheckMetadata(cli_ctx.GetServerTrailingMetadata(), "testkey",
+ "testvalue"));
+
+ // Make sure all 20 dummy interceptors were run
+ EXPECT_EQ(DummyInterceptor::GetNumTimesRun(), 20);
+
+ server->Shutdown();
+ void* ignored_tag;
+ bool ignored_ok;
+ while (cli_cq.Next(&ignored_tag, &ignored_ok))
+ ;
+ while (srv_cq->Next(&ignored_tag, &ignored_ok))
+ ;
+ // grpc_recycle_unused_port(port);
+}
+
+TEST_F(ServerInterceptorsAsyncEnd2endTest, UnimplementedRpcTest) {
+ DummyInterceptor::Reset();
+ int port = 5009; // grpc_pick_unused_port_or_die();
string server_address = "localhost:" + ::ToString(port);
- ServerBuilder builder;
- builder.AddListeningPort(server_address, InsecureServerCredentials());
- std::vector<std::unique_ptr<experimental::ServerInterceptorFactoryInterface>>
- creators;
- creators.reserve(20);
- for (auto i = 0; i < 20; i++) {
- creators.push_back(std::unique_ptr<DummyInterceptorFactory>(
- new DummyInterceptorFactory()));
- }
- builder.experimental().SetInterceptorCreators(std::move(creators));
- auto cq = builder.AddCompletionQueue();
- auto server = builder.BuildAndStart();
-
- ChannelArguments args;
- std::shared_ptr<Channel> channel =
- grpc::CreateChannel(server_address, InsecureChannelCredentials());
- std::unique_ptr<grpc::testing::UnimplementedEchoService::Stub> stub;
- stub = grpc::testing::UnimplementedEchoService::NewStub(channel);
- EchoRequest send_request;
- EchoResponse recv_response;
- Status recv_status;
-
- ClientContext cli_ctx;
- send_request.set_message("Hello");
- std::unique_ptr<ClientAsyncResponseReader<EchoResponse>> response_reader(
- stub->AsyncUnimplemented(&cli_ctx, send_request, cq.get()));
-
- response_reader->Finish(&recv_response, &recv_status, tag(4));
- Verifier().Expect(4, true).Verify(cq.get());
-
- EXPECT_EQ(StatusCode::UNIMPLEMENTED, recv_status.error_code());
- EXPECT_EQ("", recv_status.error_message());
-
- // Make sure all 20 dummy interceptors were run
- EXPECT_EQ(DummyInterceptor::GetNumTimesRun(), 20);
-
- server->Shutdown();
- cq->Shutdown();
- void* ignored_tag;
- bool ignored_ok;
- while (cq->Next(&ignored_tag, &ignored_ok))
- ;
- // grpc_recycle_unused_port(port);
-}
-
-class ServerInterceptorsSyncUnimplementedEnd2endTest : public ::testing::Test {
-};
-
-TEST_F(ServerInterceptorsSyncUnimplementedEnd2endTest, UnimplementedRpcTest) {
- DummyInterceptor::Reset();
- int port = 5010; // grpc_pick_unused_port_or_die();
+ ServerBuilder builder;
+ builder.AddListeningPort(server_address, InsecureServerCredentials());
+ std::vector<std::unique_ptr<experimental::ServerInterceptorFactoryInterface>>
+ creators;
+ creators.reserve(20);
+ for (auto i = 0; i < 20; i++) {
+ creators.push_back(std::unique_ptr<DummyInterceptorFactory>(
+ new DummyInterceptorFactory()));
+ }
+ builder.experimental().SetInterceptorCreators(std::move(creators));
+ auto cq = builder.AddCompletionQueue();
+ auto server = builder.BuildAndStart();
+
+ ChannelArguments args;
+ std::shared_ptr<Channel> channel =
+ grpc::CreateChannel(server_address, InsecureChannelCredentials());
+ std::unique_ptr<grpc::testing::UnimplementedEchoService::Stub> stub;
+ stub = grpc::testing::UnimplementedEchoService::NewStub(channel);
+ EchoRequest send_request;
+ EchoResponse recv_response;
+ Status recv_status;
+
+ ClientContext cli_ctx;
+ send_request.set_message("Hello");
+ std::unique_ptr<ClientAsyncResponseReader<EchoResponse>> response_reader(
+ stub->AsyncUnimplemented(&cli_ctx, send_request, cq.get()));
+
+ response_reader->Finish(&recv_response, &recv_status, tag(4));
+ Verifier().Expect(4, true).Verify(cq.get());
+
+ EXPECT_EQ(StatusCode::UNIMPLEMENTED, recv_status.error_code());
+ EXPECT_EQ("", recv_status.error_message());
+
+ // Make sure all 20 dummy interceptors were run
+ EXPECT_EQ(DummyInterceptor::GetNumTimesRun(), 20);
+
+ server->Shutdown();
+ cq->Shutdown();
+ void* ignored_tag;
+ bool ignored_ok;
+ while (cq->Next(&ignored_tag, &ignored_ok))
+ ;
+ // grpc_recycle_unused_port(port);
+}
+
+class ServerInterceptorsSyncUnimplementedEnd2endTest : public ::testing::Test {
+};
+
+TEST_F(ServerInterceptorsSyncUnimplementedEnd2endTest, UnimplementedRpcTest) {
+ DummyInterceptor::Reset();
+ int port = 5010; // grpc_pick_unused_port_or_die();
string server_address = "localhost:" + ::ToString(port);
- ServerBuilder builder;
- TestServiceImpl service;
- builder.RegisterService(&service);
- builder.AddListeningPort(server_address, InsecureServerCredentials());
- std::vector<std::unique_ptr<experimental::ServerInterceptorFactoryInterface>>
- creators;
- creators.reserve(20);
- for (auto i = 0; i < 20; i++) {
- creators.push_back(std::unique_ptr<DummyInterceptorFactory>(
- new DummyInterceptorFactory()));
- }
- builder.experimental().SetInterceptorCreators(std::move(creators));
- auto server = builder.BuildAndStart();
-
- ChannelArguments args;
- std::shared_ptr<Channel> channel =
- grpc::CreateChannel(server_address, InsecureChannelCredentials());
- std::unique_ptr<grpc::testing::UnimplementedEchoService::Stub> stub;
- stub = grpc::testing::UnimplementedEchoService::NewStub(channel);
- EchoRequest send_request;
- EchoResponse recv_response;
-
- ClientContext cli_ctx;
- send_request.set_message("Hello");
- Status recv_status =
- stub->Unimplemented(&cli_ctx, send_request, &recv_response);
-
- EXPECT_EQ(StatusCode::UNIMPLEMENTED, recv_status.error_code());
- EXPECT_EQ("", recv_status.error_message());
-
- // Make sure all 20 dummy interceptors were run
- EXPECT_EQ(DummyInterceptor::GetNumTimesRun(), 20);
-
- server->Shutdown();
- // grpc_recycle_unused_port(port);
-}
-
-} // namespace
-} // namespace testing
-} // namespace grpc
-
-int main(int argc, char** argv) {
- grpc::testing::TestEnvironment env(argc, argv);
- ::testing::InitGoogleTest(&argc, argv);
- return RUN_ALL_TESTS();
-}
+ ServerBuilder builder;
+ TestServiceImpl service;
+ builder.RegisterService(&service);
+ builder.AddListeningPort(server_address, InsecureServerCredentials());
+ std::vector<std::unique_ptr<experimental::ServerInterceptorFactoryInterface>>
+ creators;
+ creators.reserve(20);
+ for (auto i = 0; i < 20; i++) {
+ creators.push_back(std::unique_ptr<DummyInterceptorFactory>(
+ new DummyInterceptorFactory()));
+ }
+ builder.experimental().SetInterceptorCreators(std::move(creators));
+ auto server = builder.BuildAndStart();
+
+ ChannelArguments args;
+ std::shared_ptr<Channel> channel =
+ grpc::CreateChannel(server_address, InsecureChannelCredentials());
+ std::unique_ptr<grpc::testing::UnimplementedEchoService::Stub> stub;
+ stub = grpc::testing::UnimplementedEchoService::NewStub(channel);
+ EchoRequest send_request;
+ EchoResponse recv_response;
+
+ ClientContext cli_ctx;
+ send_request.set_message("Hello");
+ Status recv_status =
+ stub->Unimplemented(&cli_ctx, send_request, &recv_response);
+
+ EXPECT_EQ(StatusCode::UNIMPLEMENTED, recv_status.error_code());
+ EXPECT_EQ("", recv_status.error_message());
+
+ // Make sure all 20 dummy interceptors were run
+ EXPECT_EQ(DummyInterceptor::GetNumTimesRun(), 20);
+
+ server->Shutdown();
+ // grpc_recycle_unused_port(port);
+}
+
+} // namespace
+} // namespace testing
+} // namespace grpc
+
+int main(int argc, char** argv) {
+ grpc::testing::TestEnvironment env(argc, argv);
+ ::testing::InitGoogleTest(&argc, argv);
+ return RUN_ALL_TESTS();
+}
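
The tests in this file all register server interceptors the same way: a vector of ServerInterceptorFactoryInterface implementations is handed to ServerBuilder::experimental().SetInterceptorCreators() before BuildAndStart(), and each test then checks how many interceptors ran per RPC. The sketch below shows the minimal shape of such a factory. It is not the DummyInterceptor used above (whose definition lives in the test's shared utilities); the class names, header paths, and chosen hook point are illustrative assumptions.

#include <atomic>
#include <memory>
#include <utility>
#include <vector>

#include <grpcpp/grpcpp.h>
#include <grpcpp/support/server_interceptor.h>

// Illustrative counting interceptor: increments a counter once per RPC and
// otherwise lets every batch proceed unchanged.
class CountingInterceptor : public grpc::experimental::Interceptor {
 public:
  void Intercept(grpc::experimental::InterceptorBatchMethods* methods) override {
    if (methods->QueryInterceptionHookPoint(
            grpc::experimental::InterceptionHookPoints::
                POST_RECV_INITIAL_METADATA)) {
      num_times_run_.fetch_add(1);  // count once per intercepted RPC
    }
    methods->Proceed();  // always continue the batch
  }
  static int GetNumTimesRun() { return num_times_run_.load(); }
  static void Reset() { num_times_run_.store(0); }

 private:
  static std::atomic<int> num_times_run_;
};
std::atomic<int> CountingInterceptor::num_times_run_{0};

class CountingInterceptorFactory
    : public grpc::experimental::ServerInterceptorFactoryInterface {
 public:
  grpc::experimental::Interceptor* CreateServerInterceptor(
      grpc::experimental::ServerRpcInfo* /*info*/) override {
    return new CountingInterceptor();
  }
};

// Registration mirrors the tests above: build a vector of factories and hand
// it to the ServerBuilder before BuildAndStart().
void AddCountingInterceptor(grpc::ServerBuilder* builder) {
  std::vector<
      std::unique_ptr<grpc::experimental::ServerInterceptorFactoryInterface>>
      creators;
  creators.push_back(
      std::unique_ptr<grpc::experimental::ServerInterceptorFactoryInterface>(
          new CountingInterceptorFactory()));
  builder->experimental().SetInterceptorCreators(std::move(creators));
}

A test following the pattern above would call CountingInterceptor::Reset() before issuing the RPC and compare GetNumTimesRun() afterwards, mirroring the EXPECT_EQ checks on DummyInterceptor::GetNumTimesRun().
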
diff --git a/contrib/libs/grpc/test/cpp/end2end/server_load_reporting_end2end_test.cc b/contrib/libs/grpc/test/cpp/end2end/server_load_reporting_end2end_test.cc
index d06b74e363..13833cf66c 100644
--- a/contrib/libs/grpc/test/cpp/end2end/server_load_reporting_end2end_test.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/server_load_reporting_end2end_test.cc
@@ -1,192 +1,192 @@
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include <grpc/support/port_platform.h>
-
-#include <thread>
-
-#include <gmock/gmock.h>
-#include <gtest/gtest.h>
-
-#include <grpc++/grpc++.h>
-#include <grpc/grpc.h>
-#include <grpc/support/log.h>
-#include <grpc/support/string_util.h>
-#include <grpcpp/ext/server_load_reporting.h>
-#include <grpcpp/server_builder.h>
-
-#include "src/proto/grpc/lb/v1/load_reporter.grpc.pb.h"
-#include "src/proto/grpc/testing/echo.grpc.pb.h"
-#include "test/core/util/port.h"
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpc/support/port_platform.h>
+
+#include <thread>
+
+#include <gmock/gmock.h>
+#include <gtest/gtest.h>
+
+#include <grpc++/grpc++.h>
+#include <grpc/grpc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/string_util.h>
+#include <grpcpp/ext/server_load_reporting.h>
+#include <grpcpp/server_builder.h>
+
+#include "src/proto/grpc/lb/v1/load_reporter.grpc.pb.h"
+#include "src/proto/grpc/testing/echo.grpc.pb.h"
+#include "test/core/util/port.h"
#include "test/core/util/test_config.h"
-
-namespace grpc {
-namespace testing {
-namespace {
-
-constexpr double kMetricValue = 3.1415;
-constexpr char kMetricName[] = "METRIC_PI";
-
-// Different messages result in different response statuses. For simplicity in
-// computing request bytes, the message sizes should be the same.
-const char kOkMessage[] = "hello";
-const char kServerErrorMessage[] = "sverr";
-const char kClientErrorMessage[] = "clerr";
-
-class EchoTestServiceImpl : public EchoTestService::Service {
- public:
- ~EchoTestServiceImpl() override {}
-
- Status Echo(ServerContext* context, const EchoRequest* request,
- EchoResponse* response) override {
- if (request->message() == kServerErrorMessage) {
- return Status(StatusCode::UNKNOWN, "Server error requested");
- }
- if (request->message() == kClientErrorMessage) {
- return Status(StatusCode::FAILED_PRECONDITION, "Client error requested");
- }
- response->set_message(request->message());
- ::grpc::load_reporter::experimental::AddLoadReportingCost(
- context, kMetricName, kMetricValue);
- return Status::OK;
- }
-};
-
-class ServerLoadReportingEnd2endTest : public ::testing::Test {
- protected:
- void SetUp() override {
- server_address_ =
+
+namespace grpc {
+namespace testing {
+namespace {
+
+constexpr double kMetricValue = 3.1415;
+constexpr char kMetricName[] = "METRIC_PI";
+
+// Different messages result in different response statuses. For simplicity in
+// computing request bytes, the message sizes should be the same.
+const char kOkMessage[] = "hello";
+const char kServerErrorMessage[] = "sverr";
+const char kClientErrorMessage[] = "clerr";
+
+class EchoTestServiceImpl : public EchoTestService::Service {
+ public:
+ ~EchoTestServiceImpl() override {}
+
+ Status Echo(ServerContext* context, const EchoRequest* request,
+ EchoResponse* response) override {
+ if (request->message() == kServerErrorMessage) {
+ return Status(StatusCode::UNKNOWN, "Server error requested");
+ }
+ if (request->message() == kClientErrorMessage) {
+ return Status(StatusCode::FAILED_PRECONDITION, "Client error requested");
+ }
+ response->set_message(request->message());
+ ::grpc::load_reporter::experimental::AddLoadReportingCost(
+ context, kMetricName, kMetricValue);
+ return Status::OK;
+ }
+};
+
+class ServerLoadReportingEnd2endTest : public ::testing::Test {
+ protected:
+ void SetUp() override {
+ server_address_ =
"localhost:" + ToString(grpc_pick_unused_port_or_die());
- server_ =
- ServerBuilder()
- .AddListeningPort(server_address_, InsecureServerCredentials())
- .RegisterService(&echo_service_)
- .SetOption(std::unique_ptr<::grpc::ServerBuilderOption>(
- new ::grpc::load_reporter::experimental::
- LoadReportingServiceServerBuilderOption()))
- .BuildAndStart();
- server_thread_ =
- std::thread(&ServerLoadReportingEnd2endTest::RunServerLoop, this);
- }
-
- void RunServerLoop() { server_->Wait(); }
-
- void TearDown() override {
- server_->Shutdown();
- server_thread_.join();
- }
-
+ server_ =
+ ServerBuilder()
+ .AddListeningPort(server_address_, InsecureServerCredentials())
+ .RegisterService(&echo_service_)
+ .SetOption(std::unique_ptr<::grpc::ServerBuilderOption>(
+ new ::grpc::load_reporter::experimental::
+ LoadReportingServiceServerBuilderOption()))
+ .BuildAndStart();
+ server_thread_ =
+ std::thread(&ServerLoadReportingEnd2endTest::RunServerLoop, this);
+ }
+
+ void RunServerLoop() { server_->Wait(); }
+
+ void TearDown() override {
+ server_->Shutdown();
+ server_thread_.join();
+ }
+
void ClientMakeEchoCalls(const TString& lb_id, const TString& lb_tag,
const TString& message, size_t num_requests) {
- auto stub = EchoTestService::NewStub(
- grpc::CreateChannel(server_address_, InsecureChannelCredentials()));
+ auto stub = EchoTestService::NewStub(
+ grpc::CreateChannel(server_address_, InsecureChannelCredentials()));
TString lb_token = lb_id + lb_tag;
- for (int i = 0; i < num_requests; ++i) {
- ClientContext ctx;
- if (!lb_token.empty()) ctx.AddMetadata(GRPC_LB_TOKEN_MD_KEY, lb_token);
- EchoRequest request;
- EchoResponse response;
- request.set_message(message);
- Status status = stub->Echo(&ctx, request, &response);
- if (message == kOkMessage) {
- ASSERT_EQ(status.error_code(), StatusCode::OK);
- ASSERT_EQ(request.message(), response.message());
- } else if (message == kServerErrorMessage) {
- ASSERT_EQ(status.error_code(), StatusCode::UNKNOWN);
- } else if (message == kClientErrorMessage) {
- ASSERT_EQ(status.error_code(), StatusCode::FAILED_PRECONDITION);
- }
- }
- }
-
+ for (int i = 0; i < num_requests; ++i) {
+ ClientContext ctx;
+ if (!lb_token.empty()) ctx.AddMetadata(GRPC_LB_TOKEN_MD_KEY, lb_token);
+ EchoRequest request;
+ EchoResponse response;
+ request.set_message(message);
+ Status status = stub->Echo(&ctx, request, &response);
+ if (message == kOkMessage) {
+ ASSERT_EQ(status.error_code(), StatusCode::OK);
+ ASSERT_EQ(request.message(), response.message());
+ } else if (message == kServerErrorMessage) {
+ ASSERT_EQ(status.error_code(), StatusCode::UNKNOWN);
+ } else if (message == kClientErrorMessage) {
+ ASSERT_EQ(status.error_code(), StatusCode::FAILED_PRECONDITION);
+ }
+ }
+ }
+
TString server_address_;
- std::unique_ptr<Server> server_;
- std::thread server_thread_;
- EchoTestServiceImpl echo_service_;
-};
-
-TEST_F(ServerLoadReportingEnd2endTest, NoCall) {}
-
-TEST_F(ServerLoadReportingEnd2endTest, BasicReport) {
- auto channel =
- grpc::CreateChannel(server_address_, InsecureChannelCredentials());
- auto stub = ::grpc::lb::v1::LoadReporter::NewStub(channel);
- ClientContext ctx;
- auto stream = stub->ReportLoad(&ctx);
- ::grpc::lb::v1::LoadReportRequest request;
- request.mutable_initial_request()->set_load_balanced_hostname(
- server_address_);
- request.mutable_initial_request()->set_load_key("LOAD_KEY");
- request.mutable_initial_request()
- ->mutable_load_report_interval()
- ->set_seconds(5);
- stream->Write(request);
- gpr_log(GPR_INFO, "Initial request sent.");
- ::grpc::lb::v1::LoadReportResponse response;
- stream->Read(&response);
+ std::unique_ptr<Server> server_;
+ std::thread server_thread_;
+ EchoTestServiceImpl echo_service_;
+};
+
+TEST_F(ServerLoadReportingEnd2endTest, NoCall) {}
+
+TEST_F(ServerLoadReportingEnd2endTest, BasicReport) {
+ auto channel =
+ grpc::CreateChannel(server_address_, InsecureChannelCredentials());
+ auto stub = ::grpc::lb::v1::LoadReporter::NewStub(channel);
+ ClientContext ctx;
+ auto stream = stub->ReportLoad(&ctx);
+ ::grpc::lb::v1::LoadReportRequest request;
+ request.mutable_initial_request()->set_load_balanced_hostname(
+ server_address_);
+ request.mutable_initial_request()->set_load_key("LOAD_KEY");
+ request.mutable_initial_request()
+ ->mutable_load_report_interval()
+ ->set_seconds(5);
+ stream->Write(request);
+ gpr_log(GPR_INFO, "Initial request sent.");
+ ::grpc::lb::v1::LoadReportResponse response;
+ stream->Read(&response);
const TString& lb_id = response.initial_response().load_balancer_id();
- gpr_log(GPR_INFO, "Initial response received (lb_id: %s).", lb_id.c_str());
- ClientMakeEchoCalls(lb_id, "LB_TAG", kOkMessage, 1);
- while (true) {
- stream->Read(&response);
- if (!response.load().empty()) {
- ASSERT_EQ(response.load().size(), 3);
- for (const auto& load : response.load()) {
- if (load.in_progress_report_case()) {
- // The special load record that reports the number of in-progress
- // calls.
- ASSERT_EQ(load.num_calls_in_progress(), 1);
- } else if (load.orphaned_load_case()) {
- // The call from the balancer doesn't have any valid LB token.
- ASSERT_EQ(load.orphaned_load_case(), load.kLoadKeyUnknown);
- ASSERT_EQ(load.num_calls_started(), 1);
- ASSERT_EQ(load.num_calls_finished_without_error(), 0);
- ASSERT_EQ(load.num_calls_finished_with_error(), 0);
- } else {
- // This corresponds to the calls from the client.
- ASSERT_EQ(load.num_calls_started(), 1);
- ASSERT_EQ(load.num_calls_finished_without_error(), 1);
- ASSERT_EQ(load.num_calls_finished_with_error(), 0);
- ASSERT_GE(load.total_bytes_received(), sizeof(kOkMessage));
- ASSERT_GE(load.total_bytes_sent(), sizeof(kOkMessage));
- ASSERT_EQ(load.metric_data().size(), 1);
- ASSERT_EQ(load.metric_data().Get(0).metric_name(), kMetricName);
- ASSERT_EQ(load.metric_data().Get(0).num_calls_finished_with_metric(),
- 1);
- ASSERT_EQ(load.metric_data().Get(0).total_metric_value(),
- kMetricValue);
- }
- }
- break;
- }
- }
- stream->WritesDone();
- ASSERT_EQ(stream->Finish().error_code(), StatusCode::CANCELLED);
-}
-
-// TODO(juanlishen): Add more tests.
-
-} // namespace
-} // namespace testing
-} // namespace grpc
-
-int main(int argc, char** argv) {
+ gpr_log(GPR_INFO, "Initial response received (lb_id: %s).", lb_id.c_str());
+ ClientMakeEchoCalls(lb_id, "LB_TAG", kOkMessage, 1);
+ while (true) {
+ stream->Read(&response);
+ if (!response.load().empty()) {
+ ASSERT_EQ(response.load().size(), 3);
+ for (const auto& load : response.load()) {
+ if (load.in_progress_report_case()) {
+ // The special load record that reports the number of in-progress
+ // calls.
+ ASSERT_EQ(load.num_calls_in_progress(), 1);
+ } else if (load.orphaned_load_case()) {
+ // The call from the balancer doesn't have any valid LB token.
+ ASSERT_EQ(load.orphaned_load_case(), load.kLoadKeyUnknown);
+ ASSERT_EQ(load.num_calls_started(), 1);
+ ASSERT_EQ(load.num_calls_finished_without_error(), 0);
+ ASSERT_EQ(load.num_calls_finished_with_error(), 0);
+ } else {
+ // This corresponds to the calls from the client.
+ ASSERT_EQ(load.num_calls_started(), 1);
+ ASSERT_EQ(load.num_calls_finished_without_error(), 1);
+ ASSERT_EQ(load.num_calls_finished_with_error(), 0);
+ ASSERT_GE(load.total_bytes_received(), sizeof(kOkMessage));
+ ASSERT_GE(load.total_bytes_sent(), sizeof(kOkMessage));
+ ASSERT_EQ(load.metric_data().size(), 1);
+ ASSERT_EQ(load.metric_data().Get(0).metric_name(), kMetricName);
+ ASSERT_EQ(load.metric_data().Get(0).num_calls_finished_with_metric(),
+ 1);
+ ASSERT_EQ(load.metric_data().Get(0).total_metric_value(),
+ kMetricValue);
+ }
+ }
+ break;
+ }
+ }
+ stream->WritesDone();
+ ASSERT_EQ(stream->Finish().error_code(), StatusCode::CANCELLED);
+}
+
+// TODO(juanlishen): Add more tests.
+
+} // namespace
+} // namespace testing
+} // namespace grpc
+
+int main(int argc, char** argv) {
grpc::testing::TestEnvironment env(argc, argv);
- ::testing::InitGoogleTest(&argc, argv);
- return RUN_ALL_TESTS();
-}
+ ::testing::InitGoogleTest(&argc, argv);
+ return RUN_ALL_TESTS();
+}
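
The load reporting test above relies on two pieces of the experimental API: LoadReportingServiceServerBuilderOption, installed as a ServerBuilderOption so the server exposes the load reporter service that BasicReport talks to, and AddLoadReportingCost(), which a handler calls to attach a per-call cost metric. A minimal sketch of that wiring follows; the helper name and address parameter are illustrative, and only the builder option and the cost call are taken from the test itself.

#include <memory>
#include <string>

#include <grpcpp/ext/server_load_reporting.h>
#include <grpcpp/grpcpp.h>

// Build a server with load reporting enabled, as ServerLoadReportingEnd2endTest
// does in SetUp(). The function name and signature are illustrative.
std::unique_ptr<grpc::Server> BuildLoadReportingServer(
    grpc::Service* service, const std::string& address) {
  grpc::ServerBuilder builder;
  builder.AddListeningPort(address, grpc::InsecureServerCredentials());
  builder.RegisterService(service);
  // Installing this option is what makes the load reporter service available.
  builder.SetOption(std::unique_ptr<grpc::ServerBuilderOption>(
      new grpc::load_reporter::experimental::
          LoadReportingServiceServerBuilderOption()));
  return builder.BuildAndStart();
}

// Inside a handler, a per-call cost metric is attached exactly as in
// EchoTestServiceImpl::Echo above:
//   ::grpc::load_reporter::experimental::AddLoadReportingCost(
//       context, "METRIC_PI", 3.1415);
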
diff --git a/contrib/libs/grpc/test/cpp/end2end/shutdown_test.cc b/contrib/libs/grpc/test/cpp/end2end/shutdown_test.cc
index e980c7ce7c..3aa7a766c4 100644
--- a/contrib/libs/grpc/test/cpp/end2end/shutdown_test.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/shutdown_test.cc
@@ -1,170 +1,170 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include <thread>
-
-#include <grpc/grpc.h>
-#include <grpc/support/log.h>
-#include <grpc/support/sync.h>
-#include <grpcpp/channel.h>
-#include <grpcpp/client_context.h>
-#include <grpcpp/create_channel.h>
-#include <grpcpp/server.h>
-#include <grpcpp/server_builder.h>
-#include <grpcpp/server_context.h>
-
-#include "src/core/lib/gpr/env.h"
-#include "src/proto/grpc/testing/echo.grpc.pb.h"
-#include "test/core/util/port.h"
-#include "test/core/util/test_config.h"
-#include "test/cpp/util/test_credentials_provider.h"
-
-#include <gtest/gtest.h>
-
-using grpc::testing::EchoRequest;
-using grpc::testing::EchoResponse;
-
-namespace grpc {
-namespace testing {
-
-class TestServiceImpl : public ::grpc::testing::EchoTestService::Service {
- public:
- explicit TestServiceImpl(gpr_event* ev) : ev_(ev) {}
-
- Status Echo(ServerContext* context, const EchoRequest* /*request*/,
- EchoResponse* /*response*/) override {
- gpr_event_set(ev_, (void*)1);
- while (!context->IsCancelled()) {
- }
- return Status::OK;
- }
-
- private:
- gpr_event* ev_;
-};
-
-class ShutdownTest : public ::testing::TestWithParam<string> {
- public:
- ShutdownTest() : shutdown_(false), service_(&ev_) { gpr_event_init(&ev_); }
-
- void SetUp() override {
- port_ = grpc_pick_unused_port_or_die();
- server_ = SetUpServer(port_);
- }
-
- std::unique_ptr<Server> SetUpServer(const int port) {
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <thread>
+
+#include <grpc/grpc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/sync.h>
+#include <grpcpp/channel.h>
+#include <grpcpp/client_context.h>
+#include <grpcpp/create_channel.h>
+#include <grpcpp/server.h>
+#include <grpcpp/server_builder.h>
+#include <grpcpp/server_context.h>
+
+#include "src/core/lib/gpr/env.h"
+#include "src/proto/grpc/testing/echo.grpc.pb.h"
+#include "test/core/util/port.h"
+#include "test/core/util/test_config.h"
+#include "test/cpp/util/test_credentials_provider.h"
+
+#include <gtest/gtest.h>
+
+using grpc::testing::EchoRequest;
+using grpc::testing::EchoResponse;
+
+namespace grpc {
+namespace testing {
+
+class TestServiceImpl : public ::grpc::testing::EchoTestService::Service {
+ public:
+ explicit TestServiceImpl(gpr_event* ev) : ev_(ev) {}
+
+ Status Echo(ServerContext* context, const EchoRequest* /*request*/,
+ EchoResponse* /*response*/) override {
+ gpr_event_set(ev_, (void*)1);
+ while (!context->IsCancelled()) {
+ }
+ return Status::OK;
+ }
+
+ private:
+ gpr_event* ev_;
+};
+
+class ShutdownTest : public ::testing::TestWithParam<string> {
+ public:
+ ShutdownTest() : shutdown_(false), service_(&ev_) { gpr_event_init(&ev_); }
+
+ void SetUp() override {
+ port_ = grpc_pick_unused_port_or_die();
+ server_ = SetUpServer(port_);
+ }
+
+ std::unique_ptr<Server> SetUpServer(const int port) {
TString server_address = "localhost:" + to_string(port);
-
- ServerBuilder builder;
- auto server_creds =
- GetCredentialsProvider()->GetServerCredentials(GetParam());
- builder.AddListeningPort(server_address, server_creds);
- builder.RegisterService(&service_);
- std::unique_ptr<Server> server = builder.BuildAndStart();
- return server;
- }
-
- void TearDown() override { GPR_ASSERT(shutdown_); }
-
- void ResetStub() {
- string target = "dns:localhost:" + to_string(port_);
- ChannelArguments args;
- auto channel_creds =
- GetCredentialsProvider()->GetChannelCredentials(GetParam(), &args);
- channel_ = ::grpc::CreateCustomChannel(target, channel_creds, args);
- stub_ = grpc::testing::EchoTestService::NewStub(channel_);
- }
-
- string to_string(const int number) {
- std::stringstream strs;
- strs << number;
- return strs.str();
- }
-
- void SendRequest() {
- EchoRequest request;
- EchoResponse response;
- request.set_message("Hello");
- ClientContext context;
- GPR_ASSERT(!shutdown_);
- Status s = stub_->Echo(&context, request, &response);
- GPR_ASSERT(shutdown_);
- }
-
- protected:
- std::shared_ptr<Channel> channel_;
- std::unique_ptr<grpc::testing::EchoTestService::Stub> stub_;
- std::unique_ptr<Server> server_;
- bool shutdown_;
- int port_;
- gpr_event ev_;
- TestServiceImpl service_;
-};
-
-std::vector<string> GetAllCredentialsTypeList() {
+
+ ServerBuilder builder;
+ auto server_creds =
+ GetCredentialsProvider()->GetServerCredentials(GetParam());
+ builder.AddListeningPort(server_address, server_creds);
+ builder.RegisterService(&service_);
+ std::unique_ptr<Server> server = builder.BuildAndStart();
+ return server;
+ }
+
+ void TearDown() override { GPR_ASSERT(shutdown_); }
+
+ void ResetStub() {
+ string target = "dns:localhost:" + to_string(port_);
+ ChannelArguments args;
+ auto channel_creds =
+ GetCredentialsProvider()->GetChannelCredentials(GetParam(), &args);
+ channel_ = ::grpc::CreateCustomChannel(target, channel_creds, args);
+ stub_ = grpc::testing::EchoTestService::NewStub(channel_);
+ }
+
+ string to_string(const int number) {
+ std::stringstream strs;
+ strs << number;
+ return strs.str();
+ }
+
+ void SendRequest() {
+ EchoRequest request;
+ EchoResponse response;
+ request.set_message("Hello");
+ ClientContext context;
+ GPR_ASSERT(!shutdown_);
+ Status s = stub_->Echo(&context, request, &response);
+ GPR_ASSERT(shutdown_);
+ }
+
+ protected:
+ std::shared_ptr<Channel> channel_;
+ std::unique_ptr<grpc::testing::EchoTestService::Stub> stub_;
+ std::unique_ptr<Server> server_;
+ bool shutdown_;
+ int port_;
+ gpr_event ev_;
+ TestServiceImpl service_;
+};
+
+std::vector<string> GetAllCredentialsTypeList() {
std::vector<TString> credentials_types;
- if (GetCredentialsProvider()->GetChannelCredentials(kInsecureCredentialsType,
- nullptr) != nullptr) {
- credentials_types.push_back(kInsecureCredentialsType);
- }
- auto sec_list = GetCredentialsProvider()->GetSecureCredentialsTypeList();
- for (auto sec = sec_list.begin(); sec != sec_list.end(); sec++) {
- credentials_types.push_back(*sec);
- }
- GPR_ASSERT(!credentials_types.empty());
-
+ if (GetCredentialsProvider()->GetChannelCredentials(kInsecureCredentialsType,
+ nullptr) != nullptr) {
+ credentials_types.push_back(kInsecureCredentialsType);
+ }
+ auto sec_list = GetCredentialsProvider()->GetSecureCredentialsTypeList();
+ for (auto sec = sec_list.begin(); sec != sec_list.end(); sec++) {
+ credentials_types.push_back(*sec);
+ }
+ GPR_ASSERT(!credentials_types.empty());
+
TString credentials_type_list("credentials types:");
- for (const string& type : credentials_types) {
- credentials_type_list.append(" " + type);
- }
- gpr_log(GPR_INFO, "%s", credentials_type_list.c_str());
- return credentials_types;
-}
-
-INSTANTIATE_TEST_SUITE_P(End2EndShutdown, ShutdownTest,
- ::testing::ValuesIn(GetAllCredentialsTypeList()));
-
-// TODO(ctiller): leaked objects in this test
-TEST_P(ShutdownTest, ShutdownTest) {
- ResetStub();
-
- // send the request in a background thread
- std::thread thr(std::bind(&ShutdownTest::SendRequest, this));
-
- // wait for the server to get the event
- gpr_event_wait(&ev_, gpr_inf_future(GPR_CLOCK_MONOTONIC));
-
- shutdown_ = true;
-
- // shutdown should trigger cancellation causing everything to shutdown
- auto deadline =
- std::chrono::system_clock::now() + std::chrono::microseconds(100);
- server_->Shutdown(deadline);
- EXPECT_GE(std::chrono::system_clock::now(), deadline);
-
- thr.join();
-}
-
-} // namespace testing
-} // namespace grpc
-
-int main(int argc, char** argv) {
- grpc::testing::TestEnvironment env(argc, argv);
- ::testing::InitGoogleTest(&argc, argv);
- return RUN_ALL_TESTS();
-}
+ for (const string& type : credentials_types) {
+ credentials_type_list.append(" " + type);
+ }
+ gpr_log(GPR_INFO, "%s", credentials_type_list.c_str());
+ return credentials_types;
+}
+
+INSTANTIATE_TEST_SUITE_P(End2EndShutdown, ShutdownTest,
+ ::testing::ValuesIn(GetAllCredentialsTypeList()));
+
+// TODO(ctiller): leaked objects in this test
+TEST_P(ShutdownTest, ShutdownTest) {
+ ResetStub();
+
+ // send the request in a background thread
+ std::thread thr(std::bind(&ShutdownTest::SendRequest, this));
+
+ // wait for the server to get the event
+ gpr_event_wait(&ev_, gpr_inf_future(GPR_CLOCK_MONOTONIC));
+
+ shutdown_ = true;
+
+ // shutdown should trigger cancellation causing everything to shutdown
+ auto deadline =
+ std::chrono::system_clock::now() + std::chrono::microseconds(100);
+ server_->Shutdown(deadline);
+ EXPECT_GE(std::chrono::system_clock::now(), deadline);
+
+ thr.join();
+}
+
+} // namespace testing
+} // namespace grpc
+
+int main(int argc, char** argv) {
+ grpc::testing::TestEnvironment env(argc, argv);
+ ::testing::InitGoogleTest(&argc, argv);
+ return RUN_ALL_TESTS();
+}
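
ShutdownTest hinges on Server::Shutdown() accepting a deadline: calls still in flight when the deadline passes are cancelled, which breaks the busy-wait in TestServiceImpl::Echo and lets the background SendRequest() thread finish. A minimal sketch of that call pattern is shown below; the helper name and the longer 100 ms deadline are illustrative, not taken from the test.

#include <chrono>

#include <grpcpp/grpcpp.h>

// Deadline-bounded shutdown: new calls are refused immediately, and any call
// still pending when the deadline passes is forcefully cancelled.
void ShutdownWithDeadline(grpc::Server* server) {
  auto deadline =
      std::chrono::system_clock::now() + std::chrono::milliseconds(100);
  server->Shutdown(deadline);
}
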
diff --git a/contrib/libs/grpc/test/cpp/end2end/streaming_throughput_test.cc b/contrib/libs/grpc/test/cpp/end2end/streaming_throughput_test.cc
index 1c6f9806f0..f2252063fb 100644
--- a/contrib/libs/grpc/test/cpp/end2end/streaming_throughput_test.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/streaming_throughput_test.cc
@@ -1,193 +1,193 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include <time.h>
-#include <mutex>
-#include <thread>
-
-#include <grpc/grpc.h>
-#include <grpc/support/atm.h>
-#include <grpc/support/log.h>
-#include <grpc/support/time.h>
-#include <grpcpp/channel.h>
-#include <grpcpp/client_context.h>
-#include <grpcpp/create_channel.h>
-#include <grpcpp/security/credentials.h>
-#include <grpcpp/security/server_credentials.h>
-#include <grpcpp/server.h>
-#include <grpcpp/server_builder.h>
-#include <grpcpp/server_context.h>
-
-#include "src/proto/grpc/testing/duplicate/echo_duplicate.grpc.pb.h"
-#include "src/proto/grpc/testing/echo.grpc.pb.h"
-#include "test/core/util/port.h"
-#include "test/core/util/test_config.h"
-
-#include <gtest/gtest.h>
-
-using grpc::testing::EchoRequest;
-using grpc::testing::EchoResponse;
-using std::chrono::system_clock;
-
-const char* kLargeString =
- "("
- "To be, or not to be- that is the question:"
- "Whether 'tis nobler in the mind to suffer"
- "The slings and arrows of outrageous fortune"
- "Or to take arms against a sea of troubles,"
- "And by opposing end them. To die- to sleep-"
- "No more; and by a sleep to say we end"
- "The heartache, and the thousand natural shock"
- "That flesh is heir to. 'Tis a consummation"
- "Devoutly to be wish'd. To die- to sleep."
- "To sleep- perchance to dream: ay, there's the rub!"
- "For in that sleep of death what dreams may come"
- "When we have shuffled off this mortal coil,"
- "Must give us pause. There's the respect"
- "That makes calamity of so long life."
- "For who would bear the whips and scorns of time,"
- "Th' oppressor's wrong, the proud man's contumely,"
- "The pangs of despis'd love, the law's delay,"
- "The insolence of office, and the spurns"
- "That patient merit of th' unworthy takes,"
- "When he himself might his quietus make"
- "With a bare bodkin? Who would these fardels bear,"
- "To grunt and sweat under a weary life,"
- "But that the dread of something after death-"
- "The undiscover'd country, from whose bourn"
- "No traveller returns- puzzles the will,"
- "And makes us rather bear those ills we have"
- "Than fly to others that we know not of?"
- "Thus conscience does make cowards of us all,"
- "And thus the native hue of resolution"
- "Is sicklied o'er with the pale cast of thought,"
- "And enterprises of great pith and moment"
- "With this regard their currents turn awry"
- "And lose the name of action.- Soft you now!"
- "The fair Ophelia!- Nymph, in thy orisons"
- "Be all my sins rememb'red.";
-
-namespace grpc {
-namespace testing {
-
-class TestServiceImpl : public ::grpc::testing::EchoTestService::Service {
- public:
- static void BidiStream_Sender(
- ServerReaderWriter<EchoResponse, EchoRequest>* stream,
- gpr_atm* should_exit) {
- EchoResponse response;
- response.set_message(kLargeString);
- while (gpr_atm_acq_load(should_exit) == static_cast<gpr_atm>(0)) {
- struct timespec tv = {0, 1000000}; // 1 ms
- struct timespec rem;
- // TODO (vpai): Mark this blocking
- while (nanosleep(&tv, &rem) != 0) {
- tv = rem;
- };
-
- stream->Write(response);
- }
- }
-
- // Only implement the one method we will be calling for brevity.
- Status BidiStream(
- ServerContext* /*context*/,
- ServerReaderWriter<EchoResponse, EchoRequest>* stream) override {
- EchoRequest request;
- gpr_atm should_exit;
- gpr_atm_rel_store(&should_exit, static_cast<gpr_atm>(0));
-
- std::thread sender(
- std::bind(&TestServiceImpl::BidiStream_Sender, stream, &should_exit));
-
- while (stream->Read(&request)) {
- struct timespec tv = {0, 3000000}; // 3 ms
- struct timespec rem;
- // TODO (vpai): Mark this blocking
- while (nanosleep(&tv, &rem) != 0) {
- tv = rem;
- };
- }
- gpr_atm_rel_store(&should_exit, static_cast<gpr_atm>(1));
- sender.join();
- return Status::OK;
- }
-};
-
-class End2endTest : public ::testing::Test {
- protected:
- void SetUp() override {
- int port = grpc_pick_unused_port_or_die();
- server_address_ << "localhost:" << port;
- // Setup server
- ServerBuilder builder;
- builder.AddListeningPort(server_address_.str(),
- InsecureServerCredentials());
- builder.RegisterService(&service_);
- server_ = builder.BuildAndStart();
- }
-
- void TearDown() override { server_->Shutdown(); }
-
- void ResetStub() {
- std::shared_ptr<Channel> channel = grpc::CreateChannel(
- server_address_.str(), InsecureChannelCredentials());
- stub_ = grpc::testing::EchoTestService::NewStub(channel);
- }
-
- std::unique_ptr<grpc::testing::EchoTestService::Stub> stub_;
- std::unique_ptr<Server> server_;
- std::ostringstream server_address_;
- TestServiceImpl service_;
-};
-
-static void Drainer(ClientReaderWriter<EchoRequest, EchoResponse>* reader) {
- EchoResponse response;
- while (reader->Read(&response)) {
- // Just drain out the responses as fast as possible.
- }
-}
-
-TEST_F(End2endTest, StreamingThroughput) {
- ResetStub();
- grpc::ClientContext context;
- auto stream = stub_->BidiStream(&context);
-
- auto reader = stream.get();
- std::thread receiver(std::bind(Drainer, reader));
-
- for (int i = 0; i < 10000; i++) {
- EchoRequest request;
- request.set_message(kLargeString);
- ASSERT_TRUE(stream->Write(request));
- if (i % 1000 == 0) {
- gpr_log(GPR_INFO, "Send count = %d", i);
- }
- }
- stream->WritesDone();
- receiver.join();
-}
-
-} // namespace testing
-} // namespace grpc
-
-int main(int argc, char** argv) {
- grpc::testing::TestEnvironment env(argc, argv);
- ::testing::InitGoogleTest(&argc, argv);
- return RUN_ALL_TESTS();
-}
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <time.h>
+#include <mutex>
+#include <thread>
+
+#include <grpc/grpc.h>
+#include <grpc/support/atm.h>
+#include <grpc/support/log.h>
+#include <grpc/support/time.h>
+#include <grpcpp/channel.h>
+#include <grpcpp/client_context.h>
+#include <grpcpp/create_channel.h>
+#include <grpcpp/security/credentials.h>
+#include <grpcpp/security/server_credentials.h>
+#include <grpcpp/server.h>
+#include <grpcpp/server_builder.h>
+#include <grpcpp/server_context.h>
+
+#include "src/proto/grpc/testing/duplicate/echo_duplicate.grpc.pb.h"
+#include "src/proto/grpc/testing/echo.grpc.pb.h"
+#include "test/core/util/port.h"
+#include "test/core/util/test_config.h"
+
+#include <gtest/gtest.h>
+
+using grpc::testing::EchoRequest;
+using grpc::testing::EchoResponse;
+using std::chrono::system_clock;
+
+const char* kLargeString =
+ "("
+ "To be, or not to be- that is the question:"
+ "Whether 'tis nobler in the mind to suffer"
+ "The slings and arrows of outrageous fortune"
+ "Or to take arms against a sea of troubles,"
+ "And by opposing end them. To die- to sleep-"
+ "No more; and by a sleep to say we end"
+ "The heartache, and the thousand natural shock"
+ "That flesh is heir to. 'Tis a consummation"
+ "Devoutly to be wish'd. To die- to sleep."
+ "To sleep- perchance to dream: ay, there's the rub!"
+ "For in that sleep of death what dreams may come"
+ "When we have shuffled off this mortal coil,"
+ "Must give us pause. There's the respect"
+ "That makes calamity of so long life."
+ "For who would bear the whips and scorns of time,"
+ "Th' oppressor's wrong, the proud man's contumely,"
+ "The pangs of despis'd love, the law's delay,"
+ "The insolence of office, and the spurns"
+ "That patient merit of th' unworthy takes,"
+ "When he himself might his quietus make"
+ "With a bare bodkin? Who would these fardels bear,"
+ "To grunt and sweat under a weary life,"
+ "But that the dread of something after death-"
+ "The undiscover'd country, from whose bourn"
+ "No traveller returns- puzzles the will,"
+ "And makes us rather bear those ills we have"
+ "Than fly to others that we know not of?"
+ "Thus conscience does make cowards of us all,"
+ "And thus the native hue of resolution"
+ "Is sicklied o'er with the pale cast of thought,"
+ "And enterprises of great pith and moment"
+ "With this regard their currents turn awry"
+ "And lose the name of action.- Soft you now!"
+ "The fair Ophelia!- Nymph, in thy orisons"
+ "Be all my sins rememb'red.";
+
+namespace grpc {
+namespace testing {
+
+class TestServiceImpl : public ::grpc::testing::EchoTestService::Service {
+ public:
+ static void BidiStream_Sender(
+ ServerReaderWriter<EchoResponse, EchoRequest>* stream,
+ gpr_atm* should_exit) {
+ EchoResponse response;
+ response.set_message(kLargeString);
+ while (gpr_atm_acq_load(should_exit) == static_cast<gpr_atm>(0)) {
+ struct timespec tv = {0, 1000000}; // 1 ms
+ struct timespec rem;
+ // TODO (vpai): Mark this blocking
+ while (nanosleep(&tv, &rem) != 0) {
+ tv = rem;
+ };
+
+ stream->Write(response);
+ }
+ }
+
+ // Only implement the one method we will be calling for brevity.
+ Status BidiStream(
+ ServerContext* /*context*/,
+ ServerReaderWriter<EchoResponse, EchoRequest>* stream) override {
+ EchoRequest request;
+ gpr_atm should_exit;
+ gpr_atm_rel_store(&should_exit, static_cast<gpr_atm>(0));
+
+ std::thread sender(
+ std::bind(&TestServiceImpl::BidiStream_Sender, stream, &should_exit));
+
+ while (stream->Read(&request)) {
+ struct timespec tv = {0, 3000000}; // 3 ms
+ struct timespec rem;
+ // TODO (vpai): Mark this blocking
+ while (nanosleep(&tv, &rem) != 0) {
+ tv = rem;
+ };
+ }
+ gpr_atm_rel_store(&should_exit, static_cast<gpr_atm>(1));
+ sender.join();
+ return Status::OK;
+ }
+};
+
+class End2endTest : public ::testing::Test {
+ protected:
+ void SetUp() override {
+ int port = grpc_pick_unused_port_or_die();
+ server_address_ << "localhost:" << port;
+ // Setup server
+ ServerBuilder builder;
+ builder.AddListeningPort(server_address_.str(),
+ InsecureServerCredentials());
+ builder.RegisterService(&service_);
+ server_ = builder.BuildAndStart();
+ }
+
+ void TearDown() override { server_->Shutdown(); }
+
+ void ResetStub() {
+ std::shared_ptr<Channel> channel = grpc::CreateChannel(
+ server_address_.str(), InsecureChannelCredentials());
+ stub_ = grpc::testing::EchoTestService::NewStub(channel);
+ }
+
+ std::unique_ptr<grpc::testing::EchoTestService::Stub> stub_;
+ std::unique_ptr<Server> server_;
+ std::ostringstream server_address_;
+ TestServiceImpl service_;
+};
+
+static void Drainer(ClientReaderWriter<EchoRequest, EchoResponse>* reader) {
+ EchoResponse response;
+ while (reader->Read(&response)) {
+ // Just drain out the responses as fast as possible.
+ }
+}
+
+TEST_F(End2endTest, StreamingThroughput) {
+ ResetStub();
+ grpc::ClientContext context;
+ auto stream = stub_->BidiStream(&context);
+
+ auto reader = stream.get();
+ std::thread receiver(std::bind(Drainer, reader));
+
+ for (int i = 0; i < 10000; i++) {
+ EchoRequest request;
+ request.set_message(kLargeString);
+ ASSERT_TRUE(stream->Write(request));
+ if (i % 1000 == 0) {
+ gpr_log(GPR_INFO, "Send count = %d", i);
+ }
+ }
+ stream->WritesDone();
+ receiver.join();
+}
+
+} // namespace testing
+} // namespace grpc
+
+int main(int argc, char** argv) {
+ grpc::testing::TestEnvironment env(argc, argv);
+ ::testing::InitGoogleTest(&argc, argv);
+ return RUN_ALL_TESTS();
+}
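
For reference, the sender/receiver shutdown in the throughput test above boils down to a stop-flag handshake: a worker thread loops until the owner flips a flag, then the owner joins it. A minimal standalone sketch of the same pattern, using std::atomic in place of gpr_atm and no gRPC at all (illustrative only, not part of the patch):

#include <atomic>
#include <chrono>
#include <thread>

int main() {
  std::atomic<bool> should_exit{false};
  std::thread sender([&should_exit] {
    while (!should_exit.load(std::memory_order_acquire)) {
      // Produce one message here, then pace the loop (~1 ms, as in the test).
      std::this_thread::sleep_for(std::chrono::milliseconds(1));
    }
  });
  // ... consume on this thread until done ...
  should_exit.store(true, std::memory_order_release);
  sender.join();
  return 0;
}
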
diff --git a/contrib/libs/grpc/test/cpp/end2end/test_health_check_service_impl.cc b/contrib/libs/grpc/test/cpp/end2end/test_health_check_service_impl.cc
index 225cb2624b..5b212cba31 100644
--- a/contrib/libs/grpc/test/cpp/end2end/test_health_check_service_impl.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/test_health_check_service_impl.cc
@@ -1,98 +1,98 @@
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include "test/cpp/end2end/test_health_check_service_impl.h"
-
-#include <grpc/grpc.h>
-
-using grpc::health::v1::HealthCheckRequest;
-using grpc::health::v1::HealthCheckResponse;
-
-namespace grpc {
-namespace testing {
-
-Status HealthCheckServiceImpl::Check(ServerContext* /*context*/,
- const HealthCheckRequest* request,
- HealthCheckResponse* response) {
- std::lock_guard<std::mutex> lock(mu_);
- auto iter = status_map_.find(request->service());
- if (iter == status_map_.end()) {
- return Status(StatusCode::NOT_FOUND, "");
- }
- response->set_status(iter->second);
- return Status::OK;
-}
-
-Status HealthCheckServiceImpl::Watch(
- ServerContext* context, const HealthCheckRequest* request,
- ::grpc::ServerWriter<HealthCheckResponse>* writer) {
- auto last_state = HealthCheckResponse::UNKNOWN;
- while (!context->IsCancelled()) {
- {
- std::lock_guard<std::mutex> lock(mu_);
- HealthCheckResponse response;
- auto iter = status_map_.find(request->service());
- if (iter == status_map_.end()) {
- response.set_status(response.SERVICE_UNKNOWN);
- } else {
- response.set_status(iter->second);
- }
- if (response.status() != last_state) {
- writer->Write(response, ::grpc::WriteOptions());
- last_state = response.status();
- }
- }
- gpr_sleep_until(gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
- gpr_time_from_millis(1000, GPR_TIMESPAN)));
- }
- return Status::OK;
-}
-
-void HealthCheckServiceImpl::SetStatus(
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include "test/cpp/end2end/test_health_check_service_impl.h"
+
+#include <grpc/grpc.h>
+
+using grpc::health::v1::HealthCheckRequest;
+using grpc::health::v1::HealthCheckResponse;
+
+namespace grpc {
+namespace testing {
+
+Status HealthCheckServiceImpl::Check(ServerContext* /*context*/,
+ const HealthCheckRequest* request,
+ HealthCheckResponse* response) {
+ std::lock_guard<std::mutex> lock(mu_);
+ auto iter = status_map_.find(request->service());
+ if (iter == status_map_.end()) {
+ return Status(StatusCode::NOT_FOUND, "");
+ }
+ response->set_status(iter->second);
+ return Status::OK;
+}
+
+Status HealthCheckServiceImpl::Watch(
+ ServerContext* context, const HealthCheckRequest* request,
+ ::grpc::ServerWriter<HealthCheckResponse>* writer) {
+ auto last_state = HealthCheckResponse::UNKNOWN;
+ while (!context->IsCancelled()) {
+ {
+ std::lock_guard<std::mutex> lock(mu_);
+ HealthCheckResponse response;
+ auto iter = status_map_.find(request->service());
+ if (iter == status_map_.end()) {
+ response.set_status(response.SERVICE_UNKNOWN);
+ } else {
+ response.set_status(iter->second);
+ }
+ if (response.status() != last_state) {
+ writer->Write(response, ::grpc::WriteOptions());
+ last_state = response.status();
+ }
+ }
+ gpr_sleep_until(gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
+ gpr_time_from_millis(1000, GPR_TIMESPAN)));
+ }
+ return Status::OK;
+}
+
+void HealthCheckServiceImpl::SetStatus(
const TString& service_name,
- HealthCheckResponse::ServingStatus status) {
- std::lock_guard<std::mutex> lock(mu_);
- if (shutdown_) {
- status = HealthCheckResponse::NOT_SERVING;
- }
- status_map_[service_name] = status;
-}
-
-void HealthCheckServiceImpl::SetAll(HealthCheckResponse::ServingStatus status) {
- std::lock_guard<std::mutex> lock(mu_);
- if (shutdown_) {
- return;
- }
- for (auto iter = status_map_.begin(); iter != status_map_.end(); ++iter) {
- iter->second = status;
- }
-}
-
-void HealthCheckServiceImpl::Shutdown() {
- std::lock_guard<std::mutex> lock(mu_);
- if (shutdown_) {
- return;
- }
- shutdown_ = true;
- for (auto iter = status_map_.begin(); iter != status_map_.end(); ++iter) {
- iter->second = HealthCheckResponse::NOT_SERVING;
- }
-}
-
-} // namespace testing
-} // namespace grpc
+ HealthCheckResponse::ServingStatus status) {
+ std::lock_guard<std::mutex> lock(mu_);
+ if (shutdown_) {
+ status = HealthCheckResponse::NOT_SERVING;
+ }
+ status_map_[service_name] = status;
+}
+
+void HealthCheckServiceImpl::SetAll(HealthCheckResponse::ServingStatus status) {
+ std::lock_guard<std::mutex> lock(mu_);
+ if (shutdown_) {
+ return;
+ }
+ for (auto iter = status_map_.begin(); iter != status_map_.end(); ++iter) {
+ iter->second = status;
+ }
+}
+
+void HealthCheckServiceImpl::Shutdown() {
+ std::lock_guard<std::mutex> lock(mu_);
+ if (shutdown_) {
+ return;
+ }
+ shutdown_ = true;
+ for (auto iter = status_map_.begin(); iter != status_map_.end(); ++iter) {
+ iter->second = HealthCheckResponse::NOT_SERVING;
+ }
+}
+
+} // namespace testing
+} // namespace grpc
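
The Watch() handler above is a poll-and-emit-on-change loop: re-read the status map roughly once per second and write to the stream only when the value differs from the last one sent. A generic sketch of that loop, with get_status, emit, and cancelled as placeholder callables standing in for the map lookup, the stream write, and the context check:

#include <chrono>
#include <functional>
#include <thread>

// Emits a new status only when it changes; polls about once per second.
template <typename Status>
void WatchLoop(std::function<Status()> get_status,
               std::function<void(Status)> emit,
               std::function<bool()> cancelled, Status last_state) {
  while (!cancelled()) {
    Status current = get_status();
    if (current != last_state) {
      emit(current);
      last_state = current;
    }
    std::this_thread::sleep_for(std::chrono::seconds(1));
  }
}
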
diff --git a/contrib/libs/grpc/test/cpp/end2end/test_health_check_service_impl.h b/contrib/libs/grpc/test/cpp/end2end/test_health_check_service_impl.h
index 3bb4d4f9ca..d370e4693a 100644
--- a/contrib/libs/grpc/test/cpp/end2end/test_health_check_service_impl.h
+++ b/contrib/libs/grpc/test/cpp/end2end/test_health_check_service_impl.h
@@ -1,58 +1,58 @@
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-#ifndef GRPC_TEST_CPP_END2END_TEST_HEALTH_CHECK_SERVICE_IMPL_H
-#define GRPC_TEST_CPP_END2END_TEST_HEALTH_CHECK_SERVICE_IMPL_H
-
-#include <map>
-#include <mutex>
-
-#include <grpcpp/server_context.h>
-#include <grpcpp/support/status.h>
-
-#include "src/proto/grpc/health/v1/health.grpc.pb.h"
-
-namespace grpc {
-namespace testing {
-
-// A sample sync implementation of the health checking service. This does the
-// same thing as the default one.
-class HealthCheckServiceImpl : public health::v1::Health::Service {
- public:
- Status Check(ServerContext* context,
- const health::v1::HealthCheckRequest* request,
- health::v1::HealthCheckResponse* response) override;
- Status Watch(ServerContext* context,
- const health::v1::HealthCheckRequest* request,
- ServerWriter<health::v1::HealthCheckResponse>* writer) override;
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+#ifndef GRPC_TEST_CPP_END2END_TEST_HEALTH_CHECK_SERVICE_IMPL_H
+#define GRPC_TEST_CPP_END2END_TEST_HEALTH_CHECK_SERVICE_IMPL_H
+
+#include <map>
+#include <mutex>
+
+#include <grpcpp/server_context.h>
+#include <grpcpp/support/status.h>
+
+#include "src/proto/grpc/health/v1/health.grpc.pb.h"
+
+namespace grpc {
+namespace testing {
+
+// A sample sync implementation of the health checking service. This does the
+// same thing as the default one.
+class HealthCheckServiceImpl : public health::v1::Health::Service {
+ public:
+ Status Check(ServerContext* context,
+ const health::v1::HealthCheckRequest* request,
+ health::v1::HealthCheckResponse* response) override;
+ Status Watch(ServerContext* context,
+ const health::v1::HealthCheckRequest* request,
+ ServerWriter<health::v1::HealthCheckResponse>* writer) override;
void SetStatus(const TString& service_name,
- health::v1::HealthCheckResponse::ServingStatus status);
- void SetAll(health::v1::HealthCheckResponse::ServingStatus status);
-
- void Shutdown();
-
- private:
- std::mutex mu_;
- bool shutdown_ = false;
+ health::v1::HealthCheckResponse::ServingStatus status);
+ void SetAll(health::v1::HealthCheckResponse::ServingStatus status);
+
+ void Shutdown();
+
+ private:
+ std::mutex mu_;
+ bool shutdown_ = false;
std::map<const TString, health::v1::HealthCheckResponse::ServingStatus>
- status_map_;
-};
-
-} // namespace testing
-} // namespace grpc
-
-#endif // GRPC_TEST_CPP_END2END_TEST_HEALTH_CHECK_SERVICE_IMPL_H
+ status_map_;
+};
+
+} // namespace testing
+} // namespace grpc
+
+#endif // GRPC_TEST_CPP_END2END_TEST_HEALTH_CHECK_SERVICE_IMPL_H
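
One possible way to wire the service declared above into a test server, following the ServerBuilder flow used elsewhere in these tests; the port and the service name below are placeholders, not values taken from the patch:

#include <memory>

#include <grpcpp/security/server_credentials.h>
#include <grpcpp/server.h>
#include <grpcpp/server_builder.h>

#include "test/cpp/end2end/test_health_check_service_impl.h"

std::unique_ptr<grpc::Server> StartHealthServer(
    grpc::testing::HealthCheckServiceImpl* service) {
  grpc::ServerBuilder builder;
  builder.AddListeningPort("localhost:50051",
                           grpc::InsecureServerCredentials());
  builder.RegisterService(service);
  std::unique_ptr<grpc::Server> server = builder.BuildAndStart();
  // Mark a service as healthy so Check()/Watch() report SERVING for it.
  service->SetStatus("grpc.testing.EchoTestService",
                     grpc::health::v1::HealthCheckResponse::SERVING);
  return server;
}
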
diff --git a/contrib/libs/grpc/test/cpp/end2end/test_service_impl.cc b/contrib/libs/grpc/test/cpp/end2end/test_service_impl.cc
index 55821905e4..078977e824 100644
--- a/contrib/libs/grpc/test/cpp/end2end/test_service_impl.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/test_service_impl.cc
@@ -1,144 +1,144 @@
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include "test/cpp/end2end/test_service_impl.h"
-
-#include <grpc/support/log.h>
+/*
+ *
+ * Copyright 2016 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include "test/cpp/end2end/test_service_impl.h"
+
+#include <grpc/support/log.h>
#include <grpcpp/alarm.h>
-#include <grpcpp/security/credentials.h>
-#include <grpcpp/server_context.h>
-#include <gtest/gtest.h>
-
+#include <grpcpp/security/credentials.h>
+#include <grpcpp/server_context.h>
+#include <gtest/gtest.h>
+
#include <util/generic/string.h>
-#include <thread>
-
-#include "src/proto/grpc/testing/echo.grpc.pb.h"
-#include "test/cpp/util/string_ref_helper.h"
-
-using std::chrono::system_clock;
-
-namespace grpc {
-namespace testing {
+#include <thread>
+
+#include "src/proto/grpc/testing/echo.grpc.pb.h"
+#include "test/cpp/util/string_ref_helper.h"
+
+using std::chrono::system_clock;
+
+namespace grpc {
+namespace testing {
namespace internal {
-
-// When echo_deadline is requested, deadline seen in the ServerContext is set in
-// the response in seconds.
-void MaybeEchoDeadline(experimental::ServerContextBase* context,
- const EchoRequest* request, EchoResponse* response) {
- if (request->has_param() && request->param().echo_deadline()) {
- gpr_timespec deadline = gpr_inf_future(GPR_CLOCK_REALTIME);
- if (context->deadline() != system_clock::time_point::max()) {
- Timepoint2Timespec(context->deadline(), &deadline);
- }
- response->mutable_param()->set_request_deadline(deadline.tv_sec);
- }
-}
-
+
+// When echo_deadline is requested, deadline seen in the ServerContext is set in
+// the response in seconds.
+void MaybeEchoDeadline(experimental::ServerContextBase* context,
+ const EchoRequest* request, EchoResponse* response) {
+ if (request->has_param() && request->param().echo_deadline()) {
+ gpr_timespec deadline = gpr_inf_future(GPR_CLOCK_REALTIME);
+ if (context->deadline() != system_clock::time_point::max()) {
+ Timepoint2Timespec(context->deadline(), &deadline);
+ }
+ response->mutable_param()->set_request_deadline(deadline.tv_sec);
+ }
+}
+
void CheckServerAuthContext(const experimental::ServerContextBase* context,
const TString& expected_transport_security_type,
const TString& expected_client_identity) {
- std::shared_ptr<const AuthContext> auth_ctx = context->auth_context();
- std::vector<grpc::string_ref> tst =
- auth_ctx->FindPropertyValues("transport_security_type");
- EXPECT_EQ(1u, tst.size());
- EXPECT_EQ(expected_transport_security_type.c_str(), ToString(tst[0]));
- if (expected_client_identity.empty()) {
- EXPECT_TRUE(auth_ctx->GetPeerIdentityPropertyName().empty());
- EXPECT_TRUE(auth_ctx->GetPeerIdentity().empty());
- EXPECT_FALSE(auth_ctx->IsPeerAuthenticated());
- } else {
- auto identity = auth_ctx->GetPeerIdentity();
- EXPECT_TRUE(auth_ctx->IsPeerAuthenticated());
- EXPECT_EQ(1u, identity.size());
+ std::shared_ptr<const AuthContext> auth_ctx = context->auth_context();
+ std::vector<grpc::string_ref> tst =
+ auth_ctx->FindPropertyValues("transport_security_type");
+ EXPECT_EQ(1u, tst.size());
+ EXPECT_EQ(expected_transport_security_type.c_str(), ToString(tst[0]));
+ if (expected_client_identity.empty()) {
+ EXPECT_TRUE(auth_ctx->GetPeerIdentityPropertyName().empty());
+ EXPECT_TRUE(auth_ctx->GetPeerIdentity().empty());
+ EXPECT_FALSE(auth_ctx->IsPeerAuthenticated());
+ } else {
+ auto identity = auth_ctx->GetPeerIdentity();
+ EXPECT_TRUE(auth_ctx->IsPeerAuthenticated());
+ EXPECT_EQ(1u, identity.size());
EXPECT_EQ(expected_client_identity.c_str(), ToString(identity[0]));
- }
-}
-
-// Returns the number of pairs in metadata that exactly match the given
-// key-value pair. Returns -1 if the pair wasn't found.
-int MetadataMatchCount(
- const std::multimap<grpc::string_ref, grpc::string_ref>& metadata,
+ }
+}
+
+// Returns the number of pairs in metadata that exactly match the given
+// key-value pair. Returns -1 if the pair wasn't found.
+int MetadataMatchCount(
+ const std::multimap<grpc::string_ref, grpc::string_ref>& metadata,
const TString& key, const TString& value) {
- int count = 0;
- for (const auto& metadatum : metadata) {
- if (ToString(metadatum.first) == key &&
- ToString(metadatum.second) == value) {
- count++;
- }
- }
- return count;
-}
-
-int GetIntValueFromMetadataHelper(
- const char* key,
- const std::multimap<grpc::string_ref, grpc::string_ref>& metadata,
- int default_value) {
- if (metadata.find(key) != metadata.end()) {
- std::istringstream iss(ToString(metadata.find(key)->second));
- iss >> default_value;
- gpr_log(GPR_INFO, "%s : %d", key, default_value);
- }
-
- return default_value;
-}
-
-int GetIntValueFromMetadata(
- const char* key,
- const std::multimap<grpc::string_ref, grpc::string_ref>& metadata,
- int default_value) {
- return GetIntValueFromMetadataHelper(key, metadata, default_value);
-}
-
-void ServerTryCancel(ServerContext* context) {
- EXPECT_FALSE(context->IsCancelled());
- context->TryCancel();
- gpr_log(GPR_INFO, "Server called TryCancel() to cancel the request");
- // Now wait until it's really canceled
- while (!context->IsCancelled()) {
- gpr_sleep_until(gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
- gpr_time_from_micros(1000, GPR_TIMESPAN)));
- }
-}
-
-void ServerTryCancelNonblocking(experimental::CallbackServerContext* context) {
- EXPECT_FALSE(context->IsCancelled());
- context->TryCancel();
+ int count = 0;
+ for (const auto& metadatum : metadata) {
+ if (ToString(metadatum.first) == key &&
+ ToString(metadatum.second) == value) {
+ count++;
+ }
+ }
+ return count;
+}
+
+int GetIntValueFromMetadataHelper(
+ const char* key,
+ const std::multimap<grpc::string_ref, grpc::string_ref>& metadata,
+ int default_value) {
+ if (metadata.find(key) != metadata.end()) {
+ std::istringstream iss(ToString(metadata.find(key)->second));
+ iss >> default_value;
+ gpr_log(GPR_INFO, "%s : %d", key, default_value);
+ }
+
+ return default_value;
+}
+
+int GetIntValueFromMetadata(
+ const char* key,
+ const std::multimap<grpc::string_ref, grpc::string_ref>& metadata,
+ int default_value) {
+ return GetIntValueFromMetadataHelper(key, metadata, default_value);
+}
+
+void ServerTryCancel(ServerContext* context) {
+ EXPECT_FALSE(context->IsCancelled());
+ context->TryCancel();
+ gpr_log(GPR_INFO, "Server called TryCancel() to cancel the request");
+ // Now wait until it's really canceled
+ while (!context->IsCancelled()) {
+ gpr_sleep_until(gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
+ gpr_time_from_micros(1000, GPR_TIMESPAN)));
+ }
+}
+
+void ServerTryCancelNonblocking(experimental::CallbackServerContext* context) {
+ EXPECT_FALSE(context->IsCancelled());
+ context->TryCancel();
gpr_log(GPR_INFO,
"Server called TryCancelNonblocking() to cancel the request");
-}
-
+}
+
} // namespace internal
-
-experimental::ServerUnaryReactor* CallbackTestServiceImpl::Echo(
- experimental::CallbackServerContext* context, const EchoRequest* request,
- EchoResponse* response) {
- class Reactor : public ::grpc::experimental::ServerUnaryReactor {
- public:
- Reactor(CallbackTestServiceImpl* service,
- experimental::CallbackServerContext* ctx,
- const EchoRequest* request, EchoResponse* response)
- : service_(service), ctx_(ctx), req_(request), resp_(response) {
- // It should be safe to call IsCancelled here, even though we don't know
- // the result. Call it asynchronously to see if we trigger any data races.
+
+experimental::ServerUnaryReactor* CallbackTestServiceImpl::Echo(
+ experimental::CallbackServerContext* context, const EchoRequest* request,
+ EchoResponse* response) {
+ class Reactor : public ::grpc::experimental::ServerUnaryReactor {
+ public:
+ Reactor(CallbackTestServiceImpl* service,
+ experimental::CallbackServerContext* ctx,
+ const EchoRequest* request, EchoResponse* response)
+ : service_(service), ctx_(ctx), req_(request), resp_(response) {
+ // It should be safe to call IsCancelled here, even though we don't know
+ // the result. Call it asynchronously to see if we trigger any data races.
// Join it in OnDone (technically that could be blocking but shouldn't be
// for very long).
- async_cancel_check_ = std::thread([this] { (void)ctx_->IsCancelled(); });
-
+ async_cancel_check_ = std::thread([this] { (void)ctx_->IsCancelled(); });
+
started_ = true;
if (request->has_param() &&
@@ -158,349 +158,349 @@ experimental::ServerUnaryReactor* CallbackTestServiceImpl::Echo(
void StartRpc() {
if (req_->has_param() && req_->param().server_sleep_us() > 0) {
- // Set an alarm for that much time
- alarm_.experimental().Set(
- gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
+ // Set an alarm for that much time
+ alarm_.experimental().Set(
+ gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
gpr_time_from_micros(req_->param().server_sleep_us(),
GPR_TIMESPAN)),
- [this](bool ok) { NonDelayed(ok); });
+ [this](bool ok) { NonDelayed(ok); });
return;
- }
+ }
NonDelayed(true);
- }
- void OnSendInitialMetadataDone(bool ok) override {
- EXPECT_TRUE(ok);
- initial_metadata_sent_ = true;
- }
- void OnCancel() override {
- EXPECT_TRUE(started_);
- EXPECT_TRUE(ctx_->IsCancelled());
- on_cancel_invoked_ = true;
+ }
+ void OnSendInitialMetadataDone(bool ok) override {
+ EXPECT_TRUE(ok);
+ initial_metadata_sent_ = true;
+ }
+ void OnCancel() override {
+ EXPECT_TRUE(started_);
+ EXPECT_TRUE(ctx_->IsCancelled());
+ on_cancel_invoked_ = true;
std::lock_guard<std::mutex> l(cancel_mu_);
cancel_cv_.notify_one();
- }
- void OnDone() override {
- if (req_->has_param() && req_->param().echo_metadata_initially()) {
- EXPECT_TRUE(initial_metadata_sent_);
- }
- EXPECT_EQ(ctx_->IsCancelled(), on_cancel_invoked_);
+ }
+ void OnDone() override {
+ if (req_->has_param() && req_->param().echo_metadata_initially()) {
+ EXPECT_TRUE(initial_metadata_sent_);
+ }
+ EXPECT_EQ(ctx_->IsCancelled(), on_cancel_invoked_);
// Validate that finishing with a non-OK status doesn't cause cancellation
if (req_->has_param() && req_->param().has_expected_error()) {
EXPECT_FALSE(on_cancel_invoked_);
}
- async_cancel_check_.join();
+ async_cancel_check_.join();
if (rpc_wait_thread_.joinable()) {
rpc_wait_thread_.join();
}
if (finish_when_cancelled_.joinable()) {
finish_when_cancelled_.join();
}
- delete this;
- }
-
- private:
- void NonDelayed(bool ok) {
- if (!ok) {
- EXPECT_TRUE(ctx_->IsCancelled());
- Finish(Status::CANCELLED);
- return;
- }
- if (req_->has_param() && req_->param().server_die()) {
- gpr_log(GPR_ERROR, "The request should not reach application handler.");
- GPR_ASSERT(0);
- }
- if (req_->has_param() && req_->param().has_expected_error()) {
- const auto& error = req_->param().expected_error();
- Finish(Status(static_cast<StatusCode>(error.code()),
- error.error_message(), error.binary_error_details()));
- return;
- }
+ delete this;
+ }
+
+ private:
+ void NonDelayed(bool ok) {
+ if (!ok) {
+ EXPECT_TRUE(ctx_->IsCancelled());
+ Finish(Status::CANCELLED);
+ return;
+ }
+ if (req_->has_param() && req_->param().server_die()) {
+ gpr_log(GPR_ERROR, "The request should not reach application handler.");
+ GPR_ASSERT(0);
+ }
+ if (req_->has_param() && req_->param().has_expected_error()) {
+ const auto& error = req_->param().expected_error();
+ Finish(Status(static_cast<StatusCode>(error.code()),
+ error.error_message(), error.binary_error_details()));
+ return;
+ }
int server_try_cancel = internal::GetIntValueFromMetadata(
- kServerTryCancelRequest, ctx_->client_metadata(), DO_NOT_CANCEL);
- if (server_try_cancel != DO_NOT_CANCEL) {
- // Since this is a unary RPC, by the time this server handler is called,
- // the 'request' message is already read from the client. So the
- // scenarios in server_try_cancel don't make much sense. Just cancel the
- // RPC as long as server_try_cancel is not DO_NOT_CANCEL
- EXPECT_FALSE(ctx_->IsCancelled());
- ctx_->TryCancel();
- gpr_log(GPR_INFO, "Server called TryCancel() to cancel the request");
+ kServerTryCancelRequest, ctx_->client_metadata(), DO_NOT_CANCEL);
+ if (server_try_cancel != DO_NOT_CANCEL) {
+ // Since this is a unary RPC, by the time this server handler is called,
+ // the 'request' message is already read from the client. So the
+ // scenarios in server_try_cancel don't make much sense. Just cancel the
+ // RPC as long as server_try_cancel is not DO_NOT_CANCEL
+ EXPECT_FALSE(ctx_->IsCancelled());
+ ctx_->TryCancel();
+ gpr_log(GPR_INFO, "Server called TryCancel() to cancel the request");
FinishWhenCancelledAsync();
- return;
- }
- resp_->set_message(req_->message());
+ return;
+ }
+ resp_->set_message(req_->message());
internal::MaybeEchoDeadline(ctx_, req_, resp_);
- if (service_->host_) {
- resp_->mutable_param()->set_host(*service_->host_);
- }
- if (req_->has_param() && req_->param().client_cancel_after_us()) {
- {
- std::unique_lock<std::mutex> lock(service_->mu_);
- service_->signal_client_ = true;
- }
+ if (service_->host_) {
+ resp_->mutable_param()->set_host(*service_->host_);
+ }
+ if (req_->has_param() && req_->param().client_cancel_after_us()) {
+ {
+ std::unique_lock<std::mutex> lock(service_->mu_);
+ service_->signal_client_ = true;
+ }
FinishWhenCancelledAsync();
- return;
- } else if (req_->has_param() && req_->param().server_cancel_after_us()) {
- alarm_.experimental().Set(
- gpr_time_add(
- gpr_now(GPR_CLOCK_REALTIME),
- gpr_time_from_micros(req_->param().server_cancel_after_us(),
- GPR_TIMESPAN)),
- [this](bool) { Finish(Status::CANCELLED); });
- return;
- } else if (!req_->has_param() || !req_->param().skip_cancelled_check()) {
- EXPECT_FALSE(ctx_->IsCancelled());
- }
-
- if (req_->has_param() && req_->param().echo_metadata_initially()) {
- const std::multimap<grpc::string_ref, grpc::string_ref>&
- client_metadata = ctx_->client_metadata();
- for (const auto& metadatum : client_metadata) {
- ctx_->AddInitialMetadata(ToString(metadatum.first),
- ToString(metadatum.second));
- }
- StartSendInitialMetadata();
- }
-
- if (req_->has_param() && req_->param().echo_metadata()) {
- const std::multimap<grpc::string_ref, grpc::string_ref>&
- client_metadata = ctx_->client_metadata();
- for (const auto& metadatum : client_metadata) {
- ctx_->AddTrailingMetadata(ToString(metadatum.first),
- ToString(metadatum.second));
- }
- // Terminate rpc with error and debug info in trailer.
- if (req_->param().debug_info().stack_entries_size() ||
- !req_->param().debug_info().detail().empty()) {
+ return;
+ } else if (req_->has_param() && req_->param().server_cancel_after_us()) {
+ alarm_.experimental().Set(
+ gpr_time_add(
+ gpr_now(GPR_CLOCK_REALTIME),
+ gpr_time_from_micros(req_->param().server_cancel_after_us(),
+ GPR_TIMESPAN)),
+ [this](bool) { Finish(Status::CANCELLED); });
+ return;
+ } else if (!req_->has_param() || !req_->param().skip_cancelled_check()) {
+ EXPECT_FALSE(ctx_->IsCancelled());
+ }
+
+ if (req_->has_param() && req_->param().echo_metadata_initially()) {
+ const std::multimap<grpc::string_ref, grpc::string_ref>&
+ client_metadata = ctx_->client_metadata();
+ for (const auto& metadatum : client_metadata) {
+ ctx_->AddInitialMetadata(ToString(metadatum.first),
+ ToString(metadatum.second));
+ }
+ StartSendInitialMetadata();
+ }
+
+ if (req_->has_param() && req_->param().echo_metadata()) {
+ const std::multimap<grpc::string_ref, grpc::string_ref>&
+ client_metadata = ctx_->client_metadata();
+ for (const auto& metadatum : client_metadata) {
+ ctx_->AddTrailingMetadata(ToString(metadatum.first),
+ ToString(metadatum.second));
+ }
+ // Terminate rpc with error and debug info in trailer.
+ if (req_->param().debug_info().stack_entries_size() ||
+ !req_->param().debug_info().detail().empty()) {
TString serialized_debug_info =
- req_->param().debug_info().SerializeAsString();
- ctx_->AddTrailingMetadata(kDebugInfoTrailerKey,
- serialized_debug_info);
- Finish(Status::CANCELLED);
- return;
- }
- }
- if (req_->has_param() &&
- (req_->param().expected_client_identity().length() > 0 ||
- req_->param().check_auth_context())) {
+ req_->param().debug_info().SerializeAsString();
+ ctx_->AddTrailingMetadata(kDebugInfoTrailerKey,
+ serialized_debug_info);
+ Finish(Status::CANCELLED);
+ return;
+ }
+ }
+ if (req_->has_param() &&
+ (req_->param().expected_client_identity().length() > 0 ||
+ req_->param().check_auth_context())) {
internal::CheckServerAuthContext(
ctx_, req_->param().expected_transport_security_type(),
req_->param().expected_client_identity());
- }
- if (req_->has_param() && req_->param().response_message_length() > 0) {
- resp_->set_message(
+ }
+ if (req_->has_param() && req_->param().response_message_length() > 0) {
+ resp_->set_message(
TString(req_->param().response_message_length(), '\0'));
- }
- if (req_->has_param() && req_->param().echo_peer()) {
- resp_->mutable_param()->set_peer(ctx_->peer().c_str());
- }
- Finish(Status::OK);
- }
+ }
+ if (req_->has_param() && req_->param().echo_peer()) {
+ resp_->mutable_param()->set_peer(ctx_->peer().c_str());
+ }
+ Finish(Status::OK);
+ }
void FinishWhenCancelledAsync() {
finish_when_cancelled_ = std::thread([this] {
std::unique_lock<std::mutex> l(cancel_mu_);
cancel_cv_.wait(l, [this] { return ctx_->IsCancelled(); });
- Finish(Status::CANCELLED);
+ Finish(Status::CANCELLED);
});
- }
-
- CallbackTestServiceImpl* const service_;
- experimental::CallbackServerContext* const ctx_;
- const EchoRequest* const req_;
- EchoResponse* const resp_;
- Alarm alarm_;
+ }
+
+ CallbackTestServiceImpl* const service_;
+ experimental::CallbackServerContext* const ctx_;
+ const EchoRequest* const req_;
+ EchoResponse* const resp_;
+ Alarm alarm_;
std::mutex cancel_mu_;
std::condition_variable cancel_cv_;
bool initial_metadata_sent_ = false;
bool started_ = false;
bool on_cancel_invoked_ = false;
- std::thread async_cancel_check_;
+ std::thread async_cancel_check_;
std::thread rpc_wait_thread_;
std::thread finish_when_cancelled_;
- };
-
- return new Reactor(this, context, request, response);
-}
-
-experimental::ServerUnaryReactor*
-CallbackTestServiceImpl::CheckClientInitialMetadata(
+ };
+
+ return new Reactor(this, context, request, response);
+}
+
+experimental::ServerUnaryReactor*
+CallbackTestServiceImpl::CheckClientInitialMetadata(
experimental::CallbackServerContext* context, const SimpleRequest42*,
SimpleResponse42*) {
- class Reactor : public ::grpc::experimental::ServerUnaryReactor {
- public:
- explicit Reactor(experimental::CallbackServerContext* ctx) {
+ class Reactor : public ::grpc::experimental::ServerUnaryReactor {
+ public:
+ explicit Reactor(experimental::CallbackServerContext* ctx) {
EXPECT_EQ(internal::MetadataMatchCount(ctx->client_metadata(),
kCheckClientInitialMetadataKey,
kCheckClientInitialMetadataVal),
- 1);
- EXPECT_EQ(ctx->client_metadata().count(kCheckClientInitialMetadataKey),
- 1u);
- Finish(Status::OK);
- }
- void OnDone() override { delete this; }
- };
-
- return new Reactor(context);
-}
-
-experimental::ServerReadReactor<EchoRequest>*
-CallbackTestServiceImpl::RequestStream(
- experimental::CallbackServerContext* context, EchoResponse* response) {
- // If 'server_try_cancel' is set in the metadata, the RPC is cancelled by
- // the server by calling ServerContext::TryCancel() depending on the
- // value:
- // CANCEL_BEFORE_PROCESSING: The RPC is cancelled before the server
- // reads any message from the client CANCEL_DURING_PROCESSING: The RPC
- // is cancelled while the server is reading messages from the client
- // CANCEL_AFTER_PROCESSING: The RPC is cancelled after the server reads
- // all the messages from the client
+ 1);
+ EXPECT_EQ(ctx->client_metadata().count(kCheckClientInitialMetadataKey),
+ 1u);
+ Finish(Status::OK);
+ }
+ void OnDone() override { delete this; }
+ };
+
+ return new Reactor(context);
+}
+
+experimental::ServerReadReactor<EchoRequest>*
+CallbackTestServiceImpl::RequestStream(
+ experimental::CallbackServerContext* context, EchoResponse* response) {
+ // If 'server_try_cancel' is set in the metadata, the RPC is cancelled by
+ // the server by calling ServerContext::TryCancel() depending on the
+ // value:
+ // CANCEL_BEFORE_PROCESSING: The RPC is cancelled before the server
+ // reads any message from the client CANCEL_DURING_PROCESSING: The RPC
+ // is cancelled while the server is reading messages from the client
+ // CANCEL_AFTER_PROCESSING: The RPC is cancelled after the server reads
+ // all the messages from the client
int server_try_cancel = internal::GetIntValueFromMetadata(
- kServerTryCancelRequest, context->client_metadata(), DO_NOT_CANCEL);
- if (server_try_cancel == CANCEL_BEFORE_PROCESSING) {
+ kServerTryCancelRequest, context->client_metadata(), DO_NOT_CANCEL);
+ if (server_try_cancel == CANCEL_BEFORE_PROCESSING) {
internal::ServerTryCancelNonblocking(context);
- // Don't need to provide a reactor since the RPC is canceled
- return nullptr;
- }
-
- class Reactor : public ::grpc::experimental::ServerReadReactor<EchoRequest> {
- public:
- Reactor(experimental::CallbackServerContext* ctx, EchoResponse* response,
- int server_try_cancel)
- : ctx_(ctx),
- response_(response),
- server_try_cancel_(server_try_cancel) {
- EXPECT_NE(server_try_cancel, CANCEL_BEFORE_PROCESSING);
- response->set_message("");
-
- if (server_try_cancel_ == CANCEL_DURING_PROCESSING) {
- ctx->TryCancel();
- // Don't wait for it here
- }
- StartRead(&request_);
- setup_done_ = true;
- }
- void OnDone() override { delete this; }
- void OnCancel() override {
- EXPECT_TRUE(setup_done_);
- EXPECT_TRUE(ctx_->IsCancelled());
- FinishOnce(Status::CANCELLED);
- }
- void OnReadDone(bool ok) override {
- if (ok) {
- response_->mutable_message()->append(request_.message());
- num_msgs_read_++;
- StartRead(&request_);
- } else {
- gpr_log(GPR_INFO, "Read: %d messages", num_msgs_read_);
-
- if (server_try_cancel_ == CANCEL_DURING_PROCESSING) {
- // Let OnCancel recover this
- return;
- }
- if (server_try_cancel_ == CANCEL_AFTER_PROCESSING) {
+ // Don't need to provide a reactor since the RPC is canceled
+ return nullptr;
+ }
+
+ class Reactor : public ::grpc::experimental::ServerReadReactor<EchoRequest> {
+ public:
+ Reactor(experimental::CallbackServerContext* ctx, EchoResponse* response,
+ int server_try_cancel)
+ : ctx_(ctx),
+ response_(response),
+ server_try_cancel_(server_try_cancel) {
+ EXPECT_NE(server_try_cancel, CANCEL_BEFORE_PROCESSING);
+ response->set_message("");
+
+ if (server_try_cancel_ == CANCEL_DURING_PROCESSING) {
+ ctx->TryCancel();
+ // Don't wait for it here
+ }
+ StartRead(&request_);
+ setup_done_ = true;
+ }
+ void OnDone() override { delete this; }
+ void OnCancel() override {
+ EXPECT_TRUE(setup_done_);
+ EXPECT_TRUE(ctx_->IsCancelled());
+ FinishOnce(Status::CANCELLED);
+ }
+ void OnReadDone(bool ok) override {
+ if (ok) {
+ response_->mutable_message()->append(request_.message());
+ num_msgs_read_++;
+ StartRead(&request_);
+ } else {
+ gpr_log(GPR_INFO, "Read: %d messages", num_msgs_read_);
+
+ if (server_try_cancel_ == CANCEL_DURING_PROCESSING) {
+ // Let OnCancel recover this
+ return;
+ }
+ if (server_try_cancel_ == CANCEL_AFTER_PROCESSING) {
internal::ServerTryCancelNonblocking(ctx_);
- return;
- }
- FinishOnce(Status::OK);
- }
- }
-
- private:
- void FinishOnce(const Status& s) {
- std::lock_guard<std::mutex> l(finish_mu_);
- if (!finished_) {
- Finish(s);
- finished_ = true;
- }
- }
-
- experimental::CallbackServerContext* const ctx_;
- EchoResponse* const response_;
- EchoRequest request_;
- int num_msgs_read_{0};
- int server_try_cancel_;
- std::mutex finish_mu_;
- bool finished_{false};
- bool setup_done_{false};
- };
-
- return new Reactor(context, response, server_try_cancel);
-}
-
-// Return 'kNumResponseStreamMsgs' messages.
-// TODO(yangg) make it generic by adding a parameter into EchoRequest
-experimental::ServerWriteReactor<EchoResponse>*
-CallbackTestServiceImpl::ResponseStream(
- experimental::CallbackServerContext* context, const EchoRequest* request) {
- // If 'server_try_cancel' is set in the metadata, the RPC is cancelled by
- // the server by calling ServerContext::TryCancel() depending on the
- // value:
- // CANCEL_BEFORE_PROCESSING: The RPC is cancelled before the server
- // reads any message from the client CANCEL_DURING_PROCESSING: The RPC
- // is cancelled while the server is reading messages from the client
- // CANCEL_AFTER_PROCESSING: The RPC is cancelled after the server reads
- // all the messages from the client
+ return;
+ }
+ FinishOnce(Status::OK);
+ }
+ }
+
+ private:
+ void FinishOnce(const Status& s) {
+ std::lock_guard<std::mutex> l(finish_mu_);
+ if (!finished_) {
+ Finish(s);
+ finished_ = true;
+ }
+ }
+
+ experimental::CallbackServerContext* const ctx_;
+ EchoResponse* const response_;
+ EchoRequest request_;
+ int num_msgs_read_{0};
+ int server_try_cancel_;
+ std::mutex finish_mu_;
+ bool finished_{false};
+ bool setup_done_{false};
+ };
+
+ return new Reactor(context, response, server_try_cancel);
+}
+
+// Return 'kNumResponseStreamMsgs' messages.
+// TODO(yangg) make it generic by adding a parameter into EchoRequest
+experimental::ServerWriteReactor<EchoResponse>*
+CallbackTestServiceImpl::ResponseStream(
+ experimental::CallbackServerContext* context, const EchoRequest* request) {
+ // If 'server_try_cancel' is set in the metadata, the RPC is cancelled by
+ // the server by calling ServerContext::TryCancel() depending on the
+ // value:
+ // CANCEL_BEFORE_PROCESSING: The RPC is cancelled before the server
+ // reads any message from the client CANCEL_DURING_PROCESSING: The RPC
+ // is cancelled while the server is reading messages from the client
+ // CANCEL_AFTER_PROCESSING: The RPC is cancelled after the server reads
+ // all the messages from the client
int server_try_cancel = internal::GetIntValueFromMetadata(
- kServerTryCancelRequest, context->client_metadata(), DO_NOT_CANCEL);
- if (server_try_cancel == CANCEL_BEFORE_PROCESSING) {
+ kServerTryCancelRequest, context->client_metadata(), DO_NOT_CANCEL);
+ if (server_try_cancel == CANCEL_BEFORE_PROCESSING) {
internal::ServerTryCancelNonblocking(context);
- }
-
- class Reactor
- : public ::grpc::experimental::ServerWriteReactor<EchoResponse> {
- public:
- Reactor(experimental::CallbackServerContext* ctx,
- const EchoRequest* request, int server_try_cancel)
- : ctx_(ctx), request_(request), server_try_cancel_(server_try_cancel) {
+ }
+
+ class Reactor
+ : public ::grpc::experimental::ServerWriteReactor<EchoResponse> {
+ public:
+ Reactor(experimental::CallbackServerContext* ctx,
+ const EchoRequest* request, int server_try_cancel)
+ : ctx_(ctx), request_(request), server_try_cancel_(server_try_cancel) {
server_coalescing_api_ = internal::GetIntValueFromMetadata(
- kServerUseCoalescingApi, ctx->client_metadata(), 0);
+ kServerUseCoalescingApi, ctx->client_metadata(), 0);
server_responses_to_send_ = internal::GetIntValueFromMetadata(
- kServerResponseStreamsToSend, ctx->client_metadata(),
- kServerDefaultResponseStreamsToSend);
- if (server_try_cancel_ == CANCEL_DURING_PROCESSING) {
- ctx->TryCancel();
- }
- if (server_try_cancel_ != CANCEL_BEFORE_PROCESSING) {
- if (num_msgs_sent_ < server_responses_to_send_) {
- NextWrite();
- }
- }
- setup_done_ = true;
- }
- void OnDone() override { delete this; }
- void OnCancel() override {
- EXPECT_TRUE(setup_done_);
- EXPECT_TRUE(ctx_->IsCancelled());
- FinishOnce(Status::CANCELLED);
- }
- void OnWriteDone(bool /*ok*/) override {
- if (num_msgs_sent_ < server_responses_to_send_) {
- NextWrite();
- } else if (server_coalescing_api_ != 0) {
- // We would have already done Finish just after the WriteLast
- } else if (server_try_cancel_ == CANCEL_DURING_PROCESSING) {
- // Let OnCancel recover this
- } else if (server_try_cancel_ == CANCEL_AFTER_PROCESSING) {
+ kServerResponseStreamsToSend, ctx->client_metadata(),
+ kServerDefaultResponseStreamsToSend);
+ if (server_try_cancel_ == CANCEL_DURING_PROCESSING) {
+ ctx->TryCancel();
+ }
+ if (server_try_cancel_ != CANCEL_BEFORE_PROCESSING) {
+ if (num_msgs_sent_ < server_responses_to_send_) {
+ NextWrite();
+ }
+ }
+ setup_done_ = true;
+ }
+ void OnDone() override { delete this; }
+ void OnCancel() override {
+ EXPECT_TRUE(setup_done_);
+ EXPECT_TRUE(ctx_->IsCancelled());
+ FinishOnce(Status::CANCELLED);
+ }
+ void OnWriteDone(bool /*ok*/) override {
+ if (num_msgs_sent_ < server_responses_to_send_) {
+ NextWrite();
+ } else if (server_coalescing_api_ != 0) {
+ // We would have already done Finish just after the WriteLast
+ } else if (server_try_cancel_ == CANCEL_DURING_PROCESSING) {
+ // Let OnCancel recover this
+ } else if (server_try_cancel_ == CANCEL_AFTER_PROCESSING) {
internal::ServerTryCancelNonblocking(ctx_);
- } else {
- FinishOnce(Status::OK);
- }
- }
-
- private:
- void FinishOnce(const Status& s) {
- std::lock_guard<std::mutex> l(finish_mu_);
- if (!finished_) {
- Finish(s);
- finished_ = true;
- }
- }
-
- void NextWrite() {
- response_.set_message(request_->message() +
+ } else {
+ FinishOnce(Status::OK);
+ }
+ }
+
+ private:
+ void FinishOnce(const Status& s) {
+ std::lock_guard<std::mutex> l(finish_mu_);
+ if (!finished_) {
+ Finish(s);
+ finished_ = true;
+ }
+ }
+
+ void NextWrite() {
+ response_.set_message(request_->message() +
::ToString(num_msgs_sent_));
- if (num_msgs_sent_ == server_responses_to_send_ - 1 &&
- server_coalescing_api_ != 0) {
+ if (num_msgs_sent_ == server_responses_to_send_ - 1 &&
+ server_coalescing_api_ != 0) {
{
std::lock_guard<std::mutex> l(finish_mu_);
if (!finished_) {
@@ -508,59 +508,59 @@ CallbackTestServiceImpl::ResponseStream(
StartWriteLast(&response_, WriteOptions());
}
}
- // If we use WriteLast, we shouldn't wait before attempting Finish
- FinishOnce(Status::OK);
- } else {
+ // If we use WriteLast, we shouldn't wait before attempting Finish
+ FinishOnce(Status::OK);
+ } else {
std::lock_guard<std::mutex> l(finish_mu_);
if (!finished_) {
num_msgs_sent_++;
StartWrite(&response_);
}
- }
- }
- experimental::CallbackServerContext* const ctx_;
- const EchoRequest* const request_;
- EchoResponse response_;
- int num_msgs_sent_{0};
- int server_try_cancel_;
- int server_coalescing_api_;
- int server_responses_to_send_;
- std::mutex finish_mu_;
- bool finished_{false};
- bool setup_done_{false};
- };
- return new Reactor(context, request, server_try_cancel);
-}
-
-experimental::ServerBidiReactor<EchoRequest, EchoResponse>*
-CallbackTestServiceImpl::BidiStream(
- experimental::CallbackServerContext* context) {
- class Reactor : public ::grpc::experimental::ServerBidiReactor<EchoRequest,
- EchoResponse> {
- public:
- explicit Reactor(experimental::CallbackServerContext* ctx) : ctx_(ctx) {
- // If 'server_try_cancel' is set in the metadata, the RPC is cancelled by
- // the server by calling ServerContext::TryCancel() depending on the
- // value:
- // CANCEL_BEFORE_PROCESSING: The RPC is cancelled before the server
- // reads any message from the client CANCEL_DURING_PROCESSING: The RPC
- // is cancelled while the server is reading messages from the client
- // CANCEL_AFTER_PROCESSING: The RPC is cancelled after the server reads
- // all the messages from the client
+ }
+ }
+ experimental::CallbackServerContext* const ctx_;
+ const EchoRequest* const request_;
+ EchoResponse response_;
+ int num_msgs_sent_{0};
+ int server_try_cancel_;
+ int server_coalescing_api_;
+ int server_responses_to_send_;
+ std::mutex finish_mu_;
+ bool finished_{false};
+ bool setup_done_{false};
+ };
+ return new Reactor(context, request, server_try_cancel);
+}
+
+experimental::ServerBidiReactor<EchoRequest, EchoResponse>*
+CallbackTestServiceImpl::BidiStream(
+ experimental::CallbackServerContext* context) {
+ class Reactor : public ::grpc::experimental::ServerBidiReactor<EchoRequest,
+ EchoResponse> {
+ public:
+ explicit Reactor(experimental::CallbackServerContext* ctx) : ctx_(ctx) {
+ // If 'server_try_cancel' is set in the metadata, the RPC is cancelled by
+ // the server by calling ServerContext::TryCancel() depending on the
+ // value:
+ // CANCEL_BEFORE_PROCESSING: The RPC is cancelled before the server
+ // reads any message from the client CANCEL_DURING_PROCESSING: The RPC
+ // is cancelled while the server is reading messages from the client
+ // CANCEL_AFTER_PROCESSING: The RPC is cancelled after the server reads
+ // all the messages from the client
server_try_cancel_ = internal::GetIntValueFromMetadata(
- kServerTryCancelRequest, ctx->client_metadata(), DO_NOT_CANCEL);
+ kServerTryCancelRequest, ctx->client_metadata(), DO_NOT_CANCEL);
server_write_last_ = internal::GetIntValueFromMetadata(
kServerFinishAfterNReads, ctx->client_metadata(), 0);
- if (server_try_cancel_ == CANCEL_BEFORE_PROCESSING) {
+ if (server_try_cancel_ == CANCEL_BEFORE_PROCESSING) {
internal::ServerTryCancelNonblocking(ctx);
- } else {
- if (server_try_cancel_ == CANCEL_DURING_PROCESSING) {
- ctx->TryCancel();
- }
- StartRead(&request_);
- }
- setup_done_ = true;
- }
+ } else {
+ if (server_try_cancel_ == CANCEL_DURING_PROCESSING) {
+ ctx->TryCancel();
+ }
+ StartRead(&request_);
+ }
+ setup_done_ = true;
+ }
void OnDone() override {
{
// Use the same lock as finish to make sure that OnDone isn't inlined.
@@ -570,15 +570,15 @@ CallbackTestServiceImpl::BidiStream(
}
delete this;
}
- void OnCancel() override {
- EXPECT_TRUE(setup_done_);
- EXPECT_TRUE(ctx_->IsCancelled());
- FinishOnce(Status::CANCELLED);
- }
- void OnReadDone(bool ok) override {
- if (ok) {
- num_msgs_read_++;
- response_.set_message(request_.message());
+ void OnCancel() override {
+ EXPECT_TRUE(setup_done_);
+ EXPECT_TRUE(ctx_->IsCancelled());
+ FinishOnce(Status::CANCELLED);
+ }
+ void OnReadDone(bool ok) override {
+ if (ok) {
+ num_msgs_read_++;
+ response_.set_message(request_.message());
std::lock_guard<std::mutex> l(finish_mu_);
if (!finished_) {
if (num_msgs_read_ == server_write_last_) {
@@ -588,51 +588,51 @@ CallbackTestServiceImpl::BidiStream(
StartWrite(&response_);
return;
}
- }
- }
-
- if (server_try_cancel_ == CANCEL_DURING_PROCESSING) {
- // Let OnCancel handle this
- } else if (server_try_cancel_ == CANCEL_AFTER_PROCESSING) {
+ }
+ }
+
+ if (server_try_cancel_ == CANCEL_DURING_PROCESSING) {
+ // Let OnCancel handle this
+ } else if (server_try_cancel_ == CANCEL_AFTER_PROCESSING) {
internal::ServerTryCancelNonblocking(ctx_);
- } else {
- FinishOnce(Status::OK);
- }
- }
- void OnWriteDone(bool /*ok*/) override {
- std::lock_guard<std::mutex> l(finish_mu_);
- if (!finished_) {
- StartRead(&request_);
- }
- }
-
- private:
- void FinishOnce(const Status& s) {
- std::lock_guard<std::mutex> l(finish_mu_);
- if (!finished_) {
- finished_ = true;
+ } else {
+ FinishOnce(Status::OK);
+ }
+ }
+ void OnWriteDone(bool /*ok*/) override {
+ std::lock_guard<std::mutex> l(finish_mu_);
+ if (!finished_) {
+ StartRead(&request_);
+ }
+ }
+
+ private:
+ void FinishOnce(const Status& s) {
+ std::lock_guard<std::mutex> l(finish_mu_);
+ if (!finished_) {
+ finished_ = true;
// Finish asynchronously to make sure that there are no deadlocks.
finish_thread_ = std::thread([this, s] {
std::lock_guard<std::mutex> l(finish_mu_);
Finish(s);
});
- }
- }
-
- experimental::CallbackServerContext* const ctx_;
- EchoRequest request_;
- EchoResponse response_;
- int num_msgs_read_{0};
- int server_try_cancel_;
- int server_write_last_;
- std::mutex finish_mu_;
- bool finished_{false};
- bool setup_done_{false};
+ }
+ }
+
+ experimental::CallbackServerContext* const ctx_;
+ EchoRequest request_;
+ EchoResponse response_;
+ int num_msgs_read_{0};
+ int server_try_cancel_;
+ int server_write_last_;
+ std::mutex finish_mu_;
+ bool finished_{false};
+ bool setup_done_{false};
std::thread finish_thread_;
- };
-
- return new Reactor(context);
-}
-
-} // namespace testing
-} // namespace grpc
+ };
+
+ return new Reactor(context);
+}
+
+} // namespace testing
+} // namespace grpc
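
All of the reactors above follow the same lifetime rule: the handler returns a heap-allocated reactor, Finish() supplies the final status, and OnDone() is the last callback delivered, so it deletes the object. A minimal sketch of that pattern for a unary RPC, mirroring the CheckClientInitialMetadata reactor (header layout varies across gRPC versions; these includes follow the files above):

#include <grpcpp/server_context.h>
#include <grpcpp/support/status.h>

// Finishes immediately with the given status and frees itself in OnDone().
class MinimalUnaryReactor : public grpc::experimental::ServerUnaryReactor {
 public:
  explicit MinimalUnaryReactor(const grpc::Status& status) { Finish(status); }

 private:
  void OnCancel() override { /* RPC was cancelled before completion */ }
  void OnDone() override { delete this; }
};
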
diff --git a/contrib/libs/grpc/test/cpp/end2end/test_service_impl.h b/contrib/libs/grpc/test/cpp/end2end/test_service_impl.h
index b3adaa3b85..5f207f1979 100644
--- a/contrib/libs/grpc/test/cpp/end2end/test_service_impl.h
+++ b/contrib/libs/grpc/test/cpp/end2end/test_service_impl.h
@@ -1,64 +1,64 @@
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#ifndef GRPC_TEST_CPP_END2END_TEST_SERVICE_IMPL_H
-#define GRPC_TEST_CPP_END2END_TEST_SERVICE_IMPL_H
-
+/*
+ *
+ * Copyright 2016 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#ifndef GRPC_TEST_CPP_END2END_TEST_SERVICE_IMPL_H
+#define GRPC_TEST_CPP_END2END_TEST_SERVICE_IMPL_H
+
#include <condition_variable>
-#include <memory>
-#include <mutex>
-
-#include <grpc/grpc.h>
+#include <memory>
+#include <mutex>
+
+#include <grpc/grpc.h>
#include <grpc/support/log.h>
#include <grpcpp/alarm.h>
#include <grpcpp/security/credentials.h>
-#include <grpcpp/server_context.h>
+#include <grpcpp/server_context.h>
#include <gtest/gtest.h>
-
+
#include <util/generic/string.h>
#include <thread>
-#include "src/proto/grpc/testing/echo.grpc.pb.h"
+#include "src/proto/grpc/testing/echo.grpc.pb.h"
#include "test/cpp/util/string_ref_helper.h"
-
+
#include <util/string/cast.h>
using std::chrono::system_clock;
-namespace grpc {
-namespace testing {
-
-const int kServerDefaultResponseStreamsToSend = 3;
-const char* const kServerResponseStreamsToSend = "server_responses_to_send";
-const char* const kServerTryCancelRequest = "server_try_cancel";
-const char* const kDebugInfoTrailerKey = "debug-info-bin";
-const char* const kServerFinishAfterNReads = "server_finish_after_n_reads";
-const char* const kServerUseCoalescingApi = "server_use_coalescing_api";
-const char* const kCheckClientInitialMetadataKey = "custom_client_metadata";
-const char* const kCheckClientInitialMetadataVal = "Value for client metadata";
-
-typedef enum {
- DO_NOT_CANCEL = 0,
- CANCEL_BEFORE_PROCESSING,
- CANCEL_DURING_PROCESSING,
- CANCEL_AFTER_PROCESSING
-} ServerTryCancelRequestPhase;
-
+namespace grpc {
+namespace testing {
+
+const int kServerDefaultResponseStreamsToSend = 3;
+const char* const kServerResponseStreamsToSend = "server_responses_to_send";
+const char* const kServerTryCancelRequest = "server_try_cancel";
+const char* const kDebugInfoTrailerKey = "debug-info-bin";
+const char* const kServerFinishAfterNReads = "server_finish_after_n_reads";
+const char* const kServerUseCoalescingApi = "server_use_coalescing_api";
+const char* const kCheckClientInitialMetadataKey = "custom_client_metadata";
+const char* const kCheckClientInitialMetadataVal = "Value for client metadata";
+
+typedef enum {
+ DO_NOT_CANCEL = 0,
+ CANCEL_BEFORE_PROCESSING,
+ CANCEL_DURING_PROCESSING,
+ CANCEL_AFTER_PROCESSING
+} ServerTryCancelRequestPhase;
+
namespace internal {
// When echo_deadline is requested, deadline seen in the ServerContext is set in
// the response in seconds.
@@ -119,19 +119,19 @@ class TestServiceSignaller {
template <typename RpcService>
class TestMultipleServiceImpl : public RpcService {
- public:
+ public:
TestMultipleServiceImpl() : signal_client_(false), host_() {}
explicit TestMultipleServiceImpl(const TString& host)
: signal_client_(false), host_(new TString(host)) {}
-
- Status Echo(ServerContext* context, const EchoRequest* request,
+
+ Status Echo(ServerContext* context, const EchoRequest* request,
EchoResponse* response) {
if (request->has_param() &&
request->param().server_notify_client_when_started()) {
signaller_.SignalClientThatRpcStarted();
signaller_.ServerWaitToContinue();
}
-
+
// A bit of sleep to make sure that short deadline tests fail
if (request->has_param() && request->param().server_sleep_us() > 0) {
gpr_sleep_until(
@@ -248,7 +248,7 @@ class TestMultipleServiceImpl : public RpcService {
return Echo(context, request, response);
}
- Status CheckClientInitialMetadata(ServerContext* context,
+ Status CheckClientInitialMetadata(ServerContext* context,
const SimpleRequest42* /*request*/,
SimpleResponse42* /*response*/) {
EXPECT_EQ(internal::MetadataMatchCount(context->client_metadata(),
@@ -259,11 +259,11 @@ class TestMultipleServiceImpl : public RpcService {
context->client_metadata().count(kCheckClientInitialMetadataKey));
return Status::OK;
}
-
- // Unimplemented is left unimplemented to test the returned error.
-
- Status RequestStream(ServerContext* context,
- ServerReader<EchoRequest>* reader,
+
+ // Unimplemented is left unimplemented to test the returned error.
+
+ Status RequestStream(ServerContext* context,
+ ServerReader<EchoRequest>* reader,
EchoResponse* response) {
// If 'server_try_cancel' is set in the metadata, the RPC is cancelled by
// the server by calling ServerContext::TryCancel() depending on the value:
@@ -275,7 +275,7 @@ class TestMultipleServiceImpl : public RpcService {
// all the messages from the client
int server_try_cancel = internal::GetIntValueFromMetadata(
kServerTryCancelRequest, context->client_metadata(), DO_NOT_CANCEL);
-
+
EchoRequest request;
response->set_message("");
@@ -312,7 +312,7 @@ class TestMultipleServiceImpl : public RpcService {
// Return 'kNumResponseStreamMsgs' messages.
// TODO(yangg) make it generic by adding a parameter into EchoRequest
- Status ResponseStream(ServerContext* context, const EchoRequest* request,
+ Status ResponseStream(ServerContext* context, const EchoRequest* request,
ServerWriter<EchoResponse>* writer) {
// If server_try_cancel is set in the metadata, the RPC is cancelled by the
// server by calling ServerContext::TryCancel() depending on the value:
@@ -324,10 +324,10 @@ class TestMultipleServiceImpl : public RpcService {
// all the messages to the client
int server_try_cancel = internal::GetIntValueFromMetadata(
kServerTryCancelRequest, context->client_metadata(), DO_NOT_CANCEL);
-
+
int server_coalescing_api = internal::GetIntValueFromMetadata(
kServerUseCoalescingApi, context->client_metadata(), 0);
-
+
int server_responses_to_send = internal::GetIntValueFromMetadata(
kServerResponseStreamsToSend, context->client_metadata(),
kServerDefaultResponseStreamsToSend);
@@ -426,70 +426,70 @@ class TestMultipleServiceImpl : public RpcService {
}
// Unimplemented is left unimplemented to test the returned error.
- bool signal_client() {
- std::unique_lock<std::mutex> lock(mu_);
- return signal_client_;
- }
+ bool signal_client() {
+ std::unique_lock<std::mutex> lock(mu_);
+ return signal_client_;
+ }
void ClientWaitUntilRpcStarted() { signaller_.ClientWaitUntilRpcStarted(); }
void SignalServerToContinue() { signaller_.SignalServerToContinue(); }
uint64_t RpcsWaitingForClientCancel() {
std::unique_lock<std::mutex> lock(mu_);
return rpcs_waiting_for_client_cancel_;
}
-
- private:
- bool signal_client_;
- std::mutex mu_;
+
+ private:
+ bool signal_client_;
+ std::mutex mu_;
TestServiceSignaller signaller_;
std::unique_ptr<TString> host_;
uint64_t rpcs_waiting_for_client_cancel_ = 0;
-};
-
-class CallbackTestServiceImpl
- : public ::grpc::testing::EchoTestService::ExperimentalCallbackService {
- public:
- CallbackTestServiceImpl() : signal_client_(false), host_() {}
+};
+
+class CallbackTestServiceImpl
+ : public ::grpc::testing::EchoTestService::ExperimentalCallbackService {
+ public:
+ CallbackTestServiceImpl() : signal_client_(false), host_() {}
explicit CallbackTestServiceImpl(const TString& host)
: signal_client_(false), host_(new TString(host)) {}
-
- experimental::ServerUnaryReactor* Echo(
- experimental::CallbackServerContext* context, const EchoRequest* request,
- EchoResponse* response) override;
-
- experimental::ServerUnaryReactor* CheckClientInitialMetadata(
+
+ experimental::ServerUnaryReactor* Echo(
+ experimental::CallbackServerContext* context, const EchoRequest* request,
+ EchoResponse* response) override;
+
+ experimental::ServerUnaryReactor* CheckClientInitialMetadata(
experimental::CallbackServerContext* context, const SimpleRequest42*,
SimpleResponse42*) override;
-
- experimental::ServerReadReactor<EchoRequest>* RequestStream(
- experimental::CallbackServerContext* context,
- EchoResponse* response) override;
-
- experimental::ServerWriteReactor<EchoResponse>* ResponseStream(
- experimental::CallbackServerContext* context,
- const EchoRequest* request) override;
-
- experimental::ServerBidiReactor<EchoRequest, EchoResponse>* BidiStream(
- experimental::CallbackServerContext* context) override;
-
- // Unimplemented is left unimplemented to test the returned error.
- bool signal_client() {
- std::unique_lock<std::mutex> lock(mu_);
- return signal_client_;
- }
+
+ experimental::ServerReadReactor<EchoRequest>* RequestStream(
+ experimental::CallbackServerContext* context,
+ EchoResponse* response) override;
+
+ experimental::ServerWriteReactor<EchoResponse>* ResponseStream(
+ experimental::CallbackServerContext* context,
+ const EchoRequest* request) override;
+
+ experimental::ServerBidiReactor<EchoRequest, EchoResponse>* BidiStream(
+ experimental::CallbackServerContext* context) override;
+
+ // Unimplemented is left unimplemented to test the returned error.
+ bool signal_client() {
+ std::unique_lock<std::mutex> lock(mu_);
+ return signal_client_;
+ }
void ClientWaitUntilRpcStarted() { signaller_.ClientWaitUntilRpcStarted(); }
void SignalServerToContinue() { signaller_.SignalServerToContinue(); }
-
- private:
- bool signal_client_;
- std::mutex mu_;
+
+ private:
+ bool signal_client_;
+ std::mutex mu_;
TestServiceSignaller signaller_;
std::unique_ptr<TString> host_;
-};
-
+};
+
using TestServiceImpl =
TestMultipleServiceImpl<::grpc::testing::EchoTestService::Service>;
-} // namespace testing
-} // namespace grpc
-
-#endif // GRPC_TEST_CPP_END2END_TEST_SERVICE_IMPL_H
+} // namespace testing
+} // namespace grpc
+
+#endif // GRPC_TEST_CPP_END2END_TEST_SERVICE_IMPL_H
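The constants and the ServerTryCancelRequestPhase enum declared above are how the end2end tests steer server-side cancellation: the client sends the desired phase as a decimal string under kServerTryCancelRequest, and the service parses it with internal::GetIntValueFromMetadata. Below is a hedged sketch of a client driving CANCEL_DURING_PROCESSING against the response-streaming RPC; the helper name and the expectation of a CANCELLED status are assumptions of this illustration, not lifted from the tests.

#include <grpc/support/log.h>
#include <grpcpp/grpcpp.h>

#include "src/proto/grpc/testing/echo.grpc.pb.h"
#include "test/cpp/end2end/test_service_impl.h"

// Illustrative helper: ask the test service to TryCancel() the RPC while it is
// still streaming responses back to us.
void CancelDuringResponseStream(grpc::testing::EchoTestService::Stub* stub) {
  grpc::ClientContext context;
  // "2" == CANCEL_DURING_PROCESSING in the ServerTryCancelRequestPhase enum.
  context.AddMetadata(grpc::testing::kServerTryCancelRequest, "2");
  context.AddMetadata(grpc::testing::kServerResponseStreamsToSend, "10");

  grpc::testing::EchoRequest request;
  request.set_message("hello");
  grpc::testing::EchoResponse response;

  auto reader = stub->ResponseStream(&context, request);
  while (reader->Read(&response)) {
    // Drain whatever the server managed to send before cancelling.
  }
  grpc::Status status = reader->Finish();
  GPR_ASSERT(status.error_code() == grpc::StatusCode::CANCELLED);
}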
diff --git a/contrib/libs/grpc/test/cpp/end2end/thread/ya.make_ b/contrib/libs/grpc/test/cpp/end2end/thread/ya.make_
index 296111dfc2..afabda1c8f 100644
--- a/contrib/libs/grpc/test/cpp/end2end/thread/ya.make_
+++ b/contrib/libs/grpc/test/cpp/end2end/thread/ya.make_
@@ -1,31 +1,31 @@
GTEST_UGLY()
-
-OWNER(
- dvshkurko
- g:ymake
-)
-
-ADDINCL(
- ${ARCADIA_ROOT}/contrib/libs/grpc
-)
-
-PEERDIR(
- contrib/libs/grpc/src/proto/grpc/core
- contrib/libs/grpc/src/proto/grpc/testing
- contrib/libs/grpc/src/proto/grpc/testing/duplicate
- contrib/libs/grpc/test/core/util
- contrib/libs/grpc/test/cpp/end2end
- contrib/libs/grpc/test/cpp/util
-)
-
-NO_COMPILER_WARNINGS()
-
-SRCDIR(
- contrib/libs/grpc/test/cpp/end2end
-)
-
-SRCS(
- thread_stress_test.cc
-)
-
-END()
+
+OWNER(
+ dvshkurko
+ g:ymake
+)
+
+ADDINCL(
+ ${ARCADIA_ROOT}/contrib/libs/grpc
+)
+
+PEERDIR(
+ contrib/libs/grpc/src/proto/grpc/core
+ contrib/libs/grpc/src/proto/grpc/testing
+ contrib/libs/grpc/src/proto/grpc/testing/duplicate
+ contrib/libs/grpc/test/core/util
+ contrib/libs/grpc/test/cpp/end2end
+ contrib/libs/grpc/test/cpp/util
+)
+
+NO_COMPILER_WARNINGS()
+
+SRCDIR(
+ contrib/libs/grpc/test/cpp/end2end
+)
+
+SRCS(
+ thread_stress_test.cc
+)
+
+END()
diff --git a/contrib/libs/grpc/test/cpp/end2end/thread_stress_test.cc b/contrib/libs/grpc/test/cpp/end2end/thread_stress_test.cc
index d187a8a952..8acb953729 100644
--- a/contrib/libs/grpc/test/cpp/end2end/thread_stress_test.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/thread_stress_test.cc
@@ -1,442 +1,442 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include <cinttypes>
-#include <mutex>
-#include <thread>
-
-#include <grpc/grpc.h>
-#include <grpc/support/time.h>
-#include <grpcpp/channel.h>
-#include <grpcpp/client_context.h>
-#include <grpcpp/create_channel.h>
-#include <grpcpp/impl/codegen/sync.h>
-#include <grpcpp/resource_quota.h>
-#include <grpcpp/server.h>
-#include <grpcpp/server_builder.h>
-#include <grpcpp/server_context.h>
-
-#include "src/core/lib/gpr/env.h"
-#include "src/core/lib/surface/api_trace.h"
-#include "src/proto/grpc/testing/duplicate/echo_duplicate.grpc.pb.h"
-#include "src/proto/grpc/testing/echo.grpc.pb.h"
-#include "test/core/util/port.h"
-#include "test/core/util/test_config.h"
-
-#include <gtest/gtest.h>
-
-using grpc::testing::EchoRequest;
-using grpc::testing::EchoResponse;
-using std::chrono::system_clock;
-
-const int kNumThreads = 100; // Number of threads
-const int kNumAsyncSendThreads = 2;
-const int kNumAsyncReceiveThreads = 50;
-const int kNumAsyncServerThreads = 50;
-const int kNumRpcs = 1000; // Number of RPCs per thread
-
-namespace grpc {
-namespace testing {
-
-class TestServiceImpl : public ::grpc::testing::EchoTestService::Service {
- public:
- TestServiceImpl() {}
-
- Status Echo(ServerContext* /*context*/, const EchoRequest* request,
- EchoResponse* response) override {
- response->set_message(request->message());
- return Status::OK;
- }
-};
-
-template <class Service>
-class CommonStressTest {
- public:
- CommonStressTest() : kMaxMessageSize_(8192) {
-#if TARGET_OS_IPHONE
- // Workaround Apple CFStream bug
- gpr_setenv("grpc_cfstream", "0");
-#endif
- }
- virtual ~CommonStressTest() {}
- virtual void SetUp() = 0;
- virtual void TearDown() = 0;
- virtual void ResetStub() = 0;
- virtual bool AllowExhaustion() = 0;
- grpc::testing::EchoTestService::Stub* GetStub() { return stub_.get(); }
-
- protected:
- std::unique_ptr<grpc::testing::EchoTestService::Stub> stub_;
- std::unique_ptr<Server> server_;
-
- virtual void SetUpStart(ServerBuilder* builder, Service* service) = 0;
- void SetUpStartCommon(ServerBuilder* builder, Service* service) {
- builder->RegisterService(service);
- builder->SetMaxMessageSize(
- kMaxMessageSize_); // For testing max message size.
- }
- void SetUpEnd(ServerBuilder* builder) { server_ = builder->BuildAndStart(); }
- void TearDownStart() { server_->Shutdown(); }
- void TearDownEnd() {}
-
- private:
- const int kMaxMessageSize_;
-};
-
-template <class Service>
-class CommonStressTestInsecure : public CommonStressTest<Service> {
- public:
- void ResetStub() override {
- std::shared_ptr<Channel> channel = grpc::CreateChannel(
- server_address_.str(), InsecureChannelCredentials());
- this->stub_ = grpc::testing::EchoTestService::NewStub(channel);
- }
- bool AllowExhaustion() override { return false; }
-
- protected:
- void SetUpStart(ServerBuilder* builder, Service* service) override {
- int port = 5003; // grpc_pick_unused_port_or_die();
- this->server_address_ << "localhost:" << port;
- // Setup server
- builder->AddListeningPort(server_address_.str(),
- InsecureServerCredentials());
- this->SetUpStartCommon(builder, service);
- }
-
- private:
- std::ostringstream server_address_;
-};
-
-template <class Service, bool allow_resource_exhaustion>
-class CommonStressTestInproc : public CommonStressTest<Service> {
- public:
- void ResetStub() override {
- ChannelArguments args;
- std::shared_ptr<Channel> channel = this->server_->InProcessChannel(args);
- this->stub_ = grpc::testing::EchoTestService::NewStub(channel);
- }
- bool AllowExhaustion() override { return allow_resource_exhaustion; }
-
- protected:
- void SetUpStart(ServerBuilder* builder, Service* service) override {
- this->SetUpStartCommon(builder, service);
- }
-};
-
-template <class BaseClass>
-class CommonStressTestSyncServer : public BaseClass {
- public:
- void SetUp() override {
- ServerBuilder builder;
- this->SetUpStart(&builder, &service_);
- this->SetUpEnd(&builder);
- }
- void TearDown() override {
- this->TearDownStart();
- this->TearDownEnd();
- }
-
- private:
- TestServiceImpl service_;
-};
-
-template <class BaseClass>
-class CommonStressTestSyncServerLowThreadCount : public BaseClass {
- public:
- void SetUp() override {
- ServerBuilder builder;
- ResourceQuota quota;
- this->SetUpStart(&builder, &service_);
- quota.SetMaxThreads(4);
- builder.SetResourceQuota(quota);
- this->SetUpEnd(&builder);
- }
- void TearDown() override {
- this->TearDownStart();
- this->TearDownEnd();
- }
-
- private:
- TestServiceImpl service_;
-};
-
-template <class BaseClass>
-class CommonStressTestAsyncServer : public BaseClass {
- public:
- CommonStressTestAsyncServer() : contexts_(kNumAsyncServerThreads * 100) {}
- void SetUp() override {
- shutting_down_ = false;
- ServerBuilder builder;
- this->SetUpStart(&builder, &service_);
- cq_ = builder.AddCompletionQueue();
- this->SetUpEnd(&builder);
- for (int i = 0; i < kNumAsyncServerThreads * 100; i++) {
- RefreshContext(i);
- }
- for (int i = 0; i < kNumAsyncServerThreads; i++) {
- server_threads_.emplace_back(&CommonStressTestAsyncServer::ProcessRpcs,
- this);
- }
- }
- void TearDown() override {
- {
- grpc::internal::MutexLock l(&mu_);
- this->TearDownStart();
- shutting_down_ = true;
- cq_->Shutdown();
- }
-
- for (int i = 0; i < kNumAsyncServerThreads; i++) {
- server_threads_[i].join();
- }
-
- void* ignored_tag;
- bool ignored_ok;
- while (cq_->Next(&ignored_tag, &ignored_ok))
- ;
- this->TearDownEnd();
- }
-
- private:
- void ProcessRpcs() {
- void* tag;
- bool ok;
- while (cq_->Next(&tag, &ok)) {
- if (ok) {
- int i = static_cast<int>(reinterpret_cast<intptr_t>(tag));
- switch (contexts_[i].state) {
- case Context::READY: {
- contexts_[i].state = Context::DONE;
- EchoResponse send_response;
- send_response.set_message(contexts_[i].recv_request.message());
- contexts_[i].response_writer->Finish(send_response, Status::OK,
- tag);
- break;
- }
- case Context::DONE:
- RefreshContext(i);
- break;
- }
- }
- }
- }
- void RefreshContext(int i) {
- grpc::internal::MutexLock l(&mu_);
- if (!shutting_down_) {
- contexts_[i].state = Context::READY;
- contexts_[i].srv_ctx.reset(new ServerContext);
- contexts_[i].response_writer.reset(
- new grpc::ServerAsyncResponseWriter<EchoResponse>(
- contexts_[i].srv_ctx.get()));
- service_.RequestEcho(contexts_[i].srv_ctx.get(),
- &contexts_[i].recv_request,
- contexts_[i].response_writer.get(), cq_.get(),
- cq_.get(), (void*)static_cast<intptr_t>(i));
- }
- }
- struct Context {
- std::unique_ptr<ServerContext> srv_ctx;
- std::unique_ptr<grpc::ServerAsyncResponseWriter<EchoResponse>>
- response_writer;
- EchoRequest recv_request;
- enum { READY, DONE } state;
- };
- std::vector<Context> contexts_;
- ::grpc::testing::EchoTestService::AsyncService service_;
- std::unique_ptr<ServerCompletionQueue> cq_;
- bool shutting_down_;
- grpc::internal::Mutex mu_;
- std::vector<std::thread> server_threads_;
-};
-
-template <class Common>
-class End2endTest : public ::testing::Test {
- protected:
- End2endTest() {}
- void SetUp() override { common_.SetUp(); }
- void TearDown() override { common_.TearDown(); }
- void ResetStub() { common_.ResetStub(); }
-
- Common common_;
-};
-
-static void SendRpc(grpc::testing::EchoTestService::Stub* stub, int num_rpcs,
- bool allow_exhaustion, gpr_atm* errors) {
- EchoRequest request;
- EchoResponse response;
- request.set_message("Hello");
-
- for (int i = 0; i < num_rpcs; ++i) {
- ClientContext context;
- Status s = stub->Echo(&context, request, &response);
- EXPECT_TRUE(s.ok() || (allow_exhaustion &&
- s.error_code() == StatusCode::RESOURCE_EXHAUSTED));
- if (!s.ok()) {
- if (!(allow_exhaustion &&
- s.error_code() == StatusCode::RESOURCE_EXHAUSTED)) {
- gpr_log(GPR_ERROR, "RPC error: %d: %s", s.error_code(),
- s.error_message().c_str());
- }
- gpr_atm_no_barrier_fetch_add(errors, static_cast<gpr_atm>(1));
- } else {
- EXPECT_EQ(response.message(), request.message());
- }
- }
-}
-
-typedef ::testing::Types<
- CommonStressTestSyncServer<CommonStressTestInsecure<TestServiceImpl>>,
- CommonStressTestSyncServer<CommonStressTestInproc<TestServiceImpl, false>>,
- CommonStressTestSyncServerLowThreadCount<
- CommonStressTestInproc<TestServiceImpl, true>>,
- CommonStressTestAsyncServer<
- CommonStressTestInsecure<grpc::testing::EchoTestService::AsyncService>>,
- CommonStressTestAsyncServer<CommonStressTestInproc<
- grpc::testing::EchoTestService::AsyncService, false>>>
- CommonTypes;
-TYPED_TEST_SUITE(End2endTest, CommonTypes);
-TYPED_TEST(End2endTest, ThreadStress) {
- this->common_.ResetStub();
- std::vector<std::thread> threads;
- gpr_atm errors;
- gpr_atm_rel_store(&errors, static_cast<gpr_atm>(0));
- threads.reserve(kNumThreads);
- for (int i = 0; i < kNumThreads; ++i) {
- threads.emplace_back(SendRpc, this->common_.GetStub(), kNumRpcs,
- this->common_.AllowExhaustion(), &errors);
- }
- for (int i = 0; i < kNumThreads; ++i) {
- threads[i].join();
- }
- uint64_t error_cnt = static_cast<uint64_t>(gpr_atm_no_barrier_load(&errors));
- if (error_cnt != 0) {
- gpr_log(GPR_INFO, "RPC error count: %" PRIu64, error_cnt);
- }
- // If this test allows resource exhaustion, expect that it actually sees some
- if (this->common_.AllowExhaustion()) {
- EXPECT_GT(error_cnt, static_cast<uint64_t>(0));
- }
-}
-
-template <class Common>
-class AsyncClientEnd2endTest : public ::testing::Test {
- protected:
- AsyncClientEnd2endTest() : rpcs_outstanding_(0) {}
-
- void SetUp() override { common_.SetUp(); }
- void TearDown() override {
- void* ignored_tag;
- bool ignored_ok;
- while (cq_.Next(&ignored_tag, &ignored_ok))
- ;
- common_.TearDown();
- }
-
- void Wait() {
- grpc::internal::MutexLock l(&mu_);
- while (rpcs_outstanding_ != 0) {
- cv_.Wait(&mu_);
- }
-
- cq_.Shutdown();
- }
-
- struct AsyncClientCall {
- EchoResponse response;
- ClientContext context;
- Status status;
- std::unique_ptr<ClientAsyncResponseReader<EchoResponse>> response_reader;
- };
-
- void AsyncSendRpc(int num_rpcs) {
- for (int i = 0; i < num_rpcs; ++i) {
- AsyncClientCall* call = new AsyncClientCall;
- EchoRequest request;
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <cinttypes>
+#include <mutex>
+#include <thread>
+
+#include <grpc/grpc.h>
+#include <grpc/support/time.h>
+#include <grpcpp/channel.h>
+#include <grpcpp/client_context.h>
+#include <grpcpp/create_channel.h>
+#include <grpcpp/impl/codegen/sync.h>
+#include <grpcpp/resource_quota.h>
+#include <grpcpp/server.h>
+#include <grpcpp/server_builder.h>
+#include <grpcpp/server_context.h>
+
+#include "src/core/lib/gpr/env.h"
+#include "src/core/lib/surface/api_trace.h"
+#include "src/proto/grpc/testing/duplicate/echo_duplicate.grpc.pb.h"
+#include "src/proto/grpc/testing/echo.grpc.pb.h"
+#include "test/core/util/port.h"
+#include "test/core/util/test_config.h"
+
+#include <gtest/gtest.h>
+
+using grpc::testing::EchoRequest;
+using grpc::testing::EchoResponse;
+using std::chrono::system_clock;
+
+const int kNumThreads = 100; // Number of threads
+const int kNumAsyncSendThreads = 2;
+const int kNumAsyncReceiveThreads = 50;
+const int kNumAsyncServerThreads = 50;
+const int kNumRpcs = 1000; // Number of RPCs per thread
+
+namespace grpc {
+namespace testing {
+
+class TestServiceImpl : public ::grpc::testing::EchoTestService::Service {
+ public:
+ TestServiceImpl() {}
+
+ Status Echo(ServerContext* /*context*/, const EchoRequest* request,
+ EchoResponse* response) override {
+ response->set_message(request->message());
+ return Status::OK;
+ }
+};
+
+template <class Service>
+class CommonStressTest {
+ public:
+ CommonStressTest() : kMaxMessageSize_(8192) {
+#if TARGET_OS_IPHONE
+ // Workaround Apple CFStream bug
+ gpr_setenv("grpc_cfstream", "0");
+#endif
+ }
+ virtual ~CommonStressTest() {}
+ virtual void SetUp() = 0;
+ virtual void TearDown() = 0;
+ virtual void ResetStub() = 0;
+ virtual bool AllowExhaustion() = 0;
+ grpc::testing::EchoTestService::Stub* GetStub() { return stub_.get(); }
+
+ protected:
+ std::unique_ptr<grpc::testing::EchoTestService::Stub> stub_;
+ std::unique_ptr<Server> server_;
+
+ virtual void SetUpStart(ServerBuilder* builder, Service* service) = 0;
+ void SetUpStartCommon(ServerBuilder* builder, Service* service) {
+ builder->RegisterService(service);
+ builder->SetMaxMessageSize(
+ kMaxMessageSize_); // For testing max message size.
+ }
+ void SetUpEnd(ServerBuilder* builder) { server_ = builder->BuildAndStart(); }
+ void TearDownStart() { server_->Shutdown(); }
+ void TearDownEnd() {}
+
+ private:
+ const int kMaxMessageSize_;
+};
+
+template <class Service>
+class CommonStressTestInsecure : public CommonStressTest<Service> {
+ public:
+ void ResetStub() override {
+ std::shared_ptr<Channel> channel = grpc::CreateChannel(
+ server_address_.str(), InsecureChannelCredentials());
+ this->stub_ = grpc::testing::EchoTestService::NewStub(channel);
+ }
+ bool AllowExhaustion() override { return false; }
+
+ protected:
+ void SetUpStart(ServerBuilder* builder, Service* service) override {
+ int port = 5003; // grpc_pick_unused_port_or_die();
+ this->server_address_ << "localhost:" << port;
+ // Setup server
+ builder->AddListeningPort(server_address_.str(),
+ InsecureServerCredentials());
+ this->SetUpStartCommon(builder, service);
+ }
+
+ private:
+ std::ostringstream server_address_;
+};
+
+template <class Service, bool allow_resource_exhaustion>
+class CommonStressTestInproc : public CommonStressTest<Service> {
+ public:
+ void ResetStub() override {
+ ChannelArguments args;
+ std::shared_ptr<Channel> channel = this->server_->InProcessChannel(args);
+ this->stub_ = grpc::testing::EchoTestService::NewStub(channel);
+ }
+ bool AllowExhaustion() override { return allow_resource_exhaustion; }
+
+ protected:
+ void SetUpStart(ServerBuilder* builder, Service* service) override {
+ this->SetUpStartCommon(builder, service);
+ }
+};
+
+template <class BaseClass>
+class CommonStressTestSyncServer : public BaseClass {
+ public:
+ void SetUp() override {
+ ServerBuilder builder;
+ this->SetUpStart(&builder, &service_);
+ this->SetUpEnd(&builder);
+ }
+ void TearDown() override {
+ this->TearDownStart();
+ this->TearDownEnd();
+ }
+
+ private:
+ TestServiceImpl service_;
+};
+
+template <class BaseClass>
+class CommonStressTestSyncServerLowThreadCount : public BaseClass {
+ public:
+ void SetUp() override {
+ ServerBuilder builder;
+ ResourceQuota quota;
+ this->SetUpStart(&builder, &service_);
+ quota.SetMaxThreads(4);
+ builder.SetResourceQuota(quota);
+ this->SetUpEnd(&builder);
+ }
+ void TearDown() override {
+ this->TearDownStart();
+ this->TearDownEnd();
+ }
+
+ private:
+ TestServiceImpl service_;
+};
+
+template <class BaseClass>
+class CommonStressTestAsyncServer : public BaseClass {
+ public:
+ CommonStressTestAsyncServer() : contexts_(kNumAsyncServerThreads * 100) {}
+ void SetUp() override {
+ shutting_down_ = false;
+ ServerBuilder builder;
+ this->SetUpStart(&builder, &service_);
+ cq_ = builder.AddCompletionQueue();
+ this->SetUpEnd(&builder);
+ for (int i = 0; i < kNumAsyncServerThreads * 100; i++) {
+ RefreshContext(i);
+ }
+ for (int i = 0; i < kNumAsyncServerThreads; i++) {
+ server_threads_.emplace_back(&CommonStressTestAsyncServer::ProcessRpcs,
+ this);
+ }
+ }
+ void TearDown() override {
+ {
+ grpc::internal::MutexLock l(&mu_);
+ this->TearDownStart();
+ shutting_down_ = true;
+ cq_->Shutdown();
+ }
+
+ for (int i = 0; i < kNumAsyncServerThreads; i++) {
+ server_threads_[i].join();
+ }
+
+ void* ignored_tag;
+ bool ignored_ok;
+ while (cq_->Next(&ignored_tag, &ignored_ok))
+ ;
+ this->TearDownEnd();
+ }
+
+ private:
+ void ProcessRpcs() {
+ void* tag;
+ bool ok;
+ while (cq_->Next(&tag, &ok)) {
+ if (ok) {
+ int i = static_cast<int>(reinterpret_cast<intptr_t>(tag));
+ switch (contexts_[i].state) {
+ case Context::READY: {
+ contexts_[i].state = Context::DONE;
+ EchoResponse send_response;
+ send_response.set_message(contexts_[i].recv_request.message());
+ contexts_[i].response_writer->Finish(send_response, Status::OK,
+ tag);
+ break;
+ }
+ case Context::DONE:
+ RefreshContext(i);
+ break;
+ }
+ }
+ }
+ }
+ void RefreshContext(int i) {
+ grpc::internal::MutexLock l(&mu_);
+ if (!shutting_down_) {
+ contexts_[i].state = Context::READY;
+ contexts_[i].srv_ctx.reset(new ServerContext);
+ contexts_[i].response_writer.reset(
+ new grpc::ServerAsyncResponseWriter<EchoResponse>(
+ contexts_[i].srv_ctx.get()));
+ service_.RequestEcho(contexts_[i].srv_ctx.get(),
+ &contexts_[i].recv_request,
+ contexts_[i].response_writer.get(), cq_.get(),
+ cq_.get(), (void*)static_cast<intptr_t>(i));
+ }
+ }
+ struct Context {
+ std::unique_ptr<ServerContext> srv_ctx;
+ std::unique_ptr<grpc::ServerAsyncResponseWriter<EchoResponse>>
+ response_writer;
+ EchoRequest recv_request;
+ enum { READY, DONE } state;
+ };
+ std::vector<Context> contexts_;
+ ::grpc::testing::EchoTestService::AsyncService service_;
+ std::unique_ptr<ServerCompletionQueue> cq_;
+ bool shutting_down_;
+ grpc::internal::Mutex mu_;
+ std::vector<std::thread> server_threads_;
+};
+
+template <class Common>
+class End2endTest : public ::testing::Test {
+ protected:
+ End2endTest() {}
+ void SetUp() override { common_.SetUp(); }
+ void TearDown() override { common_.TearDown(); }
+ void ResetStub() { common_.ResetStub(); }
+
+ Common common_;
+};
+
+static void SendRpc(grpc::testing::EchoTestService::Stub* stub, int num_rpcs,
+ bool allow_exhaustion, gpr_atm* errors) {
+ EchoRequest request;
+ EchoResponse response;
+ request.set_message("Hello");
+
+ for (int i = 0; i < num_rpcs; ++i) {
+ ClientContext context;
+ Status s = stub->Echo(&context, request, &response);
+ EXPECT_TRUE(s.ok() || (allow_exhaustion &&
+ s.error_code() == StatusCode::RESOURCE_EXHAUSTED));
+ if (!s.ok()) {
+ if (!(allow_exhaustion &&
+ s.error_code() == StatusCode::RESOURCE_EXHAUSTED)) {
+ gpr_log(GPR_ERROR, "RPC error: %d: %s", s.error_code(),
+ s.error_message().c_str());
+ }
+ gpr_atm_no_barrier_fetch_add(errors, static_cast<gpr_atm>(1));
+ } else {
+ EXPECT_EQ(response.message(), request.message());
+ }
+ }
+}
+
+typedef ::testing::Types<
+ CommonStressTestSyncServer<CommonStressTestInsecure<TestServiceImpl>>,
+ CommonStressTestSyncServer<CommonStressTestInproc<TestServiceImpl, false>>,
+ CommonStressTestSyncServerLowThreadCount<
+ CommonStressTestInproc<TestServiceImpl, true>>,
+ CommonStressTestAsyncServer<
+ CommonStressTestInsecure<grpc::testing::EchoTestService::AsyncService>>,
+ CommonStressTestAsyncServer<CommonStressTestInproc<
+ grpc::testing::EchoTestService::AsyncService, false>>>
+ CommonTypes;
+TYPED_TEST_SUITE(End2endTest, CommonTypes);
+TYPED_TEST(End2endTest, ThreadStress) {
+ this->common_.ResetStub();
+ std::vector<std::thread> threads;
+ gpr_atm errors;
+ gpr_atm_rel_store(&errors, static_cast<gpr_atm>(0));
+ threads.reserve(kNumThreads);
+ for (int i = 0; i < kNumThreads; ++i) {
+ threads.emplace_back(SendRpc, this->common_.GetStub(), kNumRpcs,
+ this->common_.AllowExhaustion(), &errors);
+ }
+ for (int i = 0; i < kNumThreads; ++i) {
+ threads[i].join();
+ }
+ uint64_t error_cnt = static_cast<uint64_t>(gpr_atm_no_barrier_load(&errors));
+ if (error_cnt != 0) {
+ gpr_log(GPR_INFO, "RPC error count: %" PRIu64, error_cnt);
+ }
+ // If this test allows resource exhaustion, expect that it actually sees some
+ if (this->common_.AllowExhaustion()) {
+ EXPECT_GT(error_cnt, static_cast<uint64_t>(0));
+ }
+}
+
+template <class Common>
+class AsyncClientEnd2endTest : public ::testing::Test {
+ protected:
+ AsyncClientEnd2endTest() : rpcs_outstanding_(0) {}
+
+ void SetUp() override { common_.SetUp(); }
+ void TearDown() override {
+ void* ignored_tag;
+ bool ignored_ok;
+ while (cq_.Next(&ignored_tag, &ignored_ok))
+ ;
+ common_.TearDown();
+ }
+
+ void Wait() {
+ grpc::internal::MutexLock l(&mu_);
+ while (rpcs_outstanding_ != 0) {
+ cv_.Wait(&mu_);
+ }
+
+ cq_.Shutdown();
+ }
+
+ struct AsyncClientCall {
+ EchoResponse response;
+ ClientContext context;
+ Status status;
+ std::unique_ptr<ClientAsyncResponseReader<EchoResponse>> response_reader;
+ };
+
+ void AsyncSendRpc(int num_rpcs) {
+ for (int i = 0; i < num_rpcs; ++i) {
+ AsyncClientCall* call = new AsyncClientCall;
+ EchoRequest request;
request.set_message(TString("Hello: " + grpc::to_string(i)).c_str());
- call->response_reader =
- common_.GetStub()->AsyncEcho(&call->context, request, &cq_);
- call->response_reader->Finish(&call->response, &call->status,
- (void*)call);
-
- grpc::internal::MutexLock l(&mu_);
- rpcs_outstanding_++;
- }
- }
-
- void AsyncCompleteRpc() {
- while (true) {
- void* got_tag;
- bool ok = false;
- if (!cq_.Next(&got_tag, &ok)) break;
- AsyncClientCall* call = static_cast<AsyncClientCall*>(got_tag);
- if (!ok) {
- gpr_log(GPR_DEBUG, "Error: %d", call->status.error_code());
- }
- delete call;
-
- bool notify;
- {
- grpc::internal::MutexLock l(&mu_);
- rpcs_outstanding_--;
- notify = (rpcs_outstanding_ == 0);
- }
- if (notify) {
- cv_.Signal();
- }
- }
- }
-
- Common common_;
- CompletionQueue cq_;
- grpc::internal::Mutex mu_;
- grpc::internal::CondVar cv_;
- int rpcs_outstanding_;
-};
-
-TYPED_TEST_SUITE(AsyncClientEnd2endTest, CommonTypes);
-TYPED_TEST(AsyncClientEnd2endTest, ThreadStress) {
- this->common_.ResetStub();
- std::vector<std::thread> send_threads, completion_threads;
- for (int i = 0; i < kNumAsyncReceiveThreads; ++i) {
- completion_threads.emplace_back(
- &AsyncClientEnd2endTest_ThreadStress_Test<TypeParam>::AsyncCompleteRpc,
- this);
- }
- for (int i = 0; i < kNumAsyncSendThreads; ++i) {
- send_threads.emplace_back(
- &AsyncClientEnd2endTest_ThreadStress_Test<TypeParam>::AsyncSendRpc,
- this, kNumRpcs);
- }
- for (int i = 0; i < kNumAsyncSendThreads; ++i) {
- send_threads[i].join();
- }
-
- this->Wait();
- for (int i = 0; i < kNumAsyncReceiveThreads; ++i) {
- completion_threads[i].join();
- }
-}
-
-} // namespace testing
-} // namespace grpc
-
-int main(int argc, char** argv) {
- grpc::testing::TestEnvironment env(argc, argv);
- ::testing::InitGoogleTest(&argc, argv);
- return RUN_ALL_TESTS();
-}
+ call->response_reader =
+ common_.GetStub()->AsyncEcho(&call->context, request, &cq_);
+ call->response_reader->Finish(&call->response, &call->status,
+ (void*)call);
+
+ grpc::internal::MutexLock l(&mu_);
+ rpcs_outstanding_++;
+ }
+ }
+
+ void AsyncCompleteRpc() {
+ while (true) {
+ void* got_tag;
+ bool ok = false;
+ if (!cq_.Next(&got_tag, &ok)) break;
+ AsyncClientCall* call = static_cast<AsyncClientCall*>(got_tag);
+ if (!ok) {
+ gpr_log(GPR_DEBUG, "Error: %d", call->status.error_code());
+ }
+ delete call;
+
+ bool notify;
+ {
+ grpc::internal::MutexLock l(&mu_);
+ rpcs_outstanding_--;
+ notify = (rpcs_outstanding_ == 0);
+ }
+ if (notify) {
+ cv_.Signal();
+ }
+ }
+ }
+
+ Common common_;
+ CompletionQueue cq_;
+ grpc::internal::Mutex mu_;
+ grpc::internal::CondVar cv_;
+ int rpcs_outstanding_;
+};
+
+TYPED_TEST_SUITE(AsyncClientEnd2endTest, CommonTypes);
+TYPED_TEST(AsyncClientEnd2endTest, ThreadStress) {
+ this->common_.ResetStub();
+ std::vector<std::thread> send_threads, completion_threads;
+ for (int i = 0; i < kNumAsyncReceiveThreads; ++i) {
+ completion_threads.emplace_back(
+ &AsyncClientEnd2endTest_ThreadStress_Test<TypeParam>::AsyncCompleteRpc,
+ this);
+ }
+ for (int i = 0; i < kNumAsyncSendThreads; ++i) {
+ send_threads.emplace_back(
+ &AsyncClientEnd2endTest_ThreadStress_Test<TypeParam>::AsyncSendRpc,
+ this, kNumRpcs);
+ }
+ for (int i = 0; i < kNumAsyncSendThreads; ++i) {
+ send_threads[i].join();
+ }
+
+ this->Wait();
+ for (int i = 0; i < kNumAsyncReceiveThreads; ++i) {
+ completion_threads[i].join();
+ }
+}
+
+} // namespace testing
+} // namespace grpc
+
+int main(int argc, char** argv) {
+ grpc::testing::TestEnvironment env(argc, argv);
+ ::testing::InitGoogleTest(&argc, argv);
+ return RUN_ALL_TESTS();
+}
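One detail of CommonStressTestAsyncServer above that is easy to miss is its tag scheme: every pre-allocated call slot is addressed by its index into contexts_, and that index travels through the completion queue as the void* tag (RequestEcho is given (void*)static_cast<intptr_t>(i), and ProcessRpcs reverses the cast to find the slot and advance its READY/DONE state machine). A stand-alone sketch of just that index/tag round trip, with illustrative names:

#include <cassert>
#include <cstdint>
#include <cstdio>

// Encode a call-slot index as a completion-queue tag...
static void* IndexToTag(int i) {
  return reinterpret_cast<void*>(static_cast<intptr_t>(i));
}

// ...and recover the index when the completion queue hands the tag back.
static int TagToIndex(void* tag) {
  return static_cast<int>(reinterpret_cast<intptr_t>(tag));
}

int main() {
  // The test above pre-allocates kNumAsyncServerThreads * 100 slots; any
  // small, non-negative range round-trips the same way.
  for (int i = 0; i < 5000; ++i) {
    assert(TagToIndex(IndexToTag(i)) == i);
  }
  std::printf("index <-> tag round trip ok\n");
  return 0;
}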
diff --git a/contrib/libs/grpc/test/cpp/end2end/time_change_test.cc b/contrib/libs/grpc/test/cpp/end2end/time_change_test.cc
index d83b0ae7a4..48b9eace12 100644
--- a/contrib/libs/grpc/test/cpp/end2end/time_change_test.cc
+++ b/contrib/libs/grpc/test/cpp/end2end/time_change_test.cc
@@ -1,367 +1,367 @@
-/*
- *
- * Copyright 2019 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include <grpc/grpc.h>
-#include <grpc/support/log.h>
-#include <grpc/support/time.h>
-#include <grpcpp/channel.h>
-#include <grpcpp/client_context.h>
-#include <grpcpp/create_channel.h>
-#include <grpcpp/server.h>
-#include <grpcpp/server_builder.h>
-#include <grpcpp/server_context.h>
-
-#include "src/core/lib/iomgr/timer.h"
-#include "src/proto/grpc/testing/echo.grpc.pb.h"
-#include "test/core/util/port.h"
-#include "test/core/util/test_config.h"
-#include "test/cpp/end2end/test_service_impl.h"
-#include "test/cpp/util/subprocess.h"
-
-#include <gtest/gtest.h>
-#include <sys/time.h>
-#include <thread>
-
-using grpc::testing::EchoRequest;
-using grpc::testing::EchoResponse;
-
+/*
+ *
+ * Copyright 2019 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpc/grpc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/time.h>
+#include <grpcpp/channel.h>
+#include <grpcpp/client_context.h>
+#include <grpcpp/create_channel.h>
+#include <grpcpp/server.h>
+#include <grpcpp/server_builder.h>
+#include <grpcpp/server_context.h>
+
+#include "src/core/lib/iomgr/timer.h"
+#include "src/proto/grpc/testing/echo.grpc.pb.h"
+#include "test/core/util/port.h"
+#include "test/core/util/test_config.h"
+#include "test/cpp/end2end/test_service_impl.h"
+#include "test/cpp/util/subprocess.h"
+
+#include <gtest/gtest.h>
+#include <sys/time.h>
+#include <thread>
+
+using grpc::testing::EchoRequest;
+using grpc::testing::EchoResponse;
+
static TString g_root;
-
-static gpr_mu g_mu;
-extern gpr_timespec (*gpr_now_impl)(gpr_clock_type clock_type);
-gpr_timespec (*gpr_now_impl_orig)(gpr_clock_type clock_type) = gpr_now_impl;
-static int g_time_shift_sec = 0;
-static int g_time_shift_nsec = 0;
-static gpr_timespec now_impl(gpr_clock_type clock) {
- auto ts = gpr_now_impl_orig(clock);
- // We only manipulate the realtime clock to simulate changes in wall-clock
- // time
- if (clock != GPR_CLOCK_REALTIME) {
- return ts;
- }
- GPR_ASSERT(ts.tv_nsec >= 0);
- GPR_ASSERT(ts.tv_nsec < GPR_NS_PER_SEC);
- gpr_mu_lock(&g_mu);
- ts.tv_sec += g_time_shift_sec;
- ts.tv_nsec += g_time_shift_nsec;
- gpr_mu_unlock(&g_mu);
- if (ts.tv_nsec >= GPR_NS_PER_SEC) {
- ts.tv_nsec -= GPR_NS_PER_SEC;
- ++ts.tv_sec;
- } else if (ts.tv_nsec < 0) {
- --ts.tv_sec;
- ts.tv_nsec = GPR_NS_PER_SEC + ts.tv_nsec;
- }
- return ts;
-}
-
-// offset the value returned by gpr_now(GPR_CLOCK_REALTIME) by msecs
-// milliseconds
-static void set_now_offset(int msecs) {
- gpr_mu_lock(&g_mu);
- g_time_shift_sec = msecs / 1000;
- g_time_shift_nsec = (msecs % 1000) * 1e6;
- gpr_mu_unlock(&g_mu);
-}
-
-// reset the time offset so gpr_now() returns unshifted time again
-static void reset_now_offset() {
- gpr_mu_lock(&g_mu);
- g_time_shift_sec = 0;
- g_time_shift_nsec = 0;
- gpr_mu_unlock(&g_mu);
-}
-
-namespace grpc {
-namespace testing {
-
-namespace {
-
-// gpr_now() is called with invalid clock_type
-TEST(TimespecTest, GprNowInvalidClockType) {
- // initialize to some junk value
- gpr_clock_type invalid_clock_type = (gpr_clock_type)32641;
- EXPECT_DEATH(gpr_now(invalid_clock_type), ".*");
-}
-
-// Add timespan with negative nanoseconds
-TEST(TimespecTest, GprTimeAddNegativeNs) {
- gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
- gpr_timespec bad_ts = {1, -1000, GPR_TIMESPAN};
- EXPECT_DEATH(gpr_time_add(now, bad_ts), ".*");
-}
-
-// Subtract timespan with negative nanoseconds
-TEST(TimespecTest, GprTimeSubNegativeNs) {
- // Nanoseconds must always be positive. Negative timestamps are represented by
- // (negative seconds, positive nanoseconds)
- gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
- gpr_timespec bad_ts = {1, -1000, GPR_TIMESPAN};
- EXPECT_DEATH(gpr_time_sub(now, bad_ts), ".*");
-}
-
-// Add negative milliseconds to gpr_timespec
-TEST(TimespecTest, GrpcNegativeMillisToTimespec) {
- // -1500 milliseconds converts to timespec (-2 secs, 5 * 10^8 nsec)
- gpr_timespec ts = grpc_millis_to_timespec(-1500, GPR_CLOCK_MONOTONIC);
- GPR_ASSERT(ts.tv_sec == -2);
- GPR_ASSERT(ts.tv_nsec == 5e8);
- GPR_ASSERT(ts.clock_type == GPR_CLOCK_MONOTONIC);
-}
-
-class TimeChangeTest : public ::testing::Test {
- protected:
- TimeChangeTest() {}
-
- static void SetUpTestCase() {
- auto port = grpc_pick_unused_port_or_die();
- std::ostringstream addr_stream;
- addr_stream << "localhost:" << port;
- server_address_ = addr_stream.str();
- server_.reset(new SubProcess({
- g_root + "/client_crash_test_server",
- "--address=" + server_address_,
- }));
- GPR_ASSERT(server_);
- // connect to server and make sure it's reachable.
- auto channel =
- grpc::CreateChannel(server_address_, InsecureChannelCredentials());
- GPR_ASSERT(channel);
- EXPECT_TRUE(channel->WaitForConnected(
- grpc_timeout_milliseconds_to_deadline(30000)));
- }
-
- static void TearDownTestCase() { server_.reset(); }
-
- void SetUp() {
- channel_ =
- grpc::CreateChannel(server_address_, InsecureChannelCredentials());
- GPR_ASSERT(channel_);
- stub_ = grpc::testing::EchoTestService::NewStub(channel_);
- }
-
- void TearDown() { reset_now_offset(); }
-
- std::unique_ptr<grpc::testing::EchoTestService::Stub> CreateStub() {
- return grpc::testing::EchoTestService::NewStub(channel_);
- }
-
- std::shared_ptr<Channel> GetChannel() { return channel_; }
- // time jump offsets in milliseconds
- const int TIME_OFFSET1 = 20123;
- const int TIME_OFFSET2 = 5678;
-
- private:
+
+static gpr_mu g_mu;
+extern gpr_timespec (*gpr_now_impl)(gpr_clock_type clock_type);
+gpr_timespec (*gpr_now_impl_orig)(gpr_clock_type clock_type) = gpr_now_impl;
+static int g_time_shift_sec = 0;
+static int g_time_shift_nsec = 0;
+static gpr_timespec now_impl(gpr_clock_type clock) {
+ auto ts = gpr_now_impl_orig(clock);
+ // We only manipulate the realtime clock to simulate changes in wall-clock
+ // time
+ if (clock != GPR_CLOCK_REALTIME) {
+ return ts;
+ }
+ GPR_ASSERT(ts.tv_nsec >= 0);
+ GPR_ASSERT(ts.tv_nsec < GPR_NS_PER_SEC);
+ gpr_mu_lock(&g_mu);
+ ts.tv_sec += g_time_shift_sec;
+ ts.tv_nsec += g_time_shift_nsec;
+ gpr_mu_unlock(&g_mu);
+ if (ts.tv_nsec >= GPR_NS_PER_SEC) {
+ ts.tv_nsec -= GPR_NS_PER_SEC;
+ ++ts.tv_sec;
+ } else if (ts.tv_nsec < 0) {
+ --ts.tv_sec;
+ ts.tv_nsec = GPR_NS_PER_SEC + ts.tv_nsec;
+ }
+ return ts;
+}
+
+// offset the value returned by gpr_now(GPR_CLOCK_REALTIME) by msecs
+// milliseconds
+static void set_now_offset(int msecs) {
+ gpr_mu_lock(&g_mu);
+ g_time_shift_sec = msecs / 1000;
+ g_time_shift_nsec = (msecs % 1000) * 1e6;
+ gpr_mu_unlock(&g_mu);
+}
+
+// reset the time offset so gpr_now() returns unshifted time again
+static void reset_now_offset() {
+ gpr_mu_lock(&g_mu);
+ g_time_shift_sec = 0;
+ g_time_shift_nsec = 0;
+ gpr_mu_unlock(&g_mu);
+}
+
+namespace grpc {
+namespace testing {
+
+namespace {
+
+// gpr_now() is called with invalid clock_type
+TEST(TimespecTest, GprNowInvalidClockType) {
+ // initialize to some junk value
+ gpr_clock_type invalid_clock_type = (gpr_clock_type)32641;
+ EXPECT_DEATH(gpr_now(invalid_clock_type), ".*");
+}
+
+// Add timespan with negative nanoseconds
+TEST(TimespecTest, GprTimeAddNegativeNs) {
+ gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
+ gpr_timespec bad_ts = {1, -1000, GPR_TIMESPAN};
+ EXPECT_DEATH(gpr_time_add(now, bad_ts), ".*");
+}
+
+// Subtract timespan with negative nanoseconds
+TEST(TimespecTest, GprTimeSubNegativeNs) {
+ // Nanoseconds must always be positive. Negative timestamps are represented by
+ // (negative seconds, positive nanoseconds)
+ gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
+ gpr_timespec bad_ts = {1, -1000, GPR_TIMESPAN};
+ EXPECT_DEATH(gpr_time_sub(now, bad_ts), ".*");
+}
+
+// Add negative milliseconds to gpr_timespec
+TEST(TimespecTest, GrpcNegativeMillisToTimespec) {
+ // -1500 milliseconds converts to timespec (-2 secs, 5 * 10^8 nsec)
+ gpr_timespec ts = grpc_millis_to_timespec(-1500, GPR_CLOCK_MONOTONIC);
+ GPR_ASSERT(ts.tv_sec == -2);
+ GPR_ASSERT(ts.tv_nsec == 5e8);
+ GPR_ASSERT(ts.clock_type == GPR_CLOCK_MONOTONIC);
+}
+
+class TimeChangeTest : public ::testing::Test {
+ protected:
+ TimeChangeTest() {}
+
+ static void SetUpTestCase() {
+ auto port = grpc_pick_unused_port_or_die();
+ std::ostringstream addr_stream;
+ addr_stream << "localhost:" << port;
+ server_address_ = addr_stream.str();
+ server_.reset(new SubProcess({
+ g_root + "/client_crash_test_server",
+ "--address=" + server_address_,
+ }));
+ GPR_ASSERT(server_);
+ // connect to server and make sure it's reachable.
+ auto channel =
+ grpc::CreateChannel(server_address_, InsecureChannelCredentials());
+ GPR_ASSERT(channel);
+ EXPECT_TRUE(channel->WaitForConnected(
+ grpc_timeout_milliseconds_to_deadline(30000)));
+ }
+
+ static void TearDownTestCase() { server_.reset(); }
+
+ void SetUp() {
+ channel_ =
+ grpc::CreateChannel(server_address_, InsecureChannelCredentials());
+ GPR_ASSERT(channel_);
+ stub_ = grpc::testing::EchoTestService::NewStub(channel_);
+ }
+
+ void TearDown() { reset_now_offset(); }
+
+ std::unique_ptr<grpc::testing::EchoTestService::Stub> CreateStub() {
+ return grpc::testing::EchoTestService::NewStub(channel_);
+ }
+
+ std::shared_ptr<Channel> GetChannel() { return channel_; }
+ // time jump offsets in milliseconds
+ const int TIME_OFFSET1 = 20123;
+ const int TIME_OFFSET2 = 5678;
+
+ private:
static TString server_address_;
- static std::unique_ptr<SubProcess> server_;
- std::shared_ptr<Channel> channel_;
- std::unique_ptr<grpc::testing::EchoTestService::Stub> stub_;
-};
+ static std::unique_ptr<SubProcess> server_;
+ std::shared_ptr<Channel> channel_;
+ std::unique_ptr<grpc::testing::EchoTestService::Stub> stub_;
+};
TString TimeChangeTest::server_address_;
-std::unique_ptr<SubProcess> TimeChangeTest::server_;
-
-// Wall-clock time jumps forward on client before bidi stream is created
-TEST_F(TimeChangeTest, TimeJumpForwardBeforeStreamCreated) {
- EchoRequest request;
- EchoResponse response;
- ClientContext context;
- context.set_deadline(grpc_timeout_milliseconds_to_deadline(5000));
- context.AddMetadata(kServerResponseStreamsToSend, "1");
-
- auto channel = GetChannel();
- GPR_ASSERT(channel);
- EXPECT_TRUE(
- channel->WaitForConnected(grpc_timeout_milliseconds_to_deadline(5000)));
- auto stub = CreateStub();
-
- // time jumps forward by TIME_OFFSET1 milliseconds
- set_now_offset(TIME_OFFSET1);
- auto stream = stub->BidiStream(&context);
- request.set_message("Hello");
- EXPECT_TRUE(stream->Write(request));
-
- EXPECT_TRUE(stream->WritesDone());
- EXPECT_TRUE(stream->Read(&response));
-
- auto status = stream->Finish();
- EXPECT_TRUE(status.ok());
-}
-
-// Wall-clock time jumps back on client before bidi stream is created
-TEST_F(TimeChangeTest, TimeJumpBackBeforeStreamCreated) {
- EchoRequest request;
- EchoResponse response;
- ClientContext context;
- context.set_deadline(grpc_timeout_milliseconds_to_deadline(5000));
- context.AddMetadata(kServerResponseStreamsToSend, "1");
-
- auto channel = GetChannel();
- GPR_ASSERT(channel);
- EXPECT_TRUE(
- channel->WaitForConnected(grpc_timeout_milliseconds_to_deadline(5000)));
- auto stub = CreateStub();
-
- // time jumps back by TIME_OFFSET1 milliseconds
- set_now_offset(-TIME_OFFSET1);
- auto stream = stub->BidiStream(&context);
- request.set_message("Hello");
- EXPECT_TRUE(stream->Write(request));
-
- EXPECT_TRUE(stream->WritesDone());
- EXPECT_TRUE(stream->Read(&response));
- EXPECT_EQ(request.message(), response.message());
-
- auto status = stream->Finish();
- EXPECT_TRUE(status.ok());
-}
-
-// Wall-clock time jumps forward on client while call is in progress
-TEST_F(TimeChangeTest, TimeJumpForwardAfterStreamCreated) {
- EchoRequest request;
- EchoResponse response;
- ClientContext context;
- context.set_deadline(grpc_timeout_milliseconds_to_deadline(5000));
- context.AddMetadata(kServerResponseStreamsToSend, "2");
-
- auto channel = GetChannel();
- GPR_ASSERT(channel);
- EXPECT_TRUE(
- channel->WaitForConnected(grpc_timeout_milliseconds_to_deadline(5000)));
- auto stub = CreateStub();
-
- auto stream = stub->BidiStream(&context);
-
- request.set_message("Hello");
- EXPECT_TRUE(stream->Write(request));
- EXPECT_TRUE(stream->Read(&response));
-
- // time jumps forward by TIME_OFFSET1 milliseconds.
- set_now_offset(TIME_OFFSET1);
-
- request.set_message("World");
- EXPECT_TRUE(stream->Write(request));
- EXPECT_TRUE(stream->WritesDone());
- EXPECT_TRUE(stream->Read(&response));
-
- auto status = stream->Finish();
- EXPECT_TRUE(status.ok());
-}
-
-// Wall-clock time jumps back on client while call is in progress
-TEST_F(TimeChangeTest, TimeJumpBackAfterStreamCreated) {
- EchoRequest request;
- EchoResponse response;
- ClientContext context;
- context.set_deadline(grpc_timeout_milliseconds_to_deadline(5000));
- context.AddMetadata(kServerResponseStreamsToSend, "2");
-
- auto channel = GetChannel();
- GPR_ASSERT(channel);
- EXPECT_TRUE(
- channel->WaitForConnected(grpc_timeout_milliseconds_to_deadline(5000)));
- auto stub = CreateStub();
-
- auto stream = stub->BidiStream(&context);
-
- request.set_message("Hello");
- EXPECT_TRUE(stream->Write(request));
- EXPECT_TRUE(stream->Read(&response));
-
- // time jumps back TIME_OFFSET1 milliseconds.
- set_now_offset(-TIME_OFFSET1);
-
- request.set_message("World");
- EXPECT_TRUE(stream->Write(request));
- EXPECT_TRUE(stream->WritesDone());
- EXPECT_TRUE(stream->Read(&response));
-
- auto status = stream->Finish();
- EXPECT_TRUE(status.ok());
-}
-
-// Wall-clock time jumps forward and backwards during call
-TEST_F(TimeChangeTest, TimeJumpForwardAndBackDuringCall) {
- EchoRequest request;
- EchoResponse response;
- ClientContext context;
- context.set_deadline(grpc_timeout_milliseconds_to_deadline(5000));
- context.AddMetadata(kServerResponseStreamsToSend, "2");
-
- auto channel = GetChannel();
- GPR_ASSERT(channel);
-
- EXPECT_TRUE(
- channel->WaitForConnected(grpc_timeout_milliseconds_to_deadline(5000)));
- auto stub = CreateStub();
- auto stream = stub->BidiStream(&context);
-
- request.set_message("Hello");
- EXPECT_TRUE(stream->Write(request));
-
- // time jumps back by TIME_OFFSET2 milliseconds
- set_now_offset(-TIME_OFFSET2);
-
- EXPECT_TRUE(stream->Read(&response));
- request.set_message("World");
-
- // time jumps forward by TIME_OFFSET1 milliseconds
- set_now_offset(TIME_OFFSET1);
-
- EXPECT_TRUE(stream->Write(request));
-
- // time jumps back by TIME_OFFSET2 milliseconds
- set_now_offset(-TIME_OFFSET2);
-
- EXPECT_TRUE(stream->WritesDone());
-
- // time jumps back by TIME_OFFSET2 milliseconds
- set_now_offset(-TIME_OFFSET2);
-
- EXPECT_TRUE(stream->Read(&response));
-
- // time jumps back by TIME_OFFSET2 milliseconds
- set_now_offset(-TIME_OFFSET2);
-
- auto status = stream->Finish();
- EXPECT_TRUE(status.ok());
-}
-
-} // namespace
-
-} // namespace testing
-} // namespace grpc
-
-int main(int argc, char** argv) {
+std::unique_ptr<SubProcess> TimeChangeTest::server_;
+
+// Wall-clock time jumps forward on client before bidi stream is created
+TEST_F(TimeChangeTest, TimeJumpForwardBeforeStreamCreated) {
+ EchoRequest request;
+ EchoResponse response;
+ ClientContext context;
+ context.set_deadline(grpc_timeout_milliseconds_to_deadline(5000));
+ context.AddMetadata(kServerResponseStreamsToSend, "1");
+
+ auto channel = GetChannel();
+ GPR_ASSERT(channel);
+ EXPECT_TRUE(
+ channel->WaitForConnected(grpc_timeout_milliseconds_to_deadline(5000)));
+ auto stub = CreateStub();
+
+ // time jumps forward by TIME_OFFSET1 milliseconds
+ set_now_offset(TIME_OFFSET1);
+ auto stream = stub->BidiStream(&context);
+ request.set_message("Hello");
+ EXPECT_TRUE(stream->Write(request));
+
+ EXPECT_TRUE(stream->WritesDone());
+ EXPECT_TRUE(stream->Read(&response));
+
+ auto status = stream->Finish();
+ EXPECT_TRUE(status.ok());
+}
+
+// Wall-clock time jumps back on client before bidi stream is created
+TEST_F(TimeChangeTest, TimeJumpBackBeforeStreamCreated) {
+ EchoRequest request;
+ EchoResponse response;
+ ClientContext context;
+ context.set_deadline(grpc_timeout_milliseconds_to_deadline(5000));
+ context.AddMetadata(kServerResponseStreamsToSend, "1");
+
+ auto channel = GetChannel();
+ GPR_ASSERT(channel);
+ EXPECT_TRUE(
+ channel->WaitForConnected(grpc_timeout_milliseconds_to_deadline(5000)));
+ auto stub = CreateStub();
+
+ // time jumps back by TIME_OFFSET1 milliseconds
+ set_now_offset(-TIME_OFFSET1);
+ auto stream = stub->BidiStream(&context);
+ request.set_message("Hello");
+ EXPECT_TRUE(stream->Write(request));
+
+ EXPECT_TRUE(stream->WritesDone());
+ EXPECT_TRUE(stream->Read(&response));
+ EXPECT_EQ(request.message(), response.message());
+
+ auto status = stream->Finish();
+ EXPECT_TRUE(status.ok());
+}
+
+// Wall-clock time jumps forward on client while call is in progress
+TEST_F(TimeChangeTest, TimeJumpForwardAfterStreamCreated) {
+ EchoRequest request;
+ EchoResponse response;
+ ClientContext context;
+ context.set_deadline(grpc_timeout_milliseconds_to_deadline(5000));
+ context.AddMetadata(kServerResponseStreamsToSend, "2");
+
+ auto channel = GetChannel();
+ GPR_ASSERT(channel);
+ EXPECT_TRUE(
+ channel->WaitForConnected(grpc_timeout_milliseconds_to_deadline(5000)));
+ auto stub = CreateStub();
+
+ auto stream = stub->BidiStream(&context);
+
+ request.set_message("Hello");
+ EXPECT_TRUE(stream->Write(request));
+ EXPECT_TRUE(stream->Read(&response));
+
+ // time jumps forward by TIME_OFFSET1 milliseconds.
+ set_now_offset(TIME_OFFSET1);
+
+ request.set_message("World");
+ EXPECT_TRUE(stream->Write(request));
+ EXPECT_TRUE(stream->WritesDone());
+ EXPECT_TRUE(stream->Read(&response));
+
+ auto status = stream->Finish();
+ EXPECT_TRUE(status.ok());
+}
+
+// Wall-clock time jumps back on client while call is in progress
+TEST_F(TimeChangeTest, TimeJumpBackAfterStreamCreated) {
+ EchoRequest request;
+ EchoResponse response;
+ ClientContext context;
+ context.set_deadline(grpc_timeout_milliseconds_to_deadline(5000));
+ context.AddMetadata(kServerResponseStreamsToSend, "2");
+
+ auto channel = GetChannel();
+ GPR_ASSERT(channel);
+ EXPECT_TRUE(
+ channel->WaitForConnected(grpc_timeout_milliseconds_to_deadline(5000)));
+ auto stub = CreateStub();
+
+ auto stream = stub->BidiStream(&context);
+
+ request.set_message("Hello");
+ EXPECT_TRUE(stream->Write(request));
+ EXPECT_TRUE(stream->Read(&response));
+
+ // time jumps back TIME_OFFSET1 milliseconds.
+ set_now_offset(-TIME_OFFSET1);
+
+ request.set_message("World");
+ EXPECT_TRUE(stream->Write(request));
+ EXPECT_TRUE(stream->WritesDone());
+ EXPECT_TRUE(stream->Read(&response));
+
+ auto status = stream->Finish();
+ EXPECT_TRUE(status.ok());
+}
+
+// Wall-clock time jumps forward and backwards during call
+TEST_F(TimeChangeTest, TimeJumpForwardAndBackDuringCall) {
+ EchoRequest request;
+ EchoResponse response;
+ ClientContext context;
+ context.set_deadline(grpc_timeout_milliseconds_to_deadline(5000));
+ context.AddMetadata(kServerResponseStreamsToSend, "2");
+
+ auto channel = GetChannel();
+ GPR_ASSERT(channel);
+
+ EXPECT_TRUE(
+ channel->WaitForConnected(grpc_timeout_milliseconds_to_deadline(5000)));
+ auto stub = CreateStub();
+ auto stream = stub->BidiStream(&context);
+
+ request.set_message("Hello");
+ EXPECT_TRUE(stream->Write(request));
+
+ // time jumps back by TIME_OFFSET2 milliseconds
+ set_now_offset(-TIME_OFFSET2);
+
+ EXPECT_TRUE(stream->Read(&response));
+ request.set_message("World");
+
+ // time jumps forward by TIME_OFFSET1 milliseconds
+ set_now_offset(TIME_OFFSET1);
+
+ EXPECT_TRUE(stream->Write(request));
+
+ // time jumps back by TIME_OFFSET2 milliseconds
+ set_now_offset(-TIME_OFFSET2);
+
+ EXPECT_TRUE(stream->WritesDone());
+
+ // time jumps back by TIME_OFFSET2 milliseconds
+ set_now_offset(-TIME_OFFSET2);
+
+ EXPECT_TRUE(stream->Read(&response));
+
+ // time jumps back by TIME_OFFSET2 milliseconds
+ set_now_offset(-TIME_OFFSET2);
+
+ auto status = stream->Finish();
+ EXPECT_TRUE(status.ok());
+}
+
+} // namespace
+
+} // namespace testing
+} // namespace grpc
+
+int main(int argc, char** argv) {
TString me = argv[0];
- // get index of last slash in path to test binary
- auto lslash = me.rfind('/');
- // set g_root = path to directory containing test binary
+ // get index of last slash in path to test binary
+ auto lslash = me.rfind('/');
+ // set g_root = path to directory containing test binary
if (lslash != TString::npos) {
- g_root = me.substr(0, lslash);
- } else {
- g_root = ".";
- }
-
- gpr_mu_init(&g_mu);
- gpr_now_impl = now_impl;
-
- grpc::testing::TestEnvironment env(argc, argv);
- ::testing::InitGoogleTest(&argc, argv);
- auto ret = RUN_ALL_TESTS();
- return ret;
-}
+ g_root = me.substr(0, lslash);
+ } else {
+ g_root = ".";
+ }
+
+ gpr_mu_init(&g_mu);
+ gpr_now_impl = now_impl;
+
+ grpc::testing::TestEnvironment env(argc, argv);
+ ::testing::InitGoogleTest(&argc, argv);
+ auto ret = RUN_ALL_TESTS();
+ return ret;
+}
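
The main() hunk above installs a test clock by assigning gpr_now_impl = now_impl; and the tests drive it through set_now_offset(). The test's own now_impl/set_now_offset definitions sit earlier in time_change_test.cc and are not part of this hunk, so the following is only a minimal sketch of that pattern, assuming gpr_now_impl is the function pointer gpr_now() dispatches through; the names g_shim_mu, shifted_now, set_offset_ms and install_time_shim are illustrative, not taken from the diff.

#include <grpc/support/sync.h>
#include <grpc/support/time.h>

// Assumed declaration: the function pointer gpr_now() dispatches through.
// The real declaration lives inside gRPC, not in a public header.
extern gpr_timespec (*gpr_now_impl)(gpr_clock_type clock);

static gpr_mu g_shim_mu;
static int g_offset_ms = 0;  // current wall-clock shift, in milliseconds
static gpr_timespec (*g_orig_now)(gpr_clock_type clock) = nullptr;

// Returns the original time plus the configured offset, shifting only the
// realtime clock so monotonic-clock timers keep working.
static gpr_timespec shifted_now(gpr_clock_type clock) {
  gpr_timespec ts = g_orig_now(clock);
  if (clock == GPR_CLOCK_REALTIME) {
    gpr_mu_lock(&g_shim_mu);
    ts = gpr_time_add(ts, gpr_time_from_millis(g_offset_ms, GPR_TIMESPAN));
    gpr_mu_unlock(&g_shim_mu);
  }
  return ts;
}

// Analogue of the set_now_offset() calls in the tests: the offset is set
// absolutely, not accumulated.
static void set_offset_ms(int ms) {
  gpr_mu_lock(&g_shim_mu);
  g_offset_ms = ms;
  gpr_mu_unlock(&g_shim_mu);
}

// Mirrors the gpr_mu_init / gpr_now_impl assignment done in main() above.
static void install_time_shim() {
  gpr_mu_init(&g_shim_mu);
  g_orig_now = gpr_now_impl;
  gpr_now_impl = shifted_now;
}
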
diff --git a/contrib/libs/grpc/test/cpp/end2end/ya.make b/contrib/libs/grpc/test/cpp/end2end/ya.make
index 56b0d699f3..b9c1dc7fe0 100644
--- a/contrib/libs/grpc/test/cpp/end2end/ya.make
+++ b/contrib/libs/grpc/test/cpp/end2end/ya.make
@@ -1,67 +1,67 @@
-LIBRARY()
-
+LIBRARY()
+
LICENSE(Apache-2.0)
LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
-
+
OWNER(dvshkurko)
-PEERDIR(
- contrib/libs/grpc/src/proto/grpc/health/v1
- contrib/libs/grpc/src/proto/grpc/testing
- contrib/libs/grpc/src/proto/grpc/testing/duplicate
- contrib/libs/grpc/test/cpp/util
- contrib/libs/grpc
+PEERDIR(
+ contrib/libs/grpc/src/proto/grpc/health/v1
+ contrib/libs/grpc/src/proto/grpc/testing
+ contrib/libs/grpc/src/proto/grpc/testing/duplicate
+ contrib/libs/grpc/test/cpp/util
+ contrib/libs/grpc
contrib/restricted/googletest/googlemock
contrib/restricted/googletest/googletest
-)
-
+)
+
ADDINCL(
${ARCADIA_BUILD_ROOT}/contrib/libs/grpc
contrib/libs/grpc
)
-
-NO_COMPILER_WARNINGS()
-
-SRCS(
- # async_end2end_test.cc
- # channelz_service_test.cc
- # client_callback_end2end_test.cc
- # client_crash_test.cc
- # client_crash_test_server.cc
- # client_interceptors_end2end_test.cc
- # client_lb_end2end_test.cc lb needs opencensus, not enabled.
- # end2end_test.cc
- # exception_test.cc
- # filter_end2end_test.cc
- # generic_end2end_test.cc
- # grpclb_end2end_test.cc lb needs opencensus, not enabled.
- # health_service_end2end_test.cc
- # hybrid_end2end_test.cc
- interceptors_util.cc
- # mock_test.cc
- # nonblocking_test.cc
- # proto_server_reflection_test.cc
- # raw_end2end_test.cc
- # server_builder_plugin_test.cc
- # server_crash_test.cc
- # server_crash_test_client.cc
- # server_early_return_test.cc
- # server_interceptors_end2end_test.cc
- # server_load_reporting_end2end_test.cc
- # shutdown_test.cc
- # streaming_throughput_test.cc
- test_health_check_service_impl.cc
- test_service_impl.cc
- # thread_stress_test.cc
- # time_change_test.cc
-)
-
-END()
-
-RECURSE_FOR_TESTS(
- health
- server_interceptors
- # Needs new gtest
- # thread
-)
+
+NO_COMPILER_WARNINGS()
+
+SRCS(
+ # async_end2end_test.cc
+ # channelz_service_test.cc
+ # client_callback_end2end_test.cc
+ # client_crash_test.cc
+ # client_crash_test_server.cc
+ # client_interceptors_end2end_test.cc
+ # client_lb_end2end_test.cc lb needs opencensus, not enabled.
+ # end2end_test.cc
+ # exception_test.cc
+ # filter_end2end_test.cc
+ # generic_end2end_test.cc
+ # grpclb_end2end_test.cc lb needs opencensus, not enabled.
+ # health_service_end2end_test.cc
+ # hybrid_end2end_test.cc
+ interceptors_util.cc
+ # mock_test.cc
+ # nonblocking_test.cc
+ # proto_server_reflection_test.cc
+ # raw_end2end_test.cc
+ # server_builder_plugin_test.cc
+ # server_crash_test.cc
+ # server_crash_test_client.cc
+ # server_early_return_test.cc
+ # server_interceptors_end2end_test.cc
+ # server_load_reporting_end2end_test.cc
+ # shutdown_test.cc
+ # streaming_throughput_test.cc
+ test_health_check_service_impl.cc
+ test_service_impl.cc
+ # thread_stress_test.cc
+ # time_change_test.cc
+)
+
+END()
+
+RECURSE_FOR_TESTS(
+ health
+ server_interceptors
+ # Needs new gtest
+ # thread
+)
diff --git a/contrib/libs/grpc/test/cpp/util/byte_buffer_test.cc b/contrib/libs/grpc/test/cpp/util/byte_buffer_test.cc
index 2354b46f51..c63f351a8f 100644
--- a/contrib/libs/grpc/test/cpp/util/byte_buffer_test.cc
+++ b/contrib/libs/grpc/test/cpp/util/byte_buffer_test.cc
@@ -38,13 +38,13 @@ namespace {
const char* kContent1 = "hello xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx";
const char* kContent2 = "yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy world";
-class ByteBufferTest : public ::testing::Test {
- protected:
- static void SetUpTestCase() { grpc_init(); }
+class ByteBufferTest : public ::testing::Test {
+ protected:
+ static void SetUpTestCase() { grpc_init(); }
+
+ static void TearDownTestCase() { grpc_shutdown(); }
+};
- static void TearDownTestCase() { grpc_shutdown(); }
-};
-
TEST_F(ByteBufferTest, CopyCtor) {
ByteBuffer buffer1;
EXPECT_FALSE(buffer1.Valid());
diff --git a/contrib/libs/grpc/test/cpp/util/cli_call.h b/contrib/libs/grpc/test/cpp/util/cli_call.h
index 5284b00084..79d00d99f4 100644
--- a/contrib/libs/grpc/test/cpp/util/cli_call.h
+++ b/contrib/libs/grpc/test/cpp/util/cli_call.h
@@ -27,7 +27,7 @@
#include <map>
-namespace grpc {
+namespace grpc {
class ClientContext;
diff --git a/contrib/libs/grpc/test/cpp/util/cli_call_test.cc b/contrib/libs/grpc/test/cpp/util/cli_call_test.cc
index e99c6a28b9..4f0544b2e5 100644
--- a/contrib/libs/grpc/test/cpp/util/cli_call_test.cc
+++ b/contrib/libs/grpc/test/cpp/util/cli_call_test.cc
@@ -74,8 +74,8 @@ class CliCallTest : public ::testing::Test {
void TearDown() override { server_->Shutdown(); }
void ResetStub() {
- channel_ = grpc::CreateChannel(server_address_.str(),
- InsecureChannelCredentials());
+ channel_ = grpc::CreateChannel(server_address_.str(),
+ InsecureChannelCredentials());
stub_ = grpc::testing::EchoTestService::NewStub(channel_);
}
diff --git a/contrib/libs/grpc/test/cpp/util/cli_credentials.cc b/contrib/libs/grpc/test/cpp/util/cli_credentials.cc
index e7127f9296..efd548eb9b 100644
--- a/contrib/libs/grpc/test/cpp/util/cli_credentials.cc
+++ b/contrib/libs/grpc/test/cpp/util/cli_credentials.cc
@@ -41,11 +41,11 @@ DEFINE_string(
"validation.");
DEFINE_string(
ssl_client_cert, "",
- "If not empty, load this PEM formatted client certificate file. Requires "
+ "If not empty, load this PEM formatted client certificate file. Requires "
"use of --ssl_client_key.");
DEFINE_string(
ssl_client_key, "",
- "If not empty, load this PEM formatted private key. Requires use of "
+ "If not empty, load this PEM formatted private key. Requires use of "
"--ssl_client_cert");
DEFINE_string(
local_connect_type, "local_tcp",
diff --git a/contrib/libs/grpc/test/cpp/util/create_test_channel.cc b/contrib/libs/grpc/test/cpp/util/create_test_channel.cc
index d660428537..86d8e22af1 100644
--- a/contrib/libs/grpc/test/cpp/util/create_test_channel.cc
+++ b/contrib/libs/grpc/test/cpp/util/create_test_channel.cc
@@ -42,8 +42,8 @@ const char kProdTlsCredentialsType[] = "prod_ssl";
class SslCredentialProvider : public testing::CredentialTypeProvider {
public:
std::shared_ptr<ChannelCredentials> GetChannelCredentials(
- grpc::ChannelArguments* /*args*/) override {
- return grpc::SslCredentials(SslCredentialsOptions());
+ grpc::ChannelArguments* /*args*/) override {
+ return grpc::SslCredentials(SslCredentialsOptions());
}
std::shared_ptr<ServerCredentials> GetServerCredentials() override {
return nullptr;
@@ -90,76 +90,76 @@ std::shared_ptr<Channel> CreateTestChannel(
const TString& override_hostname, bool use_prod_roots,
const std::shared_ptr<CallCredentials>& creds,
const ChannelArguments& args) {
- return CreateTestChannel(server, cred_type, override_hostname, use_prod_roots,
- creds, args,
- /*interceptor_creators=*/{});
-}
-
-std::shared_ptr<Channel> CreateTestChannel(
+ return CreateTestChannel(server, cred_type, override_hostname, use_prod_roots,
+ creds, args,
+ /*interceptor_creators=*/{});
+}
+
+std::shared_ptr<Channel> CreateTestChannel(
const TString& server, const TString& override_hostname,
- testing::transport_security security_type, bool use_prod_roots,
- const std::shared_ptr<CallCredentials>& creds,
- const ChannelArguments& args) {
- return CreateTestChannel(server, override_hostname, security_type,
- use_prod_roots, creds, args,
- /*interceptor_creators=*/{});
-}
-
-std::shared_ptr<Channel> CreateTestChannel(
+ testing::transport_security security_type, bool use_prod_roots,
+ const std::shared_ptr<CallCredentials>& creds,
+ const ChannelArguments& args) {
+ return CreateTestChannel(server, override_hostname, security_type,
+ use_prod_roots, creds, args,
+ /*interceptor_creators=*/{});
+}
+
+std::shared_ptr<Channel> CreateTestChannel(
const TString& server, const TString& override_hostname,
- testing::transport_security security_type, bool use_prod_roots,
- const std::shared_ptr<CallCredentials>& creds) {
- return CreateTestChannel(server, override_hostname, security_type,
- use_prod_roots, creds, ChannelArguments());
-}
-
-std::shared_ptr<Channel> CreateTestChannel(
+ testing::transport_security security_type, bool use_prod_roots,
+ const std::shared_ptr<CallCredentials>& creds) {
+ return CreateTestChannel(server, override_hostname, security_type,
+ use_prod_roots, creds, ChannelArguments());
+}
+
+std::shared_ptr<Channel> CreateTestChannel(
const TString& server, const TString& override_hostname,
- testing::transport_security security_type, bool use_prod_roots) {
- return CreateTestChannel(server, override_hostname, security_type,
- use_prod_roots, std::shared_ptr<CallCredentials>());
-}
-
-// Shortcut for end2end and interop tests.
-std::shared_ptr<Channel> CreateTestChannel(
+ testing::transport_security security_type, bool use_prod_roots) {
+ return CreateTestChannel(server, override_hostname, security_type,
+ use_prod_roots, std::shared_ptr<CallCredentials>());
+}
+
+// Shortcut for end2end and interop tests.
+std::shared_ptr<Channel> CreateTestChannel(
const TString& server, testing::transport_security security_type) {
- return CreateTestChannel(server, "foo.test.google.fr", security_type, false);
-}
-
-std::shared_ptr<Channel> CreateTestChannel(
+ return CreateTestChannel(server, "foo.test.google.fr", security_type, false);
+}
+
+std::shared_ptr<Channel> CreateTestChannel(
const TString& server, const TString& credential_type,
- const std::shared_ptr<CallCredentials>& creds) {
- ChannelArguments channel_args;
+ const std::shared_ptr<CallCredentials>& creds) {
+ ChannelArguments channel_args;
MaybeSetCustomChannelArgs(&channel_args);
- std::shared_ptr<ChannelCredentials> channel_creds =
- testing::GetCredentialsProvider()->GetChannelCredentials(credential_type,
- &channel_args);
- GPR_ASSERT(channel_creds != nullptr);
- if (creds.get()) {
- channel_creds = grpc::CompositeChannelCredentials(channel_creds, creds);
- }
- return ::grpc::CreateCustomChannel(server, channel_creds, channel_args);
-}
-
-std::shared_ptr<Channel> CreateTestChannel(
+ std::shared_ptr<ChannelCredentials> channel_creds =
+ testing::GetCredentialsProvider()->GetChannelCredentials(credential_type,
+ &channel_args);
+ GPR_ASSERT(channel_creds != nullptr);
+ if (creds.get()) {
+ channel_creds = grpc::CompositeChannelCredentials(channel_creds, creds);
+ }
+ return ::grpc::CreateCustomChannel(server, channel_creds, channel_args);
+}
+
+std::shared_ptr<Channel> CreateTestChannel(
const TString& server, const TString& cred_type,
const TString& override_hostname, bool use_prod_roots,
- const std::shared_ptr<CallCredentials>& creds, const ChannelArguments& args,
- std::vector<
- std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
- interceptor_creators) {
+ const std::shared_ptr<CallCredentials>& creds, const ChannelArguments& args,
+ std::vector<
+ std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
+ interceptor_creators) {
ChannelArguments channel_args(args);
MaybeSetCustomChannelArgs(&channel_args);
std::shared_ptr<ChannelCredentials> channel_creds;
if (cred_type.empty()) {
- if (interceptor_creators.empty()) {
- return ::grpc::CreateCustomChannel(server, InsecureChannelCredentials(),
+ if (interceptor_creators.empty()) {
+ return ::grpc::CreateCustomChannel(server, InsecureChannelCredentials(),
channel_args);
- } else {
- return experimental::CreateCustomChannelWithInterceptors(
+ } else {
+ return experimental::CreateCustomChannelWithInterceptors(
server, InsecureChannelCredentials(), channel_args,
- std::move(interceptor_creators));
- }
+ std::move(interceptor_creators));
+ }
} else if (cred_type == testing::kTlsCredentialsType) { // cred_type == "ssl"
if (use_prod_roots) {
gpr_once_init(&g_once_init_add_prod_ssl_provider, &AddProdSslType);
@@ -177,65 +177,65 @@ std::shared_ptr<Channel> CreateTestChannel(
const TString& connect_to = server.empty() ? override_hostname : server;
if (creds.get()) {
- channel_creds = grpc::CompositeChannelCredentials(channel_creds, creds);
+ channel_creds = grpc::CompositeChannelCredentials(channel_creds, creds);
+ }
+ if (interceptor_creators.empty()) {
+ return ::grpc::CreateCustomChannel(connect_to, channel_creds,
+ channel_args);
+ } else {
+ return experimental::CreateCustomChannelWithInterceptors(
+ connect_to, channel_creds, channel_args,
+ std::move(interceptor_creators));
}
- if (interceptor_creators.empty()) {
- return ::grpc::CreateCustomChannel(connect_to, channel_creds,
- channel_args);
- } else {
- return experimental::CreateCustomChannelWithInterceptors(
- connect_to, channel_creds, channel_args,
- std::move(interceptor_creators));
- }
} else {
channel_creds = testing::GetCredentialsProvider()->GetChannelCredentials(
cred_type, &channel_args);
GPR_ASSERT(channel_creds != nullptr);
- if (interceptor_creators.empty()) {
+ if (interceptor_creators.empty()) {
return ::grpc::CreateCustomChannel(server, channel_creds, channel_args);
- } else {
- return experimental::CreateCustomChannelWithInterceptors(
+ } else {
+ return experimental::CreateCustomChannelWithInterceptors(
server, channel_creds, channel_args, std::move(interceptor_creators));
- }
+ }
}
}
std::shared_ptr<Channel> CreateTestChannel(
const TString& server, const TString& override_hostname,
testing::transport_security security_type, bool use_prod_roots,
- const std::shared_ptr<CallCredentials>& creds, const ChannelArguments& args,
- std::vector<
- std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
- interceptor_creators) {
+ const std::shared_ptr<CallCredentials>& creds, const ChannelArguments& args,
+ std::vector<
+ std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
+ interceptor_creators) {
TString credential_type =
security_type == testing::ALTS
? testing::kAltsCredentialsType
: (security_type == testing::TLS ? testing::kTlsCredentialsType
: testing::kInsecureCredentialsType);
- return CreateTestChannel(server, credential_type, override_hostname,
- use_prod_roots, creds, args,
- std::move(interceptor_creators));
+ return CreateTestChannel(server, credential_type, override_hostname,
+ use_prod_roots, creds, args,
+ std::move(interceptor_creators));
}
std::shared_ptr<Channel> CreateTestChannel(
const TString& server, const TString& override_hostname,
testing::transport_security security_type, bool use_prod_roots,
- const std::shared_ptr<CallCredentials>& creds,
- std::vector<
- std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
- interceptor_creators) {
+ const std::shared_ptr<CallCredentials>& creds,
+ std::vector<
+ std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
+ interceptor_creators) {
return CreateTestChannel(server, override_hostname, security_type,
- use_prod_roots, creds, ChannelArguments(),
- std::move(interceptor_creators));
+ use_prod_roots, creds, ChannelArguments(),
+ std::move(interceptor_creators));
}
std::shared_ptr<Channel> CreateTestChannel(
const TString& server, const TString& credential_type,
- const std::shared_ptr<CallCredentials>& creds,
- std::vector<
- std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
- interceptor_creators) {
+ const std::shared_ptr<CallCredentials>& creds,
+ std::vector<
+ std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
+ interceptor_creators) {
ChannelArguments channel_args;
MaybeSetCustomChannelArgs(&channel_args);
std::shared_ptr<ChannelCredentials> channel_creds =
@@ -243,10 +243,10 @@ std::shared_ptr<Channel> CreateTestChannel(
&channel_args);
GPR_ASSERT(channel_creds != nullptr);
if (creds.get()) {
- channel_creds = grpc::CompositeChannelCredentials(channel_creds, creds);
+ channel_creds = grpc::CompositeChannelCredentials(channel_creds, creds);
}
- return experimental::CreateCustomChannelWithInterceptors(
- server, channel_creds, channel_args, std::move(interceptor_creators));
+ return experimental::CreateCustomChannelWithInterceptors(
+ server, channel_creds, channel_args, std::move(interceptor_creators));
}
} // namespace grpc
diff --git a/contrib/libs/grpc/test/cpp/util/create_test_channel.h b/contrib/libs/grpc/test/cpp/util/create_test_channel.h
index 9ce9ca7fc5..ed4ce6c11b 100644
--- a/contrib/libs/grpc/test/cpp/util/create_test_channel.h
+++ b/contrib/libs/grpc/test/cpp/util/create_test_channel.h
@@ -21,10 +21,10 @@
#include <memory>
-#include <grpcpp/channel.h>
-#include <grpcpp/impl/codegen/client_interceptor.h>
+#include <grpcpp/channel.h>
+#include <grpcpp/impl/codegen/client_interceptor.h>
#include <grpcpp/security/credentials.h>
-#include <grpcpp/support/channel_arguments.h>
+#include <grpcpp/support/channel_arguments.h>
namespace grpc {
class Channel;
@@ -63,37 +63,37 @@ std::shared_ptr<Channel> CreateTestChannel(
const TString& server, const TString& credential_type,
const std::shared_ptr<CallCredentials>& creds);
-std::shared_ptr<Channel> CreateTestChannel(
+std::shared_ptr<Channel> CreateTestChannel(
const TString& server, const TString& override_hostname,
- testing::transport_security security_type, bool use_prod_roots,
- const std::shared_ptr<CallCredentials>& creds,
- std::vector<
- std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
- interceptor_creators);
-
-std::shared_ptr<Channel> CreateTestChannel(
+ testing::transport_security security_type, bool use_prod_roots,
+ const std::shared_ptr<CallCredentials>& creds,
+ std::vector<
+ std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
+ interceptor_creators);
+
+std::shared_ptr<Channel> CreateTestChannel(
const TString& server, const TString& override_hostname,
- testing::transport_security security_type, bool use_prod_roots,
- const std::shared_ptr<CallCredentials>& creds, const ChannelArguments& args,
- std::vector<
- std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
- interceptor_creators);
-
-std::shared_ptr<Channel> CreateTestChannel(
+ testing::transport_security security_type, bool use_prod_roots,
+ const std::shared_ptr<CallCredentials>& creds, const ChannelArguments& args,
+ std::vector<
+ std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
+ interceptor_creators);
+
+std::shared_ptr<Channel> CreateTestChannel(
const TString& server, const TString& cred_type,
const TString& override_hostname, bool use_prod_roots,
- const std::shared_ptr<CallCredentials>& creds, const ChannelArguments& args,
- std::vector<
- std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
- interceptor_creators);
-
-std::shared_ptr<Channel> CreateTestChannel(
+ const std::shared_ptr<CallCredentials>& creds, const ChannelArguments& args,
+ std::vector<
+ std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
+ interceptor_creators);
+
+std::shared_ptr<Channel> CreateTestChannel(
const TString& server, const TString& credential_type,
- const std::shared_ptr<CallCredentials>& creds,
- std::vector<
- std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
- interceptor_creators);
-
+ const std::shared_ptr<CallCredentials>& creds,
+ std::vector<
+ std::unique_ptr<experimental::ClientInterceptorFactoryInterface>>
+ interceptor_creators);
+
} // namespace grpc
#endif // GRPC_TEST_CPP_UTIL_CREATE_TEST_CHANNEL_H
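
A short usage sketch of the interceptor-aware overload declared above; the target address and the choice of kInsecureCredentialsType are illustrative assumptions, not taken from this change.

#include <memory>
#include <vector>

#include "test/cpp/util/create_test_channel.h"
#include "test/cpp/util/test_credentials_provider.h"

// Builds a plaintext channel through the test credentials provider, with no
// per-call credentials and no client interceptors.
std::shared_ptr<grpc::Channel> MakeLocalTestChannel() {
  return grpc::CreateTestChannel(
      "localhost:50051", grpc::testing::kInsecureCredentialsType,
      /*creds=*/nullptr, /*interceptor_creators=*/{});
}
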
diff --git a/contrib/libs/grpc/test/cpp/util/grpc_tool.cc b/contrib/libs/grpc/test/cpp/util/grpc_tool.cc
index 850ff70803..30f3024e25 100644
--- a/contrib/libs/grpc/test/cpp/util/grpc_tool.cc
+++ b/contrib/libs/grpc/test/cpp/util/grpc_tool.cc
@@ -233,8 +233,8 @@ std::shared_ptr<grpc::Channel> CreateCliChannel(
args.SetString(GRPC_ARG_SERVICE_CONFIG,
FLAGS_default_service_config.c_str());
}
- return ::grpc::CreateCustomChannel(server_address, cred.GetCredentials(),
- args);
+ return ::grpc::CreateCustomChannel(server_address, cred.GetCredentials(),
+ args);
}
struct Command {
@@ -443,7 +443,7 @@ bool GrpcTool::ListServices(int argc, const char** argv,
return callback(output);
}
-bool GrpcTool::PrintType(int /*argc*/, const char** argv,
+bool GrpcTool::PrintType(int /*argc*/, const char** argv,
const CliCredentials& cred,
GrpcToolOutputCallback callback) {
CommandUsage(
@@ -485,8 +485,8 @@ bool GrpcTool::CallMethod(int argc, const char** argv,
" fallback when parsing request/response\n"
" --proto_path ; The search path of proto files, valid"
" only when --protofiles is given\n"
- " --noremotedb ; Don't attempt to use reflection service"
- " at all\n"
+ " --noremotedb ; Don't attempt to use reflection service"
+ " at all\n"
" --metadata ; The metadata to be sent to the server\n"
" --infile ; Input filename (defaults to stdin)\n"
" --outfile ; Output filename (defaults to stdout)\n"
@@ -618,7 +618,7 @@ bool GrpcTool::CallMethod(int argc, const char** argv,
call.WritesDoneAndWait();
read_thread.join();
- gpr_mu_destroy(&parser_mu);
+ gpr_mu_destroy(&parser_mu);
std::multimap<grpc::string_ref, grpc::string_ref> server_trailing_metadata;
Status status = call.Finish(&server_trailing_metadata);
@@ -847,8 +847,8 @@ bool GrpcTool::ParseMessage(int argc, const char** argv,
" fallback when parsing request/response\n"
" --proto_path ; The search path of proto files, valid"
" only when --protofiles is given\n"
- " --noremotedb ; Don't attempt to use reflection service"
- " at all\n"
+ " --noremotedb ; Don't attempt to use reflection service"
+ " at all\n"
" --infile ; Input filename (defaults to stdin)\n"
" --outfile ; Output filename (defaults to stdout)\n"
" --binary_input ; Input in binary format\n"
diff --git a/contrib/libs/grpc/test/cpp/util/grpc_tool_test.cc b/contrib/libs/grpc/test/cpp/util/grpc_tool_test.cc
index ebabc74772..ff610daadd 100644
--- a/contrib/libs/grpc/test/cpp/util/grpc_tool_test.cc
+++ b/contrib/libs/grpc/test/cpp/util/grpc_tool_test.cc
@@ -471,9 +471,9 @@ TEST_F(GrpcToolTest, TypeNotFound) {
const char* argv[] = {"grpc_cli", "type", server_address.c_str(),
"grpc.testing.DummyRequest"};
- EXPECT_TRUE(1 == GrpcToolMainLib(ArraySize(argv), argv, TestCliCredentials(),
- std::bind(PrintStream, &output_stream,
- std::placeholders::_1)));
+ EXPECT_TRUE(1 == GrpcToolMainLib(ArraySize(argv), argv, TestCliCredentials(),
+ std::bind(PrintStream, &output_stream,
+ std::placeholders::_1)));
ShutdownServer();
}
diff --git a/contrib/libs/grpc/test/cpp/util/metrics_server.cc b/contrib/libs/grpc/test/cpp/util/metrics_server.cc
index 69a17cf384..0493da053e 100644
--- a/contrib/libs/grpc/test/cpp/util/metrics_server.cc
+++ b/contrib/libs/grpc/test/cpp/util/metrics_server.cc
@@ -51,7 +51,7 @@ long QpsGauge::Get() {
}
grpc::Status MetricsServiceImpl::GetAllGauges(
- ServerContext* /*context*/, const EmptyMessage* /*request*/,
+ ServerContext* /*context*/, const EmptyMessage* /*request*/,
ServerWriter<GaugeResponse>* writer) {
gpr_log(GPR_DEBUG, "GetAllGauges called");
@@ -66,7 +66,7 @@ grpc::Status MetricsServiceImpl::GetAllGauges(
return Status::OK;
}
-grpc::Status MetricsServiceImpl::GetGauge(ServerContext* /*context*/,
+grpc::Status MetricsServiceImpl::GetGauge(ServerContext* /*context*/,
const GaugeRequest* request,
GaugeResponse* response) {
std::lock_guard<std::mutex> lock(mu_);
diff --git a/contrib/libs/grpc/test/cpp/util/metrics_server.h b/contrib/libs/grpc/test/cpp/util/metrics_server.h
index 2c3c358d1c..10ffa7b4dd 100644
--- a/contrib/libs/grpc/test/cpp/util/metrics_server.h
+++ b/contrib/libs/grpc/test/cpp/util/metrics_server.h
@@ -21,8 +21,8 @@
#include <map>
#include <mutex>
-#include <grpcpp/server.h>
-
+#include <grpcpp/server.h>
+
#include "src/proto/grpc/testing/metrics.grpc.pb.h"
#include "src/proto/grpc/testing/metrics.pb.h"
diff --git a/contrib/libs/grpc/test/cpp/util/proto_file_parser.h b/contrib/libs/grpc/test/cpp/util/proto_file_parser.h
index 8ae383001c..c0445641c7 100644
--- a/contrib/libs/grpc/test/cpp/util/proto_file_parser.h
+++ b/contrib/libs/grpc/test/cpp/util/proto_file_parser.h
@@ -62,7 +62,7 @@ class ProtoFileParser {
/// \param is_json_format if \c true the \c formatted_proto is treated as a
/// json-formatted proto, otherwise it is treated as a text-formatted
/// proto
- /// \return the serialised binary proto representation of \c formatted_proto
+ /// \return the serialised binary proto representation of \c formatted_proto
TString GetSerializedProtoFromMethod(const TString& method,
const TString& formatted_proto,
bool is_request,
@@ -71,7 +71,7 @@ class ProtoFileParser {
/// Converts a text or json string to its proto representation for the given
/// message type.
/// \param formatted_proto the text- or json-formatted proto string
- /// \return the serialised binary proto representation of \c formatted_proto
+ /// \return the serialised binary proto representation of \c formatted_proto
TString GetSerializedProtoFromMessageType(
const TString& message_type_name, const TString& formatted_proto,
bool is_json_format);
diff --git a/contrib/libs/grpc/test/cpp/util/proto_reflection_descriptor_database.cc b/contrib/libs/grpc/test/cpp/util/proto_reflection_descriptor_database.cc
index 488f382999..27a4c1e4cf 100644
--- a/contrib/libs/grpc/test/cpp/util/proto_reflection_descriptor_database.cc
+++ b/contrib/libs/grpc/test/cpp/util/proto_reflection_descriptor_database.cc
@@ -44,16 +44,16 @@ ProtoReflectionDescriptorDatabase::~ProtoReflectionDescriptorDatabase() {
Status status = stream_->Finish();
if (!status.ok()) {
if (status.error_code() == StatusCode::UNIMPLEMENTED) {
- fprintf(stderr,
+ fprintf(stderr,
"Reflection request not implemented; "
- "is the ServerReflection service enabled?\n");
- } else {
- fprintf(stderr,
- "ServerReflectionInfo rpc failed. Error code: %d, message: %s, "
- "debug info: %s\n",
- static_cast<int>(status.error_code()),
- status.error_message().c_str(),
- ctx_.debug_error_string().c_str());
+ "is the ServerReflection service enabled?\n");
+ } else {
+ fprintf(stderr,
+ "ServerReflectionInfo rpc failed. Error code: %d, message: %s, "
+ "debug info: %s\n",
+ static_cast<int>(status.error_code()),
+ status.error_message().c_str(),
+ ctx_.debug_error_string().c_str());
}
}
}
diff --git a/contrib/libs/grpc/test/cpp/util/proto_reflection_descriptor_database.h b/contrib/libs/grpc/test/cpp/util/proto_reflection_descriptor_database.h
index 6f6244278e..cdd6f0cccd 100644
--- a/contrib/libs/grpc/test/cpp/util/proto_reflection_descriptor_database.h
+++ b/contrib/libs/grpc/test/cpp/util/proto_reflection_descriptor_database.h
@@ -44,7 +44,7 @@ class ProtoReflectionDescriptorDatabase : public protobuf::DescriptorDatabase {
// The following four methods implement DescriptorDatabase interfaces.
//
- // Find a file by file name. Fills in *output and returns true if found.
+ // Find a file by file name. Fills in *output and returns true if found.
// Otherwise, returns false, leaving the contents of *output undefined.
bool FindFileByName(const google::protobuf::string& filename,
protobuf::FileDescriptorProto* output) override;
diff --git a/contrib/libs/grpc/test/cpp/util/slice_test.cc b/contrib/libs/grpc/test/cpp/util/slice_test.cc
index eeab4b298c..d7e945ae38 100644
--- a/contrib/libs/grpc/test/cpp/util/slice_test.cc
+++ b/contrib/libs/grpc/test/cpp/util/slice_test.cc
@@ -35,10 +35,10 @@ const char* kContent = "hello xxxxxxxxxxxxxxxxxxxx world";
class SliceTest : public ::testing::Test {
protected:
- static void SetUpTestCase() { grpc_init(); }
-
- static void TearDownTestCase() { grpc_shutdown(); }
-
+ static void SetUpTestCase() { grpc_init(); }
+
+ static void TearDownTestCase() { grpc_shutdown(); }
+
void CheckSliceSize(const Slice& s, const TString& content) {
EXPECT_EQ(content.size(), s.size());
}
@@ -82,7 +82,7 @@ TEST_F(SliceTest, SliceNew) {
}
TEST_F(SliceTest, SliceNewDoNothing) {
- Slice spp(const_cast<char*>(kContent), strlen(kContent), [](void* /*p*/) {});
+ Slice spp(const_cast<char*>(kContent), strlen(kContent), [](void* /*p*/) {});
CheckSlice(spp, kContent);
}
@@ -106,7 +106,7 @@ TEST_F(SliceTest, SliceNewWithUserData) {
TEST_F(SliceTest, SliceNewLen) {
Slice spp(const_cast<char*>(kContent), strlen(kContent),
- [](void* /*p*/, size_t l) { EXPECT_EQ(l, strlen(kContent)); });
+ [](void* /*p*/, size_t l) { EXPECT_EQ(l, strlen(kContent)); });
CheckSlice(spp, kContent);
}
diff --git a/contrib/libs/grpc/test/cpp/util/test_credentials_provider.cc b/contrib/libs/grpc/test/cpp/util/test_credentials_provider.cc
index 98a89b51ee..f7134b773f 100644
--- a/contrib/libs/grpc/test/cpp/util/test_credentials_provider.cc
+++ b/contrib/libs/grpc/test/cpp/util/test_credentials_provider.cc
@@ -19,50 +19,50 @@
#include "test/cpp/util/test_credentials_provider.h"
-#include <cstdio>
-#include <fstream>
-#include <iostream>
-
+#include <cstdio>
+#include <fstream>
+#include <iostream>
+
#include <mutex>
#include <unordered_map>
-#include <gflags/gflags.h>
+#include <gflags/gflags.h>
#include <grpc/support/log.h>
#include <grpc/support/sync.h>
-#include <grpcpp/security/server_credentials.h>
+#include <grpcpp/security/server_credentials.h>
#include "test/core/end2end/data/ssl_test_data.h"
-DEFINE_string(tls_cert_file, "", "The TLS cert file used when --use_tls=true");
-DEFINE_string(tls_key_file, "", "The TLS key file used when --use_tls=true");
-
+DEFINE_string(tls_cert_file, "", "The TLS cert file used when --use_tls=true");
+DEFINE_string(tls_key_file, "", "The TLS key file used when --use_tls=true");
+
namespace grpc {
namespace testing {
namespace {
TString ReadFile(const TString& src_path) {
- std::ifstream src;
- src.open(src_path, std::ifstream::in | std::ifstream::binary);
-
+ std::ifstream src;
+ src.open(src_path, std::ifstream::in | std::ifstream::binary);
+
TString contents;
- src.seekg(0, std::ios::end);
- contents.reserve(src.tellg());
- src.seekg(0, std::ios::beg);
- contents.assign((std::istreambuf_iterator<char>(src)),
- (std::istreambuf_iterator<char>()));
- return contents;
-}
-
+ src.seekg(0, std::ios::end);
+ contents.reserve(src.tellg());
+ src.seekg(0, std::ios::beg);
+ contents.assign((std::istreambuf_iterator<char>(src)),
+ (std::istreambuf_iterator<char>()));
+ return contents;
+}
+
class DefaultCredentialsProvider : public CredentialsProvider {
public:
- DefaultCredentialsProvider() {
- if (!FLAGS_tls_key_file.empty()) {
- custom_server_key_ = ReadFile(FLAGS_tls_key_file);
- }
- if (!FLAGS_tls_cert_file.empty()) {
- custom_server_cert_ = ReadFile(FLAGS_tls_cert_file);
- }
- }
+ DefaultCredentialsProvider() {
+ if (!FLAGS_tls_key_file.empty()) {
+ custom_server_key_ = ReadFile(FLAGS_tls_key_file);
+ }
+ if (!FLAGS_tls_cert_file.empty()) {
+ custom_server_cert_ = ReadFile(FLAGS_tls_cert_file);
+ }
+ }
~DefaultCredentialsProvider() override {}
void AddSecureType(
@@ -92,7 +92,7 @@ class DefaultCredentialsProvider : public CredentialsProvider {
} else if (type == grpc::testing::kTlsCredentialsType) {
SslCredentialsOptions ssl_opts = {test_root_cert, "", ""};
args->SetSslTargetNameOverride("foo.test.google.fr");
- return grpc::SslCredentials(ssl_opts);
+ return grpc::SslCredentials(ssl_opts);
} else if (type == grpc::testing::kGoogleDefaultCredentialsType) {
return grpc::GoogleDefaultCredentials();
} else {
@@ -118,15 +118,15 @@ class DefaultCredentialsProvider : public CredentialsProvider {
} else if (type == grpc::testing::kTlsCredentialsType) {
SslServerCredentialsOptions ssl_opts;
ssl_opts.pem_root_certs = "";
- if (!custom_server_key_.empty() && !custom_server_cert_.empty()) {
- SslServerCredentialsOptions::PemKeyCertPair pkcp = {
- custom_server_key_, custom_server_cert_};
- ssl_opts.pem_key_cert_pairs.push_back(pkcp);
- } else {
- SslServerCredentialsOptions::PemKeyCertPair pkcp = {test_server1_key,
- test_server1_cert};
- ssl_opts.pem_key_cert_pairs.push_back(pkcp);
- }
+ if (!custom_server_key_.empty() && !custom_server_cert_.empty()) {
+ SslServerCredentialsOptions::PemKeyCertPair pkcp = {
+ custom_server_key_, custom_server_cert_};
+ ssl_opts.pem_key_cert_pairs.push_back(pkcp);
+ } else {
+ SslServerCredentialsOptions::PemKeyCertPair pkcp = {test_server1_key,
+ test_server1_cert};
+ ssl_opts.pem_key_cert_pairs.push_back(pkcp);
+ }
return SslServerCredentials(ssl_opts);
} else {
std::unique_lock<std::mutex> lock(mu_);
diff --git a/contrib/libs/grpc/test/cpp/util/ya.make b/contrib/libs/grpc/test/cpp/util/ya.make
index 72346d2811..f043cc5b14 100644
--- a/contrib/libs/grpc/test/cpp/util/ya.make
+++ b/contrib/libs/grpc/test/cpp/util/ya.make
@@ -9,7 +9,7 @@ OWNER(orivej)
PEERDIR(
contrib/libs/gflags
contrib/libs/protoc
- contrib/libs/grpc/src/proto/grpc/reflection/v1alpha
+ contrib/libs/grpc/src/proto/grpc/reflection/v1alpha
contrib/restricted/googletest/googlemock
contrib/restricted/googletest/googletest
)
@@ -22,14 +22,14 @@ ADDINCL(
NO_COMPILER_WARNINGS()
SRCS(
- byte_buffer_proto_helper.cc
+ byte_buffer_proto_helper.cc
# grpc_cli_libs:
cli_call.cc
cli_credentials.cc
grpc_tool.cc
proto_file_parser.cc
service_describer.cc
- string_ref_helper.cc
+ string_ref_helper.cc
# grpc++_proto_reflection_desc_db:
proto_reflection_descriptor_database.cc
# grpc++_test_config: