aboutsummaryrefslogtreecommitdiffstats
path: root/contrib/python/grpcio
diff options
context:
space:
mode:
authornkozlovskiy <nmk@ydb.tech>2023-09-29 12:24:06 +0300
committernkozlovskiy <nmk@ydb.tech>2023-09-29 12:41:34 +0300
commite0e3e1717e3d33762ce61950504f9637a6e669ed (patch)
treebca3ff6939b10ed60c3d5c12439963a1146b9711 /contrib/python/grpcio
parent38f2c5852db84c7b4d83adfcb009eb61541d1ccd (diff)
downloadydb-e0e3e1717e3d33762ce61950504f9637a6e669ed.tar.gz
add ydb deps
Diffstat (limited to 'contrib/python/grpcio')
-rw-r--r--contrib/python/grpcio/py2/LICENSE610
-rw-r--r--contrib/python/grpcio/py2/README.md104
-rw-r--r--contrib/python/grpcio/py2/README.rst115
-rw-r--r--contrib/python/grpcio/py2/grpc/__init__.py2190
-rw-r--r--contrib/python/grpcio/py2/grpc/_auth.py62
-rw-r--r--contrib/python/grpcio/py2/grpc/_channel.py1585
-rw-r--r--contrib/python/grpcio/py2/grpc/_common.py168
-rw-r--r--contrib/python/grpcio/py2/grpc/_compression.py55
-rw-r--r--contrib/python/grpcio/py2/grpc/_cython/__init__.py13
-rw-r--r--contrib/python/grpcio/py2/grpc/_cython/_cygrpc/__init__.py13
-rw-r--r--contrib/python/grpcio/py2/grpc/_cython/_cygrpc/_hooks.pxd.pxi16
-rw-r--r--contrib/python/grpcio/py2/grpc/_cython/_cygrpc/_hooks.pyx.pxi35
-rw-r--r--contrib/python/grpcio/py2/grpc/_cython/_cygrpc/aio/call.pxd.pxi47
-rw-r--r--contrib/python/grpcio/py2/grpc/_cython/_cygrpc/aio/call.pyx.pxi508
-rw-r--r--contrib/python/grpcio/py2/grpc/_cython/_cygrpc/aio/callback_common.pxd.pxi57
-rw-r--r--contrib/python/grpcio/py2/grpc/_cython/_cygrpc/aio/callback_common.pyx.pxi184
-rw-r--r--contrib/python/grpcio/py2/grpc/_cython/_cygrpc/aio/channel.pxd.pxi27
-rw-r--r--contrib/python/grpcio/py2/grpc/_cython/_cygrpc/aio/channel.pyx.pxi133
-rw-r--r--contrib/python/grpcio/py2/grpc/_cython/_cygrpc/aio/common.pyx.pxi202
-rw-r--r--contrib/python/grpcio/py2/grpc/_cython/_cygrpc/aio/completion_queue.pxd.pxi52
-rw-r--r--contrib/python/grpcio/py2/grpc/_cython/_cygrpc/aio/completion_queue.pyx.pxi174
-rw-r--r--contrib/python/grpcio/py2/grpc/_cython/_cygrpc/aio/grpc_aio.pxd.pxi43
-rw-r--r--contrib/python/grpcio/py2/grpc/_cython/_cygrpc/aio/grpc_aio.pyx.pxi114
-rw-r--r--contrib/python/grpcio/py2/grpc/_cython/_cygrpc/aio/rpc_status.pxd.pxi29
-rw-r--r--contrib/python/grpcio/py2/grpc/_cython/_cygrpc/aio/rpc_status.pyx.pxi44
-rw-r--r--contrib/python/grpcio/py2/grpc/_cython/_cygrpc/aio/server.pxd.pxi92
-rw-r--r--contrib/python/grpcio/py2/grpc/_cython/_cygrpc/aio/server.pyx.pxi1093
-rw-r--r--contrib/python/grpcio/py2/grpc/_cython/_cygrpc/arguments.pxd.pxi36
-rw-r--r--contrib/python/grpcio/py2/grpc/_cython/_cygrpc/arguments.pyx.pxi85
-rw-r--r--contrib/python/grpcio/py2/grpc/_cython/_cygrpc/call.pxd.pxi20
-rw-r--r--contrib/python/grpcio/py2/grpc/_cython/_cygrpc/call.pyx.pxi97
-rw-r--r--contrib/python/grpcio/py2/grpc/_cython/_cygrpc/channel.pxd.pxi74
-rw-r--r--contrib/python/grpcio/py2/grpc/_cython/_cygrpc/channel.pyx.pxi516
-rw-r--r--contrib/python/grpcio/py2/grpc/_cython/_cygrpc/channelz.pyx.pxi71
-rw-r--r--contrib/python/grpcio/py2/grpc/_cython/_cygrpc/completion_queue.pxd.pxi32
-rw-r--r--contrib/python/grpcio/py2/grpc/_cython/_cygrpc/completion_queue.pyx.pxi137
-rw-r--r--contrib/python/grpcio/py2/grpc/_cython/_cygrpc/credentials.pxd.pxi117
-rw-r--r--contrib/python/grpcio/py2/grpc/_cython/_cygrpc/credentials.pyx.pxi442
-rw-r--r--contrib/python/grpcio/py2/grpc/_cython/_cygrpc/csds.pyx.pxi21
-rw-r--r--contrib/python/grpcio/py2/grpc/_cython/_cygrpc/event.pxd.pxi45
-rw-r--r--contrib/python/grpcio/py2/grpc/_cython/_cygrpc/event.pyx.pxi55
-rw-r--r--contrib/python/grpcio/py2/grpc/_cython/_cygrpc/fork_posix.pxd.pxi29
-rw-r--r--contrib/python/grpcio/py2/grpc/_cython/_cygrpc/fork_posix.pyx.pxi208
-rw-r--r--contrib/python/grpcio/py2/grpc/_cython/_cygrpc/fork_windows.pyx.pxi61
-rw-r--r--contrib/python/grpcio/py2/grpc/_cython/_cygrpc/grpc.pxi729
-rw-r--r--contrib/python/grpcio/py2/grpc/_cython/_cygrpc/grpc_gevent.pxd.pxi21
-rw-r--r--contrib/python/grpcio/py2/grpc/_cython/_cygrpc/grpc_gevent.pyx.pxi137
-rw-r--r--contrib/python/grpcio/py2/grpc/_cython/_cygrpc/grpc_string.pyx.pxi51
-rw-r--r--contrib/python/grpcio/py2/grpc/_cython/_cygrpc/metadata.pxd.pxi26
-rw-r--r--contrib/python/grpcio/py2/grpc/_cython/_cygrpc/metadata.pyx.pxi73
-rw-r--r--contrib/python/grpcio/py2/grpc/_cython/_cygrpc/operation.pxd.pxi111
-rw-r--r--contrib/python/grpcio/py2/grpc/_cython/_cygrpc/operation.pyx.pxi250
-rw-r--r--contrib/python/grpcio/py2/grpc/_cython/_cygrpc/propagation_bits.pxd.pxi20
-rw-r--r--contrib/python/grpcio/py2/grpc/_cython/_cygrpc/propagation_bits.pyx.pxi20
-rw-r--r--contrib/python/grpcio/py2/grpc/_cython/_cygrpc/records.pxd.pxi34
-rw-r--r--contrib/python/grpcio/py2/grpc/_cython/_cygrpc/records.pyx.pxi197
-rw-r--r--contrib/python/grpcio/py2/grpc/_cython/_cygrpc/security.pxd.pxi17
-rw-r--r--contrib/python/grpcio/py2/grpc/_cython/_cygrpc/security.pyx.pxi85
-rw-r--r--contrib/python/grpcio/py2/grpc/_cython/_cygrpc/server.pxd.pxi29
-rw-r--r--contrib/python/grpcio/py2/grpc/_cython/_cygrpc/server.pyx.pxi165
-rw-r--r--contrib/python/grpcio/py2/grpc/_cython/_cygrpc/tag.pxd.pxi58
-rw-r--r--contrib/python/grpcio/py2/grpc/_cython/_cygrpc/tag.pyx.pxi88
-rw-r--r--contrib/python/grpcio/py2/grpc/_cython/_cygrpc/thread.pyx.pxi59
-rw-r--r--contrib/python/grpcio/py2/grpc/_cython/_cygrpc/time.pxd.pxi19
-rw-r--r--contrib/python/grpcio/py2/grpc/_cython/_cygrpc/time.pyx.pxi29
-rw-r--r--contrib/python/grpcio/py2/grpc/_cython/_cygrpc/vtable.pxd.pxi23
-rw-r--r--contrib/python/grpcio/py2/grpc/_cython/_cygrpc/vtable.pyx.pxi36
-rw-r--r--contrib/python/grpcio/py2/grpc/_cython/cygrpc.pxd50
-rw-r--r--contrib/python/grpcio/py2/grpc/_cython/cygrpc.pyx94
-rw-r--r--contrib/python/grpcio/py2/grpc/_grpcio_metadata.py1
-rw-r--r--contrib/python/grpcio/py2/grpc/_interceptor.py562
-rw-r--r--contrib/python/grpcio/py2/grpc/_plugin_wrapping.py113
-rw-r--r--contrib/python/grpcio/py2/grpc/_runtime_protos.py155
-rw-r--r--contrib/python/grpcio/py2/grpc/_server.py1003
-rw-r--r--contrib/python/grpcio/py2/grpc/_utilities.py168
-rw-r--r--contrib/python/grpcio/py2/grpc/beta/__init__.py13
-rw-r--r--contrib/python/grpcio/py2/grpc/beta/_client_adaptations.py706
-rw-r--r--contrib/python/grpcio/py2/grpc/beta/_metadata.py52
-rw-r--r--contrib/python/grpcio/py2/grpc/beta/_server_adaptations.py385
-rw-r--r--contrib/python/grpcio/py2/grpc/beta/implementations.py311
-rw-r--r--contrib/python/grpcio/py2/grpc/beta/interfaces.py164
-rw-r--r--contrib/python/grpcio/py2/grpc/beta/utilities.py149
-rw-r--r--contrib/python/grpcio/py2/grpc/experimental/__init__.py128
-rw-r--r--contrib/python/grpcio/py2/grpc/experimental/gevent.py27
-rw-r--r--contrib/python/grpcio/py2/grpc/experimental/session_cache.py45
-rw-r--r--contrib/python/grpcio/py2/grpc/framework/__init__.py13
-rw-r--r--contrib/python/grpcio/py2/grpc/framework/common/__init__.py13
-rw-r--r--contrib/python/grpcio/py2/grpc/framework/common/cardinality.py26
-rw-r--r--contrib/python/grpcio/py2/grpc/framework/common/style.py24
-rw-r--r--contrib/python/grpcio/py2/grpc/framework/foundation/__init__.py13
-rw-r--r--contrib/python/grpcio/py2/grpc/framework/foundation/abandonment.py22
-rw-r--r--contrib/python/grpcio/py2/grpc/framework/foundation/callable_util.py96
-rw-r--r--contrib/python/grpcio/py2/grpc/framework/foundation/future.py221
-rw-r--r--contrib/python/grpcio/py2/grpc/framework/foundation/logging_pool.py71
-rw-r--r--contrib/python/grpcio/py2/grpc/framework/foundation/stream.py45
-rw-r--r--contrib/python/grpcio/py2/grpc/framework/foundation/stream_util.py148
-rw-r--r--contrib/python/grpcio/py2/grpc/framework/interfaces/__init__.py13
-rw-r--r--contrib/python/grpcio/py2/grpc/framework/interfaces/base/__init__.py13
-rw-r--r--contrib/python/grpcio/py2/grpc/framework/interfaces/base/base.py327
-rw-r--r--contrib/python/grpcio/py2/grpc/framework/interfaces/base/utilities.py71
-rw-r--r--contrib/python/grpcio/py2/grpc/framework/interfaces/face/__init__.py13
-rw-r--r--contrib/python/grpcio/py2/grpc/framework/interfaces/face/face.py1050
-rw-r--r--contrib/python/grpcio/py2/grpc/framework/interfaces/face/utilities.py168
-rw-r--r--contrib/python/grpcio/py2/ya.make89
-rw-r--r--contrib/python/grpcio/py3/LICENSE610
-rw-r--r--contrib/python/grpcio/py3/README.md104
-rw-r--r--contrib/python/grpcio/py3/README.rst115
-rw-r--r--contrib/python/grpcio/py3/grpc/__init__.py2174
-rw-r--r--contrib/python/grpcio/py3/grpc/_auth.py68
-rw-r--r--contrib/python/grpcio/py3/grpc/_channel.py1767
-rw-r--r--contrib/python/grpcio/py3/grpc/_common.py177
-rw-r--r--contrib/python/grpcio/py3/grpc/_compression.py63
-rw-r--r--contrib/python/grpcio/py3/grpc/_cython/__init__.py13
-rw-r--r--contrib/python/grpcio/py3/grpc/_cython/_cygrpc/__init__.py13
-rw-r--r--contrib/python/grpcio/py3/grpc/_cython/_cygrpc/_hooks.pxd.pxi16
-rw-r--r--contrib/python/grpcio/py3/grpc/_cython/_cygrpc/_hooks.pyx.pxi35
-rw-r--r--contrib/python/grpcio/py3/grpc/_cython/_cygrpc/aio/call.pxd.pxi47
-rw-r--r--contrib/python/grpcio/py3/grpc/_cython/_cygrpc/aio/call.pyx.pxi508
-rw-r--r--contrib/python/grpcio/py3/grpc/_cython/_cygrpc/aio/callback_common.pxd.pxi57
-rw-r--r--contrib/python/grpcio/py3/grpc/_cython/_cygrpc/aio/callback_common.pyx.pxi185
-rw-r--r--contrib/python/grpcio/py3/grpc/_cython/_cygrpc/aio/channel.pxd.pxi27
-rw-r--r--contrib/python/grpcio/py3/grpc/_cython/_cygrpc/aio/channel.pyx.pxi135
-rw-r--r--contrib/python/grpcio/py3/grpc/_cython/_cygrpc/aio/common.pyx.pxi202
-rw-r--r--contrib/python/grpcio/py3/grpc/_cython/_cygrpc/aio/completion_queue.pxd.pxi52
-rw-r--r--contrib/python/grpcio/py3/grpc/_cython/_cygrpc/aio/completion_queue.pyx.pxi174
-rw-r--r--contrib/python/grpcio/py3/grpc/_cython/_cygrpc/aio/grpc_aio.pxd.pxi43
-rw-r--r--contrib/python/grpcio/py3/grpc/_cython/_cygrpc/aio/grpc_aio.pyx.pxi114
-rw-r--r--contrib/python/grpcio/py3/grpc/_cython/_cygrpc/aio/rpc_status.pxd.pxi29
-rw-r--r--contrib/python/grpcio/py3/grpc/_cython/_cygrpc/aio/rpc_status.pyx.pxi44
-rw-r--r--contrib/python/grpcio/py3/grpc/_cython/_cygrpc/aio/server.pxd.pxi92
-rw-r--r--contrib/python/grpcio/py3/grpc/_cython/_cygrpc/aio/server.pyx.pxi1097
-rw-r--r--contrib/python/grpcio/py3/grpc/_cython/_cygrpc/arguments.pxd.pxi36
-rw-r--r--contrib/python/grpcio/py3/grpc/_cython/_cygrpc/arguments.pyx.pxi85
-rw-r--r--contrib/python/grpcio/py3/grpc/_cython/_cygrpc/call.pxd.pxi20
-rw-r--r--contrib/python/grpcio/py3/grpc/_cython/_cygrpc/call.pyx.pxi97
-rw-r--r--contrib/python/grpcio/py3/grpc/_cython/_cygrpc/channel.pxd.pxi74
-rw-r--r--contrib/python/grpcio/py3/grpc/_cython/_cygrpc/channel.pyx.pxi516
-rw-r--r--contrib/python/grpcio/py3/grpc/_cython/_cygrpc/channelz.pyx.pxi71
-rw-r--r--contrib/python/grpcio/py3/grpc/_cython/_cygrpc/completion_queue.pxd.pxi32
-rw-r--r--contrib/python/grpcio/py3/grpc/_cython/_cygrpc/completion_queue.pyx.pxi139
-rw-r--r--contrib/python/grpcio/py3/grpc/_cython/_cygrpc/credentials.pxd.pxi117
-rw-r--r--contrib/python/grpcio/py3/grpc/_cython/_cygrpc/credentials.pyx.pxi443
-rw-r--r--contrib/python/grpcio/py3/grpc/_cython/_cygrpc/csds.pyx.pxi21
-rw-r--r--contrib/python/grpcio/py3/grpc/_cython/_cygrpc/event.pxd.pxi47
-rw-r--r--contrib/python/grpcio/py3/grpc/_cython/_cygrpc/event.pyx.pxi54
-rw-r--r--contrib/python/grpcio/py3/grpc/_cython/_cygrpc/fork_posix.pxd.pxi29
-rw-r--r--contrib/python/grpcio/py3/grpc/_cython/_cygrpc/fork_posix.pyx.pxi208
-rw-r--r--contrib/python/grpcio/py3/grpc/_cython/_cygrpc/fork_windows.pyx.pxi61
-rw-r--r--contrib/python/grpcio/py3/grpc/_cython/_cygrpc/grpc.pxi735
-rw-r--r--contrib/python/grpcio/py3/grpc/_cython/_cygrpc/grpc_gevent.pxd.pxi21
-rw-r--r--contrib/python/grpcio/py3/grpc/_cython/_cygrpc/grpc_gevent.pyx.pxi137
-rw-r--r--contrib/python/grpcio/py3/grpc/_cython/_cygrpc/grpc_string.pyx.pxi51
-rw-r--r--contrib/python/grpcio/py3/grpc/_cython/_cygrpc/metadata.pxd.pxi26
-rw-r--r--contrib/python/grpcio/py3/grpc/_cython/_cygrpc/metadata.pyx.pxi73
-rw-r--r--contrib/python/grpcio/py3/grpc/_cython/_cygrpc/operation.pxd.pxi111
-rw-r--r--contrib/python/grpcio/py3/grpc/_cython/_cygrpc/operation.pyx.pxi250
-rw-r--r--contrib/python/grpcio/py3/grpc/_cython/_cygrpc/propagation_bits.pxd.pxi20
-rw-r--r--contrib/python/grpcio/py3/grpc/_cython/_cygrpc/propagation_bits.pyx.pxi20
-rw-r--r--contrib/python/grpcio/py3/grpc/_cython/_cygrpc/records.pxd.pxi34
-rw-r--r--contrib/python/grpcio/py3/grpc/_cython/_cygrpc/records.pyx.pxi201
-rw-r--r--contrib/python/grpcio/py3/grpc/_cython/_cygrpc/security.pxd.pxi17
-rw-r--r--contrib/python/grpcio/py3/grpc/_cython/_cygrpc/security.pyx.pxi85
-rw-r--r--contrib/python/grpcio/py3/grpc/_cython/_cygrpc/server.pxd.pxi29
-rw-r--r--contrib/python/grpcio/py3/grpc/_cython/_cygrpc/server.pyx.pxi165
-rw-r--r--contrib/python/grpcio/py3/grpc/_cython/_cygrpc/tag.pxd.pxi58
-rw-r--r--contrib/python/grpcio/py3/grpc/_cython/_cygrpc/tag.pyx.pxi88
-rw-r--r--contrib/python/grpcio/py3/grpc/_cython/_cygrpc/thread.pyx.pxi59
-rw-r--r--contrib/python/grpcio/py3/grpc/_cython/_cygrpc/time.pxd.pxi19
-rw-r--r--contrib/python/grpcio/py3/grpc/_cython/_cygrpc/time.pyx.pxi29
-rw-r--r--contrib/python/grpcio/py3/grpc/_cython/_cygrpc/vtable.pxd.pxi23
-rw-r--r--contrib/python/grpcio/py3/grpc/_cython/_cygrpc/vtable.pyx.pxi36
-rw-r--r--contrib/python/grpcio/py3/grpc/_cython/cygrpc.pxd50
-rw-r--r--contrib/python/grpcio/py3/grpc/_cython/cygrpc.pyx94
-rw-r--r--contrib/python/grpcio/py3/grpc/_grpcio_metadata.py1
-rw-r--r--contrib/python/grpcio/py3/grpc/_interceptor.py638
-rw-r--r--contrib/python/grpcio/py3/grpc/_plugin_wrapping.py121
-rw-r--r--contrib/python/grpcio/py3/grpc/_runtime_protos.py159
-rw-r--r--contrib/python/grpcio/py3/grpc/_server.py1141
-rw-r--r--contrib/python/grpcio/py3/grpc/_simple_stubs.py486
-rw-r--r--contrib/python/grpcio/py3/grpc/_typing.py58
-rw-r--r--contrib/python/grpcio/py3/grpc/_utilities.py180
-rw-r--r--contrib/python/grpcio/py3/grpc/aio/__init__.py95
-rw-r--r--contrib/python/grpcio/py3/grpc/aio/_base_call.py248
-rw-r--r--contrib/python/grpcio/py3/grpc/aio/_base_channel.py348
-rw-r--r--contrib/python/grpcio/py3/grpc/aio/_base_server.py369
-rw-r--r--contrib/python/grpcio/py3/grpc/aio/_call.py649
-rw-r--r--contrib/python/grpcio/py3/grpc/aio/_channel.py492
-rw-r--r--contrib/python/grpcio/py3/grpc/aio/_interceptor.py1001
-rw-r--r--contrib/python/grpcio/py3/grpc/aio/_metadata.py120
-rw-r--r--contrib/python/grpcio/py3/grpc/aio/_server.py209
-rw-r--r--contrib/python/grpcio/py3/grpc/aio/_typing.py35
-rw-r--r--contrib/python/grpcio/py3/grpc/aio/_utils.py22
-rw-r--r--contrib/python/grpcio/py3/grpc/beta/__init__.py13
-rw-r--r--contrib/python/grpcio/py3/grpc/beta/_client_adaptations.py706
-rw-r--r--contrib/python/grpcio/py3/grpc/beta/_metadata.py52
-rw-r--r--contrib/python/grpcio/py3/grpc/beta/_server_adaptations.py385
-rw-r--r--contrib/python/grpcio/py3/grpc/beta/implementations.py311
-rw-r--r--contrib/python/grpcio/py3/grpc/beta/interfaces.py163
-rw-r--r--contrib/python/grpcio/py3/grpc/beta/utilities.py149
-rw-r--r--contrib/python/grpcio/py3/grpc/experimental/__init__.py128
-rw-r--r--contrib/python/grpcio/py3/grpc/experimental/aio/__init__.py16
-rw-r--r--contrib/python/grpcio/py3/grpc/experimental/gevent.py27
-rw-r--r--contrib/python/grpcio/py3/grpc/experimental/session_cache.py45
-rw-r--r--contrib/python/grpcio/py3/grpc/framework/__init__.py13
-rw-r--r--contrib/python/grpcio/py3/grpc/framework/common/__init__.py13
-rw-r--r--contrib/python/grpcio/py3/grpc/framework/common/cardinality.py26
-rw-r--r--contrib/python/grpcio/py3/grpc/framework/common/style.py24
-rw-r--r--contrib/python/grpcio/py3/grpc/framework/foundation/__init__.py13
-rw-r--r--contrib/python/grpcio/py3/grpc/framework/foundation/abandonment.py22
-rw-r--r--contrib/python/grpcio/py3/grpc/framework/foundation/callable_util.py94
-rw-r--r--contrib/python/grpcio/py3/grpc/framework/foundation/future.py219
-rw-r--r--contrib/python/grpcio/py3/grpc/framework/foundation/logging_pool.py71
-rw-r--r--contrib/python/grpcio/py3/grpc/framework/foundation/stream.py43
-rw-r--r--contrib/python/grpcio/py3/grpc/framework/foundation/stream_util.py148
-rw-r--r--contrib/python/grpcio/py3/grpc/framework/interfaces/__init__.py13
-rw-r--r--contrib/python/grpcio/py3/grpc/framework/interfaces/base/__init__.py13
-rw-r--r--contrib/python/grpcio/py3/grpc/framework/interfaces/base/base.py325
-rw-r--r--contrib/python/grpcio/py3/grpc/framework/interfaces/base/utilities.py71
-rw-r--r--contrib/python/grpcio/py3/grpc/framework/interfaces/face/__init__.py13
-rw-r--r--contrib/python/grpcio/py3/grpc/framework/interfaces/face/face.py1049
-rw-r--r--contrib/python/grpcio/py3/grpc/framework/interfaces/face/utilities.py168
-rw-r--r--contrib/python/grpcio/py3/ya.make100
-rw-r--r--contrib/python/grpcio/ya.make18
223 files changed, 41915 insertions, 0 deletions
diff --git a/contrib/python/grpcio/py2/LICENSE b/contrib/python/grpcio/py2/LICENSE
new file mode 100644
index 0000000000..0e09a3e909
--- /dev/null
+++ b/contrib/python/grpcio/py2/LICENSE
@@ -0,0 +1,610 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+-----------------------------------------------------------
+
+BSD 3-Clause License
+
+Copyright 2016, Google Inc.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice,
+this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice,
+this list of conditions and the following disclaimer in the documentation
+and/or other materials provided with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its
+contributors may be used to endorse or promote products derived from this
+software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+THE POSSIBILITY OF SUCH DAMAGE.
+
+-----------------------------------------------------------
+
+Mozilla Public License Version 2.0
+==================================
+
+1. Definitions
+--------------
+
+1.1. "Contributor"
+ means each individual or legal entity that creates, contributes to
+ the creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+ means the combination of the Contributions of others (if any) used
+ by a Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+ means Source Code Form to which the initial Contributor has attached
+ the notice in Exhibit A, the Executable Form of such Source Code
+ Form, and Modifications of such Source Code Form, in each case
+ including portions thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ (a) that the initial Contributor has attached the notice described
+ in Exhibit B to the Covered Software; or
+
+ (b) that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the
+ terms of a Secondary License.
+
+1.6. "Executable Form"
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+ means a work that combines Covered Software with other material, in
+ a separate file or files, that is not Covered Software.
+
+1.8. "License"
+ means this document.
+
+1.9. "Licensable"
+ means having the right to grant, to the maximum extent possible,
+ whether at the time of the initial grant or subsequently, any and
+ all of the rights conveyed by this License.
+
+1.10. "Modifications"
+ means any of the following:
+
+ (a) any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered
+ Software; or
+
+ (b) any new file in Source Code Form that contains any Covered
+ Software.
+
+1.11. "Patent Claims" of a Contributor
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the
+ License, by the making, using, selling, offering for sale, having
+ made, import, or transfer of either its Contributions or its
+ Contributor Version.
+
+1.12. "Secondary License"
+ means either the GNU General Public License, Version 2.0, the GNU
+ Lesser General Public License, Version 2.1, the GNU Affero General
+ Public License, Version 3.0, or any later versions of those
+ licenses.
+
+1.13. "Source Code Form"
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that
+ controls, is controlled by, or is under common control with You. For
+ purposes of this definition, "control" means (a) the power, direct
+ or indirect, to cause the direction or management of such entity,
+ whether by contract or otherwise, or (b) ownership of more than
+ fifty percent (50%) of the outstanding shares or beneficial
+ ownership of such entity.
+
+2. License Grants and Conditions
+--------------------------------
+
+2.1. Grants
+
+Each Contributor hereby grants You a world-wide, royalty-free,
+non-exclusive license:
+
+(a) under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+(b) under Patent Claims of such Contributor to make, use, sell, offer
+ for sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+The licenses granted in Section 2.1 with respect to any Contribution
+become effective for each Contribution on the date the Contributor first
+distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+The licenses granted in this Section 2 are the only rights granted under
+this License. No additional rights or licenses will be implied from the
+distribution or licensing of Covered Software under this License.
+Notwithstanding Section 2.1(b) above, no patent license is granted by a
+Contributor:
+
+(a) for any code that a Contributor has removed from Covered Software;
+ or
+
+(b) for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+(c) under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+This License does not grant any rights in the trademarks, service marks,
+or logos of any Contributor (except as may be necessary to comply with
+the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+No Contributor makes additional grants as a result of Your choice to
+distribute the Covered Software under a subsequent version of this
+License (see Section 10.2) or under the terms of a Secondary License (if
+permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+Each Contributor represents that the Contributor believes its
+Contributions are its original creation(s) or it has sufficient rights
+to grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+This License is not intended to limit any rights You have under
+applicable copyright doctrines of fair use, fair dealing, or other
+equivalents.
+
+2.7. Conditions
+
+Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
+in Section 2.1.
+
+3. Responsibilities
+-------------------
+
+3.1. Distribution of Source Form
+
+All distribution of Covered Software in Source Code Form, including any
+Modifications that You create or to which You contribute, must be under
+the terms of this License. You must inform recipients that the Source
+Code Form of the Covered Software is governed by the terms of this
+License, and how they can obtain a copy of this License. You may not
+attempt to alter or restrict the recipients' rights in the Source Code
+Form.
+
+3.2. Distribution of Executable Form
+
+If You distribute Covered Software in Executable Form then:
+
+(a) such Covered Software must also be made available in Source Code
+ Form, as described in Section 3.1, and You must inform recipients of
+ the Executable Form how they can obtain a copy of such Source Code
+ Form by reasonable means in a timely manner, at a charge no more
+ than the cost of distribution to the recipient; and
+
+(b) You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter
+ the recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+You may create and distribute a Larger Work under terms of Your choice,
+provided that You also comply with the requirements of this License for
+the Covered Software. If the Larger Work is a combination of Covered
+Software with a work governed by one or more Secondary Licenses, and the
+Covered Software is not Incompatible With Secondary Licenses, this
+License permits You to additionally distribute such Covered Software
+under the terms of such Secondary License(s), so that the recipient of
+the Larger Work may, at their option, further distribute the Covered
+Software under the terms of either this License or such Secondary
+License(s).
+
+3.4. Notices
+
+You may not remove or alter the substance of any license notices
+(including copyright notices, patent notices, disclaimers of warranty,
+or limitations of liability) contained within the Source Code Form of
+the Covered Software, except that You may alter any license notices to
+the extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+You may choose to offer, and to charge a fee for, warranty, support,
+indemnity or liability obligations to one or more recipients of Covered
+Software. However, You may do so only on Your own behalf, and not on
+behalf of any Contributor. You must make it absolutely clear that any
+such warranty, support, indemnity, or liability obligation is offered by
+You alone, and You hereby agree to indemnify every Contributor for any
+liability incurred by such Contributor as a result of warranty, support,
+indemnity or liability terms You offer. You may include additional
+disclaimers of warranty and limitations of liability specific to any
+jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+---------------------------------------------------
+
+If it is impossible for You to comply with any of the terms of this
+License with respect to some or all of the Covered Software due to
+statute, judicial order, or regulation then You must: (a) comply with
+the terms of this License to the maximum extent possible; and (b)
+describe the limitations and the code they affect. Such description must
+be placed in a text file included with all distributions of the Covered
+Software under this License. Except to the extent prohibited by statute
+or regulation, such description must be sufficiently detailed for a
+recipient of ordinary skill to be able to understand it.
+
+5. Termination
+--------------
+
+5.1. The rights granted under this License will terminate automatically
+if You fail to comply with any of its terms. However, if You become
+compliant, then the rights granted under this License from a particular
+Contributor are reinstated (a) provisionally, unless and until such
+Contributor explicitly and finally terminates Your grants, and (b) on an
+ongoing basis, if such Contributor fails to notify You of the
+non-compliance by some reasonable means prior to 60 days after You have
+come back into compliance. Moreover, Your grants from a particular
+Contributor are reinstated on an ongoing basis if such Contributor
+notifies You of the non-compliance by some reasonable means, this is the
+first time You have received notice of non-compliance with this License
+from such Contributor, and You become compliant prior to 30 days after
+Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+infringement claim (excluding declaratory judgment actions,
+counter-claims, and cross-claims) alleging that a Contributor Version
+directly or indirectly infringes any patent, then the rights granted to
+You by any and all Contributors for the Covered Software under Section
+2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all
+end user license agreements (excluding distributors and resellers) which
+have been validly granted by You or Your distributors under this License
+prior to termination shall survive termination.
+
+************************************************************************
+* *
+* 6. Disclaimer of Warranty *
+* ------------------------- *
+* *
+* Covered Software is provided under this License on an "as is" *
+* basis, without warranty of any kind, either expressed, implied, or *
+* statutory, including, without limitation, warranties that the *
+* Covered Software is free of defects, merchantable, fit for a *
+* particular purpose or non-infringing. The entire risk as to the *
+* quality and performance of the Covered Software is with You. *
+* Should any Covered Software prove defective in any respect, You *
+* (not any Contributor) assume the cost of any necessary servicing, *
+* repair, or correction. This disclaimer of warranty constitutes an *
+* essential part of this License. No use of any Covered Software is *
+* authorized under this License except under this disclaimer. *
+* *
+************************************************************************
+
+************************************************************************
+* *
+* 7. Limitation of Liability *
+* -------------------------- *
+* *
+* Under no circumstances and under no legal theory, whether tort *
+* (including negligence), contract, or otherwise, shall any *
+* Contributor, or anyone who distributes Covered Software as *
+* permitted above, be liable to You for any direct, indirect, *
+* special, incidental, or consequential damages of any character *
+* including, without limitation, damages for lost profits, loss of *
+* goodwill, work stoppage, computer failure or malfunction, or any *
+* and all other commercial damages or losses, even if such party *
+* shall have been informed of the possibility of such damages. This *
+* limitation of liability shall not apply to liability for death or *
+* personal injury resulting from such party's negligence to the *
+* extent applicable law prohibits such limitation. Some *
+* jurisdictions do not allow the exclusion or limitation of *
+* incidental or consequential damages, so this exclusion and *
+* limitation may not apply to You. *
+* *
+************************************************************************
+
+8. Litigation
+-------------
+
+Any litigation relating to this License may be brought only in the
+courts of a jurisdiction where the defendant maintains its principal
+place of business and such litigation shall be governed by laws of that
+jurisdiction, without reference to its conflict-of-law provisions.
+Nothing in this Section shall prevent a party's ability to bring
+cross-claims or counter-claims.
+
+9. Miscellaneous
+----------------
+
+This License represents the complete agreement concerning the subject
+matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed only to the extent
+necessary to make it enforceable. Any law or regulation which provides
+that the language of a contract shall be construed against the drafter
+shall not be used to construe this License against a Contributor.
+
+10. Versions of the License
+---------------------------
+
+10.1. New Versions
+
+Mozilla Foundation is the license steward. Except as provided in Section
+10.3, no one other than the license steward has the right to modify or
+publish new versions of this License. Each version will be given a
+distinguishing version number.
+
+10.2. Effect of New Versions
+
+You may distribute the Covered Software under the terms of the version
+of the License under which You originally received the Covered Software,
+or under the terms of any subsequent version published by the license
+steward.
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a
+modified version of this License if you rename the license and remove
+any references to the name of the license steward (except to note that
+such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+ This Source Code Form is subject to the terms of the Mozilla Public
+ License, v. 2.0. If a copy of the MPL was not distributed with this
+ file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+ This Source Code Form is "Incompatible With Secondary Licenses", as
+ defined by the Mozilla Public License, v. 2.0.
diff --git a/contrib/python/grpcio/py2/README.md b/contrib/python/grpcio/py2/README.md
new file mode 100644
index 0000000000..e9a18c358a
--- /dev/null
+++ b/contrib/python/grpcio/py2/README.md
@@ -0,0 +1,104 @@
+# gRPC – An RPC library and framework
+
+gRPC is a modern, open source, high-performance remote procedure call (RPC)
+framework that can run anywhere. gRPC enables client and server applications to
+communicate transparently, and simplifies the building of connected systems.
+
+<table>
+ <tr>
+ <td><b>Homepage:</b></td>
+ <td><a href="https://grpc.io/">grpc.io</a></td>
+ </tr>
+ <tr>
+ <td><b>Mailing List:</b></td>
+ <td><a href="https://groups.google.com/forum/#!forum/grpc-io">grpc-io@googlegroups.com</a></td>
+ </tr>
+</table>
+
+[![Join the chat at https://gitter.im/grpc/grpc](https://badges.gitter.im/grpc/grpc.svg)](https://gitter.im/grpc/grpc?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
+
+## To start using gRPC
+
+To maximize usability, gRPC supports the standard method for adding dependencies
+to a user's chosen language (if there is one). In most languages, the gRPC
+runtime comes as a package available in a user's language package manager.
+
+For instructions on how to use the language-specific gRPC runtime for a project,
+please refer to these documents
+
+- [C++](src/cpp): follow the instructions under the `src/cpp` directory
+- [C#](src/csharp): NuGet package `Grpc`
+- [Dart](https://github.com/grpc/grpc-dart): pub package `grpc`
+- [Go](https://github.com/grpc/grpc-go): `go get google.golang.org/grpc`
+- [Java](https://github.com/grpc/grpc-java): Use JARs from Maven Central
+ Repository
+- [Kotlin](https://github.com/grpc/grpc-kotlin): Use JARs from Maven Central
+ Repository
+- [Node](https://github.com/grpc/grpc-node): `npm install grpc`
+- [Objective-C](src/objective-c): Add `gRPC-ProtoRPC` dependency to podspec
+- [PHP](src/php): `pecl install grpc`
+- [Python](src/python/grpcio): `pip install grpcio`
+- [Ruby](src/ruby): `gem install grpc`
+- [WebJS](https://github.com/grpc/grpc-web): follow the grpc-web instructions
+
+Per-language quickstart guides and tutorials can be found in the
+[documentation section on the grpc.io website](https://grpc.io/docs/). Code
+examples are available in the [examples](examples) directory.
+
+Precompiled bleeding-edge package builds of gRPC `master` branch's `HEAD` are
+uploaded daily to [packages.grpc.io](https://packages.grpc.io).
+
+## To start developing gRPC
+
+Contributions are welcome!
+
+Please read [How to contribute](CONTRIBUTING.md) which will guide you through
+the entire workflow of how to build the source code, how to run the tests, and
+how to contribute changes to the gRPC codebase. The "How to contribute" document
+also contains info on how the contribution process works and contains best
+practices for creating contributions.
+
+## Troubleshooting
+
+Sometimes things go wrong. Please check out the
+[Troubleshooting guide](TROUBLESHOOTING.md) if you are experiencing issues with
+gRPC.
+
+## Performance
+
+See the
+[Performance dashboard](https://grafana-dot-grpc-testing.appspot.com/)
+for performance numbers of master branch daily builds.
+
+## Concepts
+
+See [gRPC Concepts](CONCEPTS.md)
+
+## About This Repository
+
+This repository contains source code for gRPC libraries implemented in multiple
+languages written on top of a shared C core library [src/core](src/core).
+
+Libraries in different languages may be in various states of development. We are
+seeking contributions for all of these libraries:
+
+| Language | Source |
+| ----------------------- | ---------------------------------- |
+| Shared C [core library] | [src/core](src/core) |
+| C++ | [src/cpp](src/cpp) |
+| Ruby | [src/ruby](src/ruby) |
+| Python | [src/python](src/python) |
+| PHP | [src/php](src/php) |
+| C# (core library based) | [src/csharp](src/csharp) |
+| Objective-C | [src/objective-c](src/objective-c) |
+
+| Language | Source repo |
+| -------------------- | -------------------------------------------------- |
+| Java | [grpc-java](https://github.com/grpc/grpc-java) |
+| Kotlin | [grpc-kotlin](https://github.com/grpc/grpc-kotlin) |
+| Go | [grpc-go](https://github.com/grpc/grpc-go) |
+| NodeJS | [grpc-node](https://github.com/grpc/grpc-node) |
+| WebJS | [grpc-web](https://github.com/grpc/grpc-web) |
+| Dart | [grpc-dart](https://github.com/grpc/grpc-dart) |
+| .NET (pure C# impl.) | [grpc-dotnet](https://github.com/grpc/grpc-dotnet) |
+| Swift | [grpc-swift](https://github.com/grpc/grpc-swift) |
diff --git a/contrib/python/grpcio/py2/README.rst b/contrib/python/grpcio/py2/README.rst
new file mode 100644
index 0000000000..f3e261db2e
--- /dev/null
+++ b/contrib/python/grpcio/py2/README.rst
@@ -0,0 +1,115 @@
+gRPC Python
+===========
+
+|compat_check_pypi|
+
+Package for gRPC Python.
+
+.. |compat_check_pypi| image:: https://python-compatibility-tools.appspot.com/one_badge_image?package=grpcio
+ :target: https://python-compatibility-tools.appspot.com/one_badge_target?package=grpcio
+
+Supported Python Versions
+-------------------------
+Python >= 3.7
+
+Installation
+------------
+
+gRPC Python is available for Linux, macOS, and Windows.
+
+Installing From PyPI
+~~~~~~~~~~~~~~~~~~~~
+
+If you are installing locally...
+
+::
+
+ $ pip install grpcio
+
+Else system wide (on Ubuntu)...
+
+::
+
+ $ sudo pip install grpcio
+
+If you're on Windows make sure that you installed the :code:`pip.exe` component
+when you installed Python (if not go back and install it!) then invoke:
+
+::
+
+ $ pip.exe install grpcio
+
+Windows users may need to invoke :code:`pip.exe` from a command line run as
+administrator.
+
+n.b. On Windows and on Mac OS X one *must* have a recent release of :code:`pip`
+to retrieve the proper wheel from PyPI. Be sure to upgrade to the latest
+version!
+
+Installing From Source
+~~~~~~~~~~~~~~~~~~~~~~
+
+Building from source requires that you have the Python headers (usually a
+package named :code:`python-dev`).
+
+::
+
+ $ export REPO_ROOT=grpc # REPO_ROOT can be any directory of your choice
+ $ git clone -b RELEASE_TAG_HERE https://github.com/grpc/grpc $REPO_ROOT
+ $ cd $REPO_ROOT
+ $ git submodule update --init
+
+ # For the next two commands do `sudo pip install` if you get permission-denied errors
+ $ pip install -rrequirements.txt
+ $ GRPC_PYTHON_BUILD_WITH_CYTHON=1 pip install .
+
+You cannot currently install gRPC Python from source on Windows. Things might
+work out for you in MSYS2 (follow the Linux instructions), but it isn't
+officially supported at the moment.
+
+Troubleshooting
+~~~~~~~~~~~~~~~
+
+Help, I ...
+
+* **... see a** :code:`pkg_resources.VersionConflict` **when I try to install
+ grpc**
+
+ This is likely because :code:`pip` doesn't own the offending dependency,
+ which in turn is likely because your operating system's package manager owns
+ it. You'll need to force the installation of the dependency:
+
+ :code:`pip install --ignore-installed $OFFENDING_DEPENDENCY`
+
+ For example, if you get an error like the following:
+
+ ::
+
+ Traceback (most recent call last):
+ File "<string>", line 17, in <module>
+ ...
+ File "/usr/lib/python2.7/dist-packages/pkg_resources.py", line 509, in find
+ raise VersionConflict(dist, req)
+ pkg_resources.VersionConflict: (six 1.8.0 (/usr/lib/python2.7/dist-packages), Requirement.parse('six>=1.10'))
+
+ You can fix it by doing:
+
+ ::
+
+ sudo pip install --ignore-installed six
+
+* **... see the following error on some platforms**
+
+ ::
+
+ /tmp/pip-build-U8pSsr/cython/Cython/Plex/Scanners.c:4:20: fatal error: Python.h: No such file or directory
+ #include "Python.h"
+ ^
+ compilation terminated.
+
+  You can fix it by installing the `python-dev` package, i.e.
+
+ ::
+
+ sudo apt-get install python-dev
+
diff --git a/contrib/python/grpcio/py2/grpc/__init__.py b/contrib/python/grpcio/py2/grpc/__init__.py
new file mode 100644
index 0000000000..bc645e5750
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/__init__.py
@@ -0,0 +1,2190 @@
+# Copyright 2015-2016 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""gRPC's Python API."""
+
+import abc
+import contextlib
+import enum
+import logging
+import sys
+
+from grpc import _compression
+from grpc._cython import cygrpc as _cygrpc
+from grpc._runtime_protos import protos
+from grpc._runtime_protos import protos_and_services
+from grpc._runtime_protos import services
+import six
+
+logging.getLogger(__name__).addHandler(logging.NullHandler())
+
+try:
+ # pylint: disable=ungrouped-imports
+ from grpc._grpcio_metadata import __version__
+except ImportError:
+ __version__ = "dev0"
+
+############################## Future Interface ###############################
+
+
class FutureTimeoutError(Exception):
    """Indicates that a method call on a Future timed out.

    Raised by Future.result(), Future.exception(), and Future.traceback()
    when a timeout value is passed and the underlying computation does not
    terminate within the allotted time.
    """
+
+
class FutureCancelledError(Exception):
    """Indicates that the computation underlying a Future was cancelled.

    Raised by Future.result(), Future.exception(), and Future.traceback()
    when the underlying computation was cancelled before its outcome became
    available.
    """
+
+
class Future(six.with_metaclass(abc.ABCMeta)):
    """A representation of a computation in another control flow.

    Computations represented by a Future may be yet to be begun,
    may be ongoing, or may have already completed.
    """

    @abc.abstractmethod
    def cancel(self):
        """Attempts to cancel the computation.

        This method does not block.

        Returns:
            bool:
            Returns True if the computation was canceled.

            Returns False under all other circumstances, for example:

            1. computation has begun and could not be canceled.
            2. computation has finished
            3. computation is scheduled for execution and it is impossible
                to determine its state without blocking.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def cancelled(self):
        """Describes whether the computation was cancelled.

        This method does not block.

        Returns:
            bool:
            Returns True if the computation was cancelled before its result became
            available.

            Returns False under all other circumstances, for example:

            1. computation was not cancelled.
            2. computation's result is available.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def running(self):
        """Describes whether the computation is taking place.

        This method does not block.

        Returns:
            bool:
            Returns True if the computation is scheduled for execution or
            currently executing.

            Returns False if the computation already executed or was cancelled.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def done(self):
        """Describes whether the computation has taken place.

        This method does not block.

        Returns:
            bool:
            Returns True if the computation already executed or was cancelled.
            Returns False if the computation is scheduled for execution or
            currently executing.
            This is exactly opposite of the running() method's result.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def result(self, timeout=None):
        """Returns the result of the computation or raises its exception.

        This method may return immediately or may block.

        Args:
          timeout: The length of time in seconds to wait for the computation to
            finish or be cancelled. If None, the call will block until the
            computations's termination.

        Returns:
          The return value of the computation.

        Raises:
          FutureTimeoutError: If a timeout value is passed and the computation
            does not terminate within the allotted time.
          FutureCancelledError: If the computation was cancelled.
          Exception: If the computation raised an exception, this call will
            raise the same exception.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def exception(self, timeout=None):
        """Return the exception raised by the computation.

        This method may return immediately or may block.

        Args:
          timeout: The length of time in seconds to wait for the computation to
            terminate or be cancelled. If None, the call will block until the
            computations's termination.

        Returns:
          The exception raised by the computation, or None if the computation
          did not raise an exception.

        Raises:
          FutureTimeoutError: If a timeout value is passed and the computation
            does not terminate within the allotted time.
          FutureCancelledError: If the computation was cancelled.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def traceback(self, timeout=None):
        """Access the traceback of the exception raised by the computation.

        This method may return immediately or may block.

        Args:
          timeout: The length of time in seconds to wait for the computation
            to terminate or be cancelled. If None, the call will block until
            the computation's termination.

        Returns:
          The traceback of the exception raised by the computation, or None
          if the computation did not raise an exception.

        Raises:
          FutureTimeoutError: If a timeout value is passed and the computation
            does not terminate within the allotted time.
          FutureCancelledError: If the computation was cancelled.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def add_done_callback(self, fn):
        """Adds a function to be called at completion of the computation.

        The callback will be passed this Future object describing the outcome
        of the computation. Callbacks will be invoked after the future is
        terminated, whether successfully or not.

        If the computation has already completed, the callback will be called
        immediately.

        Exceptions raised in the callback will be logged at ERROR level, but
        will not terminate any threads of execution.

        Args:
          fn: A callable taking this Future object as its single parameter.
        """
        raise NotImplementedError()
+
+
+################################ gRPC Enums ##################################
+
+
@enum.unique
class ChannelConnectivity(enum.Enum):
    """Mirrors grpc_connectivity_state in the gRPC Core.

    Attributes:
      IDLE: The channel is idle.
      CONNECTING: The channel is connecting.
      READY: The channel is ready to conduct RPCs.
      TRANSIENT_FAILURE: The channel has seen a failure from which it expects
        to recover.
      SHUTDOWN: The channel has seen a failure from which it cannot recover.
    """
    # Each member's value is a (core connectivity-state enum value,
    # human-readable description) pair.
    IDLE = (_cygrpc.ConnectivityState.idle, 'idle')
    CONNECTING = (_cygrpc.ConnectivityState.connecting, 'connecting')
    READY = (_cygrpc.ConnectivityState.ready, 'ready')
    TRANSIENT_FAILURE = (_cygrpc.ConnectivityState.transient_failure,
                         'transient failure')
    SHUTDOWN = (_cygrpc.ConnectivityState.shutdown, 'shutdown')
+
+
@enum.unique
class StatusCode(enum.Enum):
    """Mirrors grpc_status_code in the gRPC Core.

    Attributes:
      OK: Not an error; returned on success
      CANCELLED: The operation was cancelled (typically by the caller).
      UNKNOWN: Unknown error.
      INVALID_ARGUMENT: Client specified an invalid argument.
      DEADLINE_EXCEEDED: Deadline expired before operation could complete.
      NOT_FOUND: Some requested entity (e.g., file or directory) was not found.
      ALREADY_EXISTS: Some entity that we attempted to create (e.g., file or directory)
        already exists.
      PERMISSION_DENIED: The caller does not have permission to execute the specified
        operation.
      UNAUTHENTICATED: The request does not have valid authentication credentials for the
        operation.
      RESOURCE_EXHAUSTED: Some resource has been exhausted, perhaps a per-user quota, or
        perhaps the entire file system is out of space.
      FAILED_PRECONDITION: Operation was rejected because the system is not in a state
        required for the operation's execution.
      ABORTED: The operation was aborted, typically due to a concurrency issue
        like sequencer check failures, transaction aborts, etc.
      OUT_OF_RANGE: The operation was attempted past the valid range.
      UNIMPLEMENTED: Operation is not implemented or not supported/enabled in this service.
      INTERNAL: Internal errors. Means some invariants expected by underlying
        system has been broken.
      UNAVAILABLE: The service is currently unavailable.
      DATA_LOSS: Unrecoverable data loss or corruption.
    """
    # Each member's value is a (core status-code enum value, human-readable
    # description) pair.
    OK = (_cygrpc.StatusCode.ok, 'ok')
    CANCELLED = (_cygrpc.StatusCode.cancelled, 'cancelled')
    UNKNOWN = (_cygrpc.StatusCode.unknown, 'unknown')
    INVALID_ARGUMENT = (_cygrpc.StatusCode.invalid_argument, 'invalid argument')
    DEADLINE_EXCEEDED = (_cygrpc.StatusCode.deadline_exceeded,
                         'deadline exceeded')
    NOT_FOUND = (_cygrpc.StatusCode.not_found, 'not found')
    ALREADY_EXISTS = (_cygrpc.StatusCode.already_exists, 'already exists')
    PERMISSION_DENIED = (_cygrpc.StatusCode.permission_denied,
                         'permission denied')
    RESOURCE_EXHAUSTED = (_cygrpc.StatusCode.resource_exhausted,
                          'resource exhausted')
    FAILED_PRECONDITION = (_cygrpc.StatusCode.failed_precondition,
                           'failed precondition')
    ABORTED = (_cygrpc.StatusCode.aborted, 'aborted')
    OUT_OF_RANGE = (_cygrpc.StatusCode.out_of_range, 'out of range')
    UNIMPLEMENTED = (_cygrpc.StatusCode.unimplemented, 'unimplemented')
    INTERNAL = (_cygrpc.StatusCode.internal, 'internal')
    UNAVAILABLE = (_cygrpc.StatusCode.unavailable, 'unavailable')
    DATA_LOSS = (_cygrpc.StatusCode.data_loss, 'data loss')
    UNAUTHENTICATED = (_cygrpc.StatusCode.unauthenticated, 'unauthenticated')
+
+
+############################# gRPC Status ################################
+
+
class Status(six.with_metaclass(abc.ABCMeta)):
    """Describes the status of an RPC.

    This is an EXPERIMENTAL API.

    This class declares no abstract methods; implementations only need to
    provide the three attributes documented below.

    Attributes:
      code: A StatusCode object to be sent to the client.
      details: A UTF-8-encodable string to be sent to the client upon
        termination of the RPC.
      trailing_metadata: The trailing :term:`metadata` in the RPC.
    """
+
+
+############################# gRPC Exceptions ################################
+
+
class RpcError(Exception):
    """Raised by the gRPC library to indicate non-OK-status RPC termination."""
    # NOTE(review): concrete instances raised by the library presumably carry
    # the failing RPC's status code and details -- confirm against the
    # implementation in grpc/_channel.py before relying on that here.
+
+
+############################## Shared Context ################################
+
+
class RpcContext(six.with_metaclass(abc.ABCMeta)):
    """Provides RPC-related information and action."""

    @abc.abstractmethod
    def is_active(self):
        """Describes whether the RPC is active or has terminated.

        Returns:
          bool:
          True if RPC is active, False otherwise.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def time_remaining(self):
        """Describes the length of allowed time remaining for the RPC.

        Returns:
          A nonnegative float indicating the length of allowed time in seconds
          remaining for the RPC to complete before it is considered to have
          timed out, or None if no deadline was specified for the RPC.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def cancel(self):
        """Cancels the RPC.

        Idempotent and has no effect if the RPC has already terminated.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def add_callback(self, callback):
        """Registers a callback to be called on RPC termination.

        Args:
          callback: A no-parameter callable to be called on RPC termination.

        Returns:
          bool:
          True if the callback was added and will be called later; False if
          the callback was not added and will not be called (because the RPC
          already terminated or some other reason).
        """
        raise NotImplementedError()
+
+
+######################### Invocation-Side Context ############################
+
+
+class Call(six.with_metaclass(abc.ABCMeta, RpcContext)):
+ """Invocation-side utility object for an RPC."""
+
+ @abc.abstractmethod
+ def initial_metadata(self):
+ """Accesses the initial metadata sent by the server.
+
+ This method blocks until the value is available.
+
+ Returns:
+ The initial :term:`metadata`.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def trailing_metadata(self):
+ """Accesses the trailing metadata sent by the server.
+
+ This method blocks until the value is available.
+
+ Returns:
+ The trailing :term:`metadata`.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def code(self):
+ """Accesses the status code sent by the server.
+
+ This method blocks until the value is available.
+
+ Returns:
+ The StatusCode value for the RPC.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def details(self):
+ """Accesses the details sent by the server.
+
+ This method blocks until the value is available.
+
+ Returns:
+ The details string of the RPC.
+ """
+ raise NotImplementedError()
+
+
+############## Invocation-Side Interceptor Interfaces & Classes ##############
+
+
+class ClientCallDetails(six.with_metaclass(abc.ABCMeta)):
+ """Describes an RPC to be invoked.
+
+ Attributes:
+ method: The method name of the RPC.
+ timeout: An optional duration of time in seconds to allow for the RPC.
+ metadata: Optional :term:`metadata` to be transmitted to
+ the service-side of the RPC.
+ credentials: An optional CallCredentials for the RPC.
+ wait_for_ready: This is an EXPERIMENTAL argument. An optional
+ flag to enable :term:`wait_for_ready` mechanism.
+ compression: An element of grpc.compression, e.g.
+ grpc.compression.Gzip. This is an EXPERIMENTAL option.
+ """
+
+
+class UnaryUnaryClientInterceptor(six.with_metaclass(abc.ABCMeta)):
+ """Affords intercepting unary-unary invocations."""
+
+ @abc.abstractmethod
+ def intercept_unary_unary(self, continuation, client_call_details, request):
+ """Intercepts a unary-unary invocation asynchronously.
+
+ Args:
+ continuation: A function that proceeds with the invocation by
+ executing the next interceptor in chain or invoking the
+ actual RPC on the underlying Channel. It is the interceptor's
+ responsibility to call it if it decides to move the RPC forward.
+ The interceptor can use
+ `response_future = continuation(client_call_details, request)`
+ to continue with the RPC. `continuation` returns an object that is
+ both a Call for the RPC and a Future. In the event of RPC
+ completion, the return Call-Future's result value will be
+ the response message of the RPC. Should the event terminate
+ with non-OK status, the returned Call-Future's exception value
+ will be an RpcError.
+ client_call_details: A ClientCallDetails object describing the
+ outgoing RPC.
+ request: The request value for the RPC.
+
+ Returns:
+ An object that is both a Call for the RPC and a Future.
+ In the event of RPC completion, the return Call-Future's
+ result value will be the response message of the RPC.
+ Should the event terminate with non-OK status, the returned
+ Call-Future's exception value will be an RpcError.
+ """
+ raise NotImplementedError()
+
+
+class UnaryStreamClientInterceptor(six.with_metaclass(abc.ABCMeta)):
+ """Affords intercepting unary-stream invocations."""
+
+ @abc.abstractmethod
+ def intercept_unary_stream(self, continuation, client_call_details,
+ request):
+ """Intercepts a unary-stream invocation.
+
+ Args:
+ continuation: A function that proceeds with the invocation by
+ executing the next interceptor in chain or invoking the
+ actual RPC on the underlying Channel. It is the interceptor's
+ responsibility to call it if it decides to move the RPC forward.
+ The interceptor can use
+ `response_iterator = continuation(client_call_details, request)`
+ to continue with the RPC. `continuation` returns an object that is
+ both a Call for the RPC and an iterator for response values.
+ Drawing response values from the returned Call-iterator may
+ raise RpcError indicating termination of the RPC with non-OK
+ status.
+ client_call_details: A ClientCallDetails object describing the
+ outgoing RPC.
+ request: The request value for the RPC.
+
+ Returns:
+ An object that is both a Call for the RPC and an iterator of
+ response values. Drawing response values from the returned
+ Call-iterator may raise RpcError indicating termination of
+ the RPC with non-OK status. This object *should* also fulfill the
+ Future interface, though it may not.
+ """
+ raise NotImplementedError()
+
+
+class StreamUnaryClientInterceptor(six.with_metaclass(abc.ABCMeta)):
+ """Affords intercepting stream-unary invocations."""
+
+ @abc.abstractmethod
+ def intercept_stream_unary(self, continuation, client_call_details,
+ request_iterator):
+ """Intercepts a stream-unary invocation asynchronously.
+
+ Args:
+ continuation: A function that proceeds with the invocation by
+ executing the next interceptor in chain or invoking the
+ actual RPC on the underlying Channel. It is the interceptor's
+ responsibility to call it if it decides to move the RPC forward.
+ The interceptor can use
+ `response_future = continuation(client_call_details, request_iterator)`
+ to continue with the RPC. `continuation` returns an object that is
+ both a Call for the RPC and a Future. In the event of RPC completion,
+ the return Call-Future's result value will be the response message
+ of the RPC. Should the event terminate with non-OK status, the
+ returned Call-Future's exception value will be an RpcError.
+ client_call_details: A ClientCallDetails object describing the
+ outgoing RPC.
+ request_iterator: An iterator that yields request values for the RPC.
+
+ Returns:
+ An object that is both a Call for the RPC and a Future.
+ In the event of RPC completion, the return Call-Future's
+ result value will be the response message of the RPC.
+ Should the event terminate with non-OK status, the returned
+ Call-Future's exception value will be an RpcError.
+ """
+ raise NotImplementedError()
+
+
+class StreamStreamClientInterceptor(six.with_metaclass(abc.ABCMeta)):
+ """Affords intercepting stream-stream invocations."""
+
+ @abc.abstractmethod
+ def intercept_stream_stream(self, continuation, client_call_details,
+ request_iterator):
+ """Intercepts a stream-stream invocation.
+
+ Args:
+ continuation: A function that proceeds with the invocation by
+ executing the next interceptor in chain or invoking the
+ actual RPC on the underlying Channel. It is the interceptor's
+ responsibility to call it if it decides to move the RPC forward.
+ The interceptor can use
+ `response_iterator = continuation(client_call_details, request_iterator)`
+ to continue with the RPC. `continuation` returns an object that is
+ both a Call for the RPC and an iterator for response values.
+ Drawing response values from the returned Call-iterator may
+ raise RpcError indicating termination of the RPC with non-OK
+ status.
+ client_call_details: A ClientCallDetails object describing the
+ outgoing RPC.
+ request_iterator: An iterator that yields request values for the RPC.
+
+ Returns:
+ An object that is both a Call for the RPC and an iterator of
+ response values. Drawing response values from the returned
+ Call-iterator may raise RpcError indicating termination of
+ the RPC with non-OK status. This object *should* also fulfill the
+ Future interface, though it may not.
+ """
+ raise NotImplementedError()
+
+
+############ Authentication & Authorization Interfaces & Classes #############
+
+
+class ChannelCredentials(object):
+ """An encapsulation of the data required to create a secure Channel.
+
+ This class has no supported interface - it exists to define the type of its
+ instances and its instances exist to be passed to other functions. For
+ example, ssl_channel_credentials returns an instance of this class and
+ secure_channel requires an instance of this class.
+ """
+
+ def __init__(self, credentials):
+ self._credentials = credentials
+
+
+class CallCredentials(object):
+ """An encapsulation of the data required to assert an identity over a call.
+
+ A CallCredentials has to be used with secure Channel, otherwise the
+ metadata will not be transmitted to the server.
+
+ A CallCredentials may be composed with ChannelCredentials to always assert
+ identity for every call over that Channel.
+
+ This class has no supported interface - it exists to define the type of its
+ instances and its instances exist to be passed to other functions.
+ """
+
+ def __init__(self, credentials):
+ self._credentials = credentials
+
+
+class AuthMetadataContext(six.with_metaclass(abc.ABCMeta)):
+ """Provides information to call credentials metadata plugins.
+
+ Attributes:
+ service_url: A string URL of the service being called into.
+ method_name: A string of the fully qualified method name being called.
+ """
+
+
+class AuthMetadataPluginCallback(six.with_metaclass(abc.ABCMeta)):
+ """Callback object received by a metadata plugin."""
+
+ def __call__(self, metadata, error):
+ """Passes to the gRPC runtime authentication metadata for an RPC.
+
+ Args:
+ metadata: The :term:`metadata` used to construct the CallCredentials.
+ error: An Exception to indicate error or None to indicate success.
+ """
+ raise NotImplementedError()
+
+
+class AuthMetadataPlugin(six.with_metaclass(abc.ABCMeta)):
+ """A specification for custom authentication."""
+
+ def __call__(self, context, callback):
+ """Implements authentication by passing metadata to a callback.
+
+ This method will be invoked asynchronously in a separate thread.
+
+ Args:
+ context: An AuthMetadataContext providing information on the RPC that
+ the plugin is being called to authenticate.
+ callback: An AuthMetadataPluginCallback to be invoked either
+ synchronously or asynchronously.
+ """
+ raise NotImplementedError()
+
+
+class ServerCredentials(object):
+ """An encapsulation of the data required to open a secure port on a Server.
+
+ This class has no supported interface - it exists to define the type of its
+ instances and its instances exist to be passed to other functions.
+ """
+
+ def __init__(self, credentials):
+ self._credentials = credentials
+
+
+class ServerCertificateConfiguration(object):
+ """A certificate configuration for use with an SSL-enabled Server.
+
+ Instances of this class can be returned in the certificate configuration
+ fetching callback.
+
+ This class has no supported interface -- it exists to define the
+ type of its instances and its instances exist to be passed to
+ other functions.
+ """
+
+ def __init__(self, certificate_configuration):
+ self._certificate_configuration = certificate_configuration
+
+
+######################## Multi-Callable Interfaces ###########################
+
+
+class UnaryUnaryMultiCallable(six.with_metaclass(abc.ABCMeta)):
+ """Affords invoking a unary-unary RPC from client-side."""
+
+ @abc.abstractmethod
+ def __call__(self,
+ request,
+ timeout=None,
+ metadata=None,
+ credentials=None,
+ wait_for_ready=None,
+ compression=None):
+ """Synchronously invokes the underlying RPC.
+
+ Args:
+ request: The request value for the RPC.
+ timeout: An optional duration of time in seconds to allow
+ for the RPC.
+ metadata: Optional :term:`metadata` to be transmitted to the
+ service-side of the RPC.
+ credentials: An optional CallCredentials for the RPC. Only valid for
+ secure Channel.
+ wait_for_ready: This is an EXPERIMENTAL argument. An optional
+ flag to enable :term:`wait_for_ready` mechanism.
+ compression: An element of grpc.compression, e.g.
+ grpc.compression.Gzip. This is an EXPERIMENTAL option.
+
+ Returns:
+ The response value for the RPC.
+
+ Raises:
+ RpcError: Indicating that the RPC terminated with non-OK status. The
+ raised RpcError will also be a Call for the RPC affording the RPC's
+ metadata, status code, and details.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def with_call(self,
+ request,
+ timeout=None,
+ metadata=None,
+ credentials=None,
+ wait_for_ready=None,
+ compression=None):
+ """Synchronously invokes the underlying RPC.
+
+ Args:
+ request: The request value for the RPC.
+          timeout: An optional duration of time in seconds to allow for
+ the RPC.
+ metadata: Optional :term:`metadata` to be transmitted to the
+ service-side of the RPC.
+ credentials: An optional CallCredentials for the RPC. Only valid for
+ secure Channel.
+ wait_for_ready: This is an EXPERIMENTAL argument. An optional
+ flag to enable :term:`wait_for_ready` mechanism.
+ compression: An element of grpc.compression, e.g.
+ grpc.compression.Gzip. This is an EXPERIMENTAL option.
+
+ Returns:
+ The response value for the RPC and a Call value for the RPC.
+
+ Raises:
+ RpcError: Indicating that the RPC terminated with non-OK status. The
+ raised RpcError will also be a Call for the RPC affording the RPC's
+ metadata, status code, and details.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def future(self,
+ request,
+ timeout=None,
+ metadata=None,
+ credentials=None,
+ wait_for_ready=None,
+ compression=None):
+ """Asynchronously invokes the underlying RPC.
+
+ Args:
+ request: The request value for the RPC.
+ timeout: An optional duration of time in seconds to allow for
+ the RPC.
+ metadata: Optional :term:`metadata` to be transmitted to the
+ service-side of the RPC.
+ credentials: An optional CallCredentials for the RPC. Only valid for
+ secure Channel.
+ wait_for_ready: This is an EXPERIMENTAL argument. An optional
+ flag to enable :term:`wait_for_ready` mechanism.
+ compression: An element of grpc.compression, e.g.
+ grpc.compression.Gzip. This is an EXPERIMENTAL option.
+
+ Returns:
+ An object that is both a Call for the RPC and a Future.
+ In the event of RPC completion, the return Call-Future's result
+ value will be the response message of the RPC.
+ Should the event terminate with non-OK status,
+ the returned Call-Future's exception value will be an RpcError.
+ """
+ raise NotImplementedError()
+
+
+class UnaryStreamMultiCallable(six.with_metaclass(abc.ABCMeta)):
+ """Affords invoking a unary-stream RPC from client-side."""
+
+ @abc.abstractmethod
+ def __call__(self,
+ request,
+ timeout=None,
+ metadata=None,
+ credentials=None,
+ wait_for_ready=None,
+ compression=None):
+ """Invokes the underlying RPC.
+
+ Args:
+ request: The request value for the RPC.
+ timeout: An optional duration of time in seconds to allow for
+ the RPC. If None, the timeout is considered infinite.
+ metadata: An optional :term:`metadata` to be transmitted to the
+ service-side of the RPC.
+ credentials: An optional CallCredentials for the RPC. Only valid for
+ secure Channel.
+ wait_for_ready: This is an EXPERIMENTAL argument. An optional
+ flag to enable :term:`wait_for_ready` mechanism.
+ compression: An element of grpc.compression, e.g.
+ grpc.compression.Gzip. This is an EXPERIMENTAL option.
+
+ Returns:
+ An object that is a Call for the RPC, an iterator of response
+ values, and a Future for the RPC. Drawing response values from the
+ returned Call-iterator may raise RpcError indicating termination of
+ the RPC with non-OK status.
+ """
+ raise NotImplementedError()
+
+
+class StreamUnaryMultiCallable(six.with_metaclass(abc.ABCMeta)):
+ """Affords invoking a stream-unary RPC from client-side."""
+
+ @abc.abstractmethod
+ def __call__(self,
+ request_iterator,
+ timeout=None,
+ metadata=None,
+ credentials=None,
+ wait_for_ready=None,
+ compression=None):
+ """Synchronously invokes the underlying RPC.
+
+ Args:
+ request_iterator: An iterator that yields request values for
+ the RPC.
+ timeout: An optional duration of time in seconds to allow for
+ the RPC. If None, the timeout is considered infinite.
+ metadata: Optional :term:`metadata` to be transmitted to the
+ service-side of the RPC.
+ credentials: An optional CallCredentials for the RPC. Only valid for
+ secure Channel.
+ wait_for_ready: This is an EXPERIMENTAL argument. An optional
+ flag to enable :term:`wait_for_ready` mechanism.
+ compression: An element of grpc.compression, e.g.
+ grpc.compression.Gzip. This is an EXPERIMENTAL option.
+
+ Returns:
+ The response value for the RPC.
+
+ Raises:
+ RpcError: Indicating that the RPC terminated with non-OK status. The
+ raised RpcError will also implement grpc.Call, affording methods
+ such as metadata, code, and details.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def with_call(self,
+ request_iterator,
+ timeout=None,
+ metadata=None,
+ credentials=None,
+ wait_for_ready=None,
+ compression=None):
+ """Synchronously invokes the underlying RPC on the client.
+
+ Args:
+ request_iterator: An iterator that yields request values for
+ the RPC.
+ timeout: An optional duration of time in seconds to allow for
+ the RPC. If None, the timeout is considered infinite.
+ metadata: Optional :term:`metadata` to be transmitted to the
+ service-side of the RPC.
+ credentials: An optional CallCredentials for the RPC. Only valid for
+ secure Channel.
+ wait_for_ready: This is an EXPERIMENTAL argument. An optional
+ flag to enable :term:`wait_for_ready` mechanism.
+ compression: An element of grpc.compression, e.g.
+ grpc.compression.Gzip. This is an EXPERIMENTAL option.
+
+ Returns:
+ The response value for the RPC and a Call object for the RPC.
+
+ Raises:
+ RpcError: Indicating that the RPC terminated with non-OK status. The
+ raised RpcError will also be a Call for the RPC affording the RPC's
+ metadata, status code, and details.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def future(self,
+ request_iterator,
+ timeout=None,
+ metadata=None,
+ credentials=None,
+ wait_for_ready=None,
+ compression=None):
+ """Asynchronously invokes the underlying RPC on the client.
+
+ Args:
+ request_iterator: An iterator that yields request values for the RPC.
+ timeout: An optional duration of time in seconds to allow for
+ the RPC. If None, the timeout is considered infinite.
+ metadata: Optional :term:`metadata` to be transmitted to the
+ service-side of the RPC.
+ credentials: An optional CallCredentials for the RPC. Only valid for
+ secure Channel.
+ wait_for_ready: This is an EXPERIMENTAL argument. An optional
+ flag to enable :term:`wait_for_ready` mechanism.
+ compression: An element of grpc.compression, e.g.
+ grpc.compression.Gzip. This is an EXPERIMENTAL option.
+
+ Returns:
+ An object that is both a Call for the RPC and a Future.
+ In the event of RPC completion, the return Call-Future's result value
+ will be the response message of the RPC. Should the event terminate
+ with non-OK status, the returned Call-Future's exception value will
+ be an RpcError.
+ """
+ raise NotImplementedError()
+
+
+class StreamStreamMultiCallable(six.with_metaclass(abc.ABCMeta)):
+ """Affords invoking a stream-stream RPC on client-side."""
+
+ @abc.abstractmethod
+ def __call__(self,
+ request_iterator,
+ timeout=None,
+ metadata=None,
+ credentials=None,
+ wait_for_ready=None,
+ compression=None):
+ """Invokes the underlying RPC on the client.
+
+ Args:
+ request_iterator: An iterator that yields request values for the RPC.
+ timeout: An optional duration of time in seconds to allow for
+ the RPC. If not specified, the timeout is considered infinite.
+ metadata: Optional :term:`metadata` to be transmitted to the
+ service-side of the RPC.
+ credentials: An optional CallCredentials for the RPC. Only valid for
+ secure Channel.
+ wait_for_ready: This is an EXPERIMENTAL argument. An optional
+ flag to enable :term:`wait_for_ready` mechanism.
+ compression: An element of grpc.compression, e.g.
+ grpc.compression.Gzip. This is an EXPERIMENTAL option.
+
+ Returns:
+ An object that is a Call for the RPC, an iterator of response
+ values, and a Future for the RPC. Drawing response values from the
+ returned Call-iterator may raise RpcError indicating termination of
+ the RPC with non-OK status.
+ """
+ raise NotImplementedError()
+
+
+############################# Channel Interface ##############################
+
+
+class Channel(six.with_metaclass(abc.ABCMeta)):
+ """Affords RPC invocation via generic methods on client-side.
+
+ Channel objects implement the Context Manager type, although they need not
+ support being entered and exited multiple times.
+ """
+
+ @abc.abstractmethod
+ def subscribe(self, callback, try_to_connect=False):
+ """Subscribe to this Channel's connectivity state machine.
+
+ A Channel may be in any of the states described by ChannelConnectivity.
+ This method allows application to monitor the state transitions.
+ The typical use case is to debug or gain better visibility into gRPC
+ runtime's state.
+
+ Args:
+ callback: A callable to be invoked with ChannelConnectivity argument.
+ ChannelConnectivity describes current state of the channel.
+ The callable will be invoked immediately upon subscription
+ and again for every change to ChannelConnectivity until it
+ is unsubscribed or this Channel object goes out of scope.
+ try_to_connect: A boolean indicating whether or not this Channel
+ should attempt to connect immediately. If set to False, gRPC
+ runtime decides when to connect.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def unsubscribe(self, callback):
+ """Unsubscribes a subscribed callback from this Channel's connectivity.
+
+ Args:
+ callback: A callable previously registered with this Channel from
+ having been passed to its "subscribe" method.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def unary_unary(self,
+ method,
+ request_serializer=None,
+ response_deserializer=None):
+ """Creates a UnaryUnaryMultiCallable for a unary-unary method.
+
+ Args:
+ method: The name of the RPC method.
+ request_serializer: Optional :term:`serializer` for serializing the request
+ message. Request goes unserialized in case None is passed.
+ response_deserializer: Optional :term:`deserializer` for deserializing the
+ response message. Response goes undeserialized in case None
+ is passed.
+
+ Returns:
+ A UnaryUnaryMultiCallable value for the named unary-unary method.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def unary_stream(self,
+ method,
+ request_serializer=None,
+ response_deserializer=None):
+ """Creates a UnaryStreamMultiCallable for a unary-stream method.
+
+ Args:
+ method: The name of the RPC method.
+ request_serializer: Optional :term:`serializer` for serializing the request
+ message. Request goes unserialized in case None is passed.
+ response_deserializer: Optional :term:`deserializer` for deserializing the
+ response message. Response goes undeserialized in case None is
+ passed.
+
+ Returns:
+      A UnaryStreamMultiCallable value for the named unary-stream method.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def stream_unary(self,
+ method,
+ request_serializer=None,
+ response_deserializer=None):
+ """Creates a StreamUnaryMultiCallable for a stream-unary method.
+
+ Args:
+ method: The name of the RPC method.
+ request_serializer: Optional :term:`serializer` for serializing the request
+ message. Request goes unserialized in case None is passed.
+ response_deserializer: Optional :term:`deserializer` for deserializing the
+ response message. Response goes undeserialized in case None is
+ passed.
+
+ Returns:
+ A StreamUnaryMultiCallable value for the named stream-unary method.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def stream_stream(self,
+ method,
+ request_serializer=None,
+ response_deserializer=None):
+ """Creates a StreamStreamMultiCallable for a stream-stream method.
+
+ Args:
+ method: The name of the RPC method.
+ request_serializer: Optional :term:`serializer` for serializing the request
+ message. Request goes unserialized in case None is passed.
+ response_deserializer: Optional :term:`deserializer` for deserializing the
+ response message. Response goes undeserialized in case None
+ is passed.
+
+ Returns:
+ A StreamStreamMultiCallable value for the named stream-stream method.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def close(self):
+ """Closes this Channel and releases all resources held by it.
+
+ Closing the Channel will immediately terminate all RPCs active with the
+ Channel and it is not valid to invoke new RPCs with the Channel.
+
+ This method is idempotent.
+ """
+ raise NotImplementedError()
+
+ def __enter__(self):
+ """Enters the runtime context related to the channel object."""
+ raise NotImplementedError()
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ """Exits the runtime context related to the channel object."""
+ raise NotImplementedError()
+
+
+########################## Service-Side Context ##############################
+
+
+class ServicerContext(six.with_metaclass(abc.ABCMeta, RpcContext)):
+ """A context object passed to method implementations."""
+
+ @abc.abstractmethod
+ def invocation_metadata(self):
+ """Accesses the metadata from the sent by the client.
+
+ Returns:
+ The invocation :term:`metadata`.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def peer(self):
+ """Identifies the peer that invoked the RPC being serviced.
+
+ Returns:
+ A string identifying the peer that invoked the RPC being serviced.
+ The string format is determined by gRPC runtime.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def peer_identities(self):
+ """Gets one or more peer identity(s).
+
+ Equivalent to
+ servicer_context.auth_context().get(servicer_context.peer_identity_key())
+
+ Returns:
+ An iterable of the identities, or None if the call is not
+ authenticated. Each identity is returned as a raw bytes type.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def peer_identity_key(self):
+ """The auth property used to identify the peer.
+
+ For example, "x509_common_name" or "x509_subject_alternative_name" are
+ used to identify an SSL peer.
+
+ Returns:
+ The auth property (string) that indicates the
+ peer identity, or None if the call is not authenticated.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def auth_context(self):
+ """Gets the auth context for the call.
+
+ Returns:
+ A map of strings to an iterable of bytes for each auth property.
+ """
+ raise NotImplementedError()
+
+ def set_compression(self, compression):
+ """Set the compression algorithm to be used for the entire call.
+
+ This is an EXPERIMENTAL method.
+
+ Args:
+ compression: An element of grpc.compression, e.g.
+ grpc.compression.Gzip.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def send_initial_metadata(self, initial_metadata):
+ """Sends the initial metadata value to the client.
+
+ This method need not be called by implementations if they have no
+ metadata to add to what the gRPC runtime will transmit.
+
+ Args:
+ initial_metadata: The initial :term:`metadata`.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def set_trailing_metadata(self, trailing_metadata):
+ """Sets the trailing metadata for the RPC.
+
+ Sets the trailing metadata to be sent upon completion of the RPC.
+
+ If this method is invoked multiple times throughout the lifetime of an
+ RPC, the value supplied in the final invocation will be the value sent
+ over the wire.
+
+ This method need not be called by implementations if they have no
+ metadata to add to what the gRPC runtime will transmit.
+
+ Args:
+ trailing_metadata: The trailing :term:`metadata`.
+ """
+ raise NotImplementedError()
+
+ def trailing_metadata(self):
+ """Access value to be used as trailing metadata upon RPC completion.
+
+ This is an EXPERIMENTAL API.
+
+ Returns:
+ The trailing :term:`metadata` for the RPC.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def abort(self, code, details):
+ """Raises an exception to terminate the RPC with a non-OK status.
+
+        The code and details passed as arguments will supersede any existing
+        ones.
+
+ Args:
+ code: A StatusCode object to be sent to the client.
+ It must not be StatusCode.OK.
+ details: A UTF-8-encodable string to be sent to the client upon
+ termination of the RPC.
+
+ Raises:
+          Exception: An exception is always raised to signal the abortion of
+            the RPC to the gRPC runtime.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def abort_with_status(self, status):
+ """Raises an exception to terminate the RPC with a non-OK status.
+
+        The status passed as argument will supersede any existing status code,
+ status message and trailing metadata.
+
+ This is an EXPERIMENTAL API.
+
+ Args:
+ status: A grpc.Status object. The status code in it must not be
+ StatusCode.OK.
+
+ Raises:
+          Exception: An exception is always raised to signal the abortion of
+            the RPC to the gRPC runtime.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def set_code(self, code):
+ """Sets the value to be used as status code upon RPC completion.
+
+ This method need not be called by method implementations if they wish
+ the gRPC runtime to determine the status code of the RPC.
+
+ Args:
+ code: A StatusCode object to be sent to the client.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def set_details(self, details):
+ """Sets the value to be used as detail string upon RPC completion.
+
+ This method need not be called by method implementations if they have
+ no details to transmit.
+
+ Args:
+ details: A UTF-8-encodable string to be sent to the client upon
+ termination of the RPC.
+ """
+ raise NotImplementedError()
+
+ def code(self):
+ """Accesses the value to be used as status code upon RPC completion.
+
+ This is an EXPERIMENTAL API.
+
+ Returns:
+ The StatusCode value for the RPC.
+ """
+ raise NotImplementedError()
+
+ def details(self):
+ """Accesses the value to be used as detail string upon RPC completion.
+
+ This is an EXPERIMENTAL API.
+
+ Returns:
+ The details string of the RPC.
+ """
+ raise NotImplementedError()
+
+ def disable_next_message_compression(self):
+ """Disables compression for the next response message.
+
+ This is an EXPERIMENTAL method.
+
+ This method will override any compression configuration set during
+ server creation or set on the call.
+ """
+ raise NotImplementedError()
+
+
+##################### Service-Side Handler Interfaces ########################
+
+
+class RpcMethodHandler(six.with_metaclass(abc.ABCMeta)):
+ """An implementation of a single RPC method.
+
+ Attributes:
+ request_streaming: Whether the RPC supports exactly one request message
+ or any arbitrary number of request messages.
+ response_streaming: Whether the RPC supports exactly one response message
+ or any arbitrary number of response messages.
+ request_deserializer: A callable :term:`deserializer` that accepts a byte string and
+ returns an object suitable to be passed to this object's business
+ logic, or None to indicate that this object's business logic should be
+ passed the raw request bytes.
+ response_serializer: A callable :term:`serializer` that accepts an object produced
+ by this object's business logic and returns a byte string, or None to
+ indicate that the byte strings produced by this object's business logic
+ should be transmitted on the wire as they are.
+ unary_unary: This object's application-specific business logic as a
+ callable value that takes a request value and a ServicerContext object
+ and returns a response value. Only non-None if both request_streaming
+ and response_streaming are False.
+ unary_stream: This object's application-specific business logic as a
+ callable value that takes a request value and a ServicerContext object
+ and returns an iterator of response values. Only non-None if
+ request_streaming is False and response_streaming is True.
+ stream_unary: This object's application-specific business logic as a
+ callable value that takes an iterator of request values and a
+ ServicerContext object and returns a response value. Only non-None if
+ request_streaming is True and response_streaming is False.
+ stream_stream: This object's application-specific business logic as a
+ callable value that takes an iterator of request values and a
+ ServicerContext object and returns an iterator of response values.
+ Only non-None if request_streaming and response_streaming are both
+ True.
+ """
+
+
+class HandlerCallDetails(six.with_metaclass(abc.ABCMeta)):
+ """Describes an RPC that has just arrived for service.
+
+ Attributes:
+ method: The method name of the RPC.
+ invocation_metadata: The :term:`metadata` sent by the client.
+ """
+
+
+class GenericRpcHandler(six.with_metaclass(abc.ABCMeta)):
+ """An implementation of arbitrarily many RPC methods."""
+
+ @abc.abstractmethod
+ def service(self, handler_call_details):
+ """Returns the handler for servicing the RPC.
+
+ Args:
+ handler_call_details: A HandlerCallDetails describing the RPC.
+
+ Returns:
+ An RpcMethodHandler with which the RPC may be serviced if the
+ implementation chooses to service this RPC, or None otherwise.
+ """
+ raise NotImplementedError()
+
+
+class ServiceRpcHandler(six.with_metaclass(abc.ABCMeta, GenericRpcHandler)):
+ """An implementation of RPC methods belonging to a service.
+
+ A service handles RPC methods with structured names of the form
+ '/Service.Name/Service.Method', where 'Service.Name' is the value
+ returned by service_name(), and 'Service.Method' is the method
+ name. A service can have multiple method names, but only a single
+ service name.
+ """
+
+ @abc.abstractmethod
+ def service_name(self):
+ """Returns this service's name.
+
+ Returns:
+ The service name.
+ """
+ raise NotImplementedError()
+
+
+#################### Service-Side Interceptor Interfaces #####################
+
+
+class ServerInterceptor(six.with_metaclass(abc.ABCMeta)):
+ """Affords intercepting incoming RPCs on the service-side."""
+
+ @abc.abstractmethod
+ def intercept_service(self, continuation, handler_call_details):
+ """Intercepts incoming RPCs before handing them over to a handler.
+
+ Args:
+ continuation: A function that takes a HandlerCallDetails and
+ proceeds to invoke the next interceptor in the chain, if any,
+ or the RPC handler lookup logic, with the call details passed
+ as an argument, and returns an RpcMethodHandler instance if
+ the RPC is considered serviced, or None otherwise.
+ handler_call_details: A HandlerCallDetails describing the RPC.
+
+ Returns:
+ An RpcMethodHandler with which the RPC may be serviced if the
+ interceptor chooses to service this RPC, or None otherwise.
+ """
+ raise NotImplementedError()
+
+
+############################# Server Interface ###############################
+
+
+class Server(six.with_metaclass(abc.ABCMeta)):
+ """Services RPCs."""
+
+ @abc.abstractmethod
+ def add_generic_rpc_handlers(self, generic_rpc_handlers):
+ """Registers GenericRpcHandlers with this Server.
+
+ This method is only safe to call before the server is started.
+
+ Args:
+ generic_rpc_handlers: An iterable of GenericRpcHandlers that will be
+ used to service RPCs.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def add_insecure_port(self, address):
+ """Opens an insecure port for accepting RPCs.
+
+ This method may only be called before starting the server.
+
+ Args:
+ address: The address for which to open a port. If the port is 0,
+ or not specified in the address, then gRPC runtime will choose a port.
+
+ Returns:
+ An integer port on which server will accept RPC requests.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def add_secure_port(self, address, server_credentials):
+ """Opens a secure port for accepting RPCs.
+
+ This method may only be called before starting the server.
+
+ Args:
+ address: The address for which to open a port.
+ if the port is 0, or not specified in the address, then gRPC
+ runtime will choose a port.
+ server_credentials: A ServerCredentials object.
+
+ Returns:
+ An integer port on which server will accept RPC requests.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def start(self):
+ """Starts this Server.
+
+ This method may only be called once. (i.e. it is not idempotent).
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def stop(self, grace):
+ """Stops this Server.
+
+ This method immediately stops service of new RPCs in all cases.
+
+ If a grace period is specified, this method returns immediately
+ and all RPCs active at the end of the grace period are aborted.
+ If a grace period is not specified (by passing None for `grace`),
+ all existing RPCs are aborted immediately and this method
+ blocks until the last RPC handler terminates.
+
+ This method is idempotent and may be called at any time.
+ Passing a smaller grace value in a subsequent call will have
+ the effect of stopping the Server sooner (passing None will
+ have the effect of stopping the server immediately). Passing
+ a larger grace value in a subsequent call *will not* have the
+ effect of stopping the server later (i.e. the most restrictive
+ grace value is used).
+
+ Args:
+ grace: A duration of time in seconds or None.
+
+ Returns:
+ A threading.Event that will be set when this Server has completely
+ stopped, i.e. when running RPCs either complete or are aborted and
+ all handlers have terminated.
+ """
+ raise NotImplementedError()
+
+ def wait_for_termination(self, timeout=None):
+ """Block current thread until the server stops.
+
+ This is an EXPERIMENTAL API.
+
+ The wait will not consume computational resources during blocking, and
+ it will block until one of the two following conditions are met:
+
+ 1) The server is stopped or terminated;
+ 2) A timeout occurs if timeout is not `None`.
+
+ The timeout argument works in the same way as `threading.Event.wait()`.
+ https://docs.python.org/3/library/threading.html#threading.Event.wait
+
+ Args:
+ timeout: A floating point number specifying a timeout for the
+ operation in seconds.
+
+ Returns:
+ A bool indicating whether the operation timed out.
+ """
+ raise NotImplementedError()
+
+
+################################# Functions ################################
+
+
+def unary_unary_rpc_method_handler(behavior,
+ request_deserializer=None,
+ response_serializer=None):
+ """Creates an RpcMethodHandler for a unary-unary RPC method.
+
+ Args:
+ behavior: The implementation of an RPC that accepts one request
+ and returns one response.
+ request_deserializer: An optional :term:`deserializer` for request deserialization.
+ response_serializer: An optional :term:`serializer` for response serialization.
+
+ Returns:
+ An RpcMethodHandler object that is typically used by grpc.Server.
+ """
+ from grpc import _utilities # pylint: disable=cyclic-import
+ return _utilities.RpcMethodHandler(False, False, request_deserializer,
+ response_serializer, behavior, None,
+ None, None)
+
+
+def unary_stream_rpc_method_handler(behavior,
+ request_deserializer=None,
+ response_serializer=None):
+ """Creates an RpcMethodHandler for a unary-stream RPC method.
+
+ Args:
+ behavior: The implementation of an RPC that accepts one request
+ and returns an iterator of response values.
+ request_deserializer: An optional :term:`deserializer` for request deserialization.
+ response_serializer: An optional :term:`serializer` for response serialization.
+
+ Returns:
+ An RpcMethodHandler object that is typically used by grpc.Server.
+ """
+ from grpc import _utilities # pylint: disable=cyclic-import
+ return _utilities.RpcMethodHandler(False, True, request_deserializer,
+ response_serializer, None, behavior,
+ None, None)
+
+
+def stream_unary_rpc_method_handler(behavior,
+ request_deserializer=None,
+ response_serializer=None):
+ """Creates an RpcMethodHandler for a stream-unary RPC method.
+
+ Args:
+ behavior: The implementation of an RPC that accepts an iterator of
+ request values and returns a single response value.
+ request_deserializer: An optional :term:`deserializer` for request deserialization.
+ response_serializer: An optional :term:`serializer` for response serialization.
+
+ Returns:
+ An RpcMethodHandler object that is typically used by grpc.Server.
+ """
+ from grpc import _utilities # pylint: disable=cyclic-import
+ return _utilities.RpcMethodHandler(True, False, request_deserializer,
+ response_serializer, None, None,
+ behavior, None)
+
+
+def stream_stream_rpc_method_handler(behavior,
+ request_deserializer=None,
+ response_serializer=None):
+ """Creates an RpcMethodHandler for a stream-stream RPC method.
+
+ Args:
+ behavior: The implementation of an RPC that accepts an iterator of
+ request values and returns an iterator of response values.
+ request_deserializer: An optional :term:`deserializer` for request deserialization.
+ response_serializer: An optional :term:`serializer` for response serialization.
+
+ Returns:
+ An RpcMethodHandler object that is typically used by grpc.Server.
+ """
+ from grpc import _utilities # pylint: disable=cyclic-import
+ return _utilities.RpcMethodHandler(True, True, request_deserializer,
+ response_serializer, None, None, None,
+ behavior)
+
+
+def method_handlers_generic_handler(service, method_handlers):
+ """Creates a GenericRpcHandler from RpcMethodHandlers.
+
+ Args:
+ service: The name of the service that is implemented by the
+ method_handlers.
+ method_handlers: A dictionary that maps method names to corresponding
+ RpcMethodHandler.
+
+ Returns:
+ A GenericRpcHandler. This is typically added to the grpc.Server object
+ with add_generic_rpc_handlers() before starting the server.
+ """
+ from grpc import _utilities # pylint: disable=cyclic-import
+ return _utilities.DictionaryGenericHandler(service, method_handlers)
+
+
+def ssl_channel_credentials(root_certificates=None,
+ private_key=None,
+ certificate_chain=None):
+ """Creates a ChannelCredentials for use with an SSL-enabled Channel.
+
+ Args:
+ root_certificates: The PEM-encoded root certificates as a byte string,
+ or None to retrieve them from a default location chosen by gRPC
+ runtime.
+ private_key: The PEM-encoded private key as a byte string, or None if no
+ private key should be used.
+ certificate_chain: The PEM-encoded certificate chain as a byte string
+ to use or None if no certificate chain should be used.
+
+ Returns:
+ A ChannelCredentials for use with an SSL-enabled Channel.
+ """
+ return ChannelCredentials(
+ _cygrpc.SSLChannelCredentials(root_certificates, private_key,
+ certificate_chain))
+
+
+def xds_channel_credentials(fallback_credentials=None):
+ """Creates a ChannelCredentials for use with xDS. This is an EXPERIMENTAL
+ API.
+
+ Args:
+ fallback_credentials: Credentials to use in case it is not possible to
+ establish a secure connection via xDS. If no fallback_credentials
+ argument is supplied, a default SSLChannelCredentials is used.
+ """
+ fallback_credentials = ssl_channel_credentials(
+ ) if fallback_credentials is None else fallback_credentials
+ return ChannelCredentials(
+ _cygrpc.XDSChannelCredentials(fallback_credentials._credentials))
+
+
+def metadata_call_credentials(metadata_plugin, name=None):
+ """Construct CallCredentials from an AuthMetadataPlugin.
+
+ Args:
+ metadata_plugin: An AuthMetadataPlugin to use for authentication.
+ name: An optional name for the plugin.
+
+ Returns:
+ A CallCredentials.
+ """
+ from grpc import _plugin_wrapping # pylint: disable=cyclic-import
+ return _plugin_wrapping.metadata_plugin_call_credentials(
+ metadata_plugin, name)
+
+
+def access_token_call_credentials(access_token):
+ """Construct CallCredentials from an access token.
+
+ Args:
+ access_token: A string to place directly in the http request
+ authorization header, for example
+ "authorization: Bearer <access_token>".
+
+ Returns:
+ A CallCredentials.
+ """
+ from grpc import _auth # pylint: disable=cyclic-import
+ from grpc import _plugin_wrapping # pylint: disable=cyclic-import
+ return _plugin_wrapping.metadata_plugin_call_credentials(
+ _auth.AccessTokenAuthMetadataPlugin(access_token), None)
+
+
+def composite_call_credentials(*call_credentials):
+ """Compose multiple CallCredentials to make a new CallCredentials.
+
+ Args:
+ *call_credentials: At least two CallCredentials objects.
+
+ Returns:
+ A CallCredentials object composed of the given CallCredentials objects.
+ """
+ return CallCredentials(
+ _cygrpc.CompositeCallCredentials(
+ tuple(single_call_credentials._credentials
+ for single_call_credentials in call_credentials)))
+
+
+def composite_channel_credentials(channel_credentials, *call_credentials):
+ """Compose a ChannelCredentials and one or more CallCredentials objects.
+
+ Args:
+ channel_credentials: A ChannelCredentials object.
+ *call_credentials: One or more CallCredentials objects.
+
+ Returns:
+ A ChannelCredentials composed of the given ChannelCredentials and
+ CallCredentials objects.
+ """
+ return ChannelCredentials(
+ _cygrpc.CompositeChannelCredentials(
+ tuple(single_call_credentials._credentials
+ for single_call_credentials in call_credentials),
+ channel_credentials._credentials))
+
+
+def ssl_server_credentials(private_key_certificate_chain_pairs,
+ root_certificates=None,
+ require_client_auth=False):
+ """Creates a ServerCredentials for use with an SSL-enabled Server.
+
+ Args:
+ private_key_certificate_chain_pairs: A list of pairs of the form
+ [PEM-encoded private key, PEM-encoded certificate chain].
+ root_certificates: An optional byte string of PEM-encoded client root
+ certificates that the server will use to verify client authentication.
+ If omitted, require_client_auth must also be False.
+ require_client_auth: A boolean indicating whether or not to require
+ clients to be authenticated. May only be True if root_certificates
+ is not None.
+
+ Returns:
+ A ServerCredentials for use with an SSL-enabled Server. Typically, this
+ object is an argument to add_secure_port() method during server setup.
+ """
+ if not private_key_certificate_chain_pairs:
+ raise ValueError(
+ 'At least one private key-certificate chain pair is required!')
+ elif require_client_auth and root_certificates is None:
+ raise ValueError(
+ 'Illegal to require client auth without providing root certificates!'
+ )
+ else:
+ return ServerCredentials(
+ _cygrpc.server_credentials_ssl(root_certificates, [
+ _cygrpc.SslPemKeyCertPair(key, pem)
+ for key, pem in private_key_certificate_chain_pairs
+ ], require_client_auth))
+
+
+def xds_server_credentials(fallback_credentials):
+ """Creates a ServerCredentials for use with xDS. This is an EXPERIMENTAL
+ API.
+
+ Args:
+ fallback_credentials: Credentials to use in case it is not possible to
+ establish a secure connection via xDS. No default value is provided.
+ """
+ return ServerCredentials(
+ _cygrpc.xds_server_credentials(fallback_credentials._credentials))
+
+
+def insecure_server_credentials():
+ """Creates a credentials object directing the server to use no credentials.
+ This is an EXPERIMENTAL API.
+
+ This object cannot be used directly in a call to `add_secure_port`.
+ Instead, it should be used to construct other credentials objects, e.g.
+ with xds_server_credentials.
+ """
+ return ServerCredentials(_cygrpc.insecure_server_credentials())
+
+
+def ssl_server_certificate_configuration(private_key_certificate_chain_pairs,
+ root_certificates=None):
+ """Creates a ServerCertificateConfiguration for use with a Server.
+
+ Args:
+ private_key_certificate_chain_pairs: A collection of pairs of
+ the form [PEM-encoded private key, PEM-encoded certificate
+ chain].
+ root_certificates: An optional byte string of PEM-encoded client root
+ certificates that the server will use to verify client authentication.
+
+ Returns:
+ A ServerCertificateConfiguration that can be returned in the certificate
+ configuration fetching callback.
+ """
+ if private_key_certificate_chain_pairs:
+ return ServerCertificateConfiguration(
+ _cygrpc.server_certificate_config_ssl(root_certificates, [
+ _cygrpc.SslPemKeyCertPair(key, pem)
+ for key, pem in private_key_certificate_chain_pairs
+ ]))
+ else:
+ raise ValueError(
+ 'At least one private key-certificate chain pair is required!')
+
+
+def dynamic_ssl_server_credentials(initial_certificate_configuration,
+ certificate_configuration_fetcher,
+ require_client_authentication=False):
+ """Creates a ServerCredentials for use with an SSL-enabled Server.
+
+ Args:
+ initial_certificate_configuration (ServerCertificateConfiguration): The
+ certificate configuration with which the server will be initialized.
+ certificate_configuration_fetcher (callable): A callable that takes no
+ arguments and should return a ServerCertificateConfiguration to
+ replace the server's current certificate, or None for no change
+ (i.e., the server will continue its current certificate
+ config). The library will call this callback on *every* new
+ client connection before starting the TLS handshake with the
+ client, thus allowing the user application to optionally
+ return a new ServerCertificateConfiguration that the server will then
+ use for the handshake.
+ require_client_authentication: A boolean indicating whether or not to
+ require clients to be authenticated.
+
+ Returns:
+ A ServerCredentials.
+ """
+ return ServerCredentials(
+ _cygrpc.server_credentials_ssl_dynamic_cert_config(
+ initial_certificate_configuration,
+ certificate_configuration_fetcher, require_client_authentication))
+
+
+@enum.unique
+class LocalConnectionType(enum.Enum):
+ """Types of local connection for local credential creation.
+
+ Attributes:
+ UDS: Unix domain socket connections
+ LOCAL_TCP: Local TCP connections.
+ """
+ UDS = _cygrpc.LocalConnectionType.uds
+ LOCAL_TCP = _cygrpc.LocalConnectionType.local_tcp
+
+
+def local_channel_credentials(local_connect_type=LocalConnectionType.LOCAL_TCP):
+ """Creates a local ChannelCredentials used for local connections.
+
+ This is an EXPERIMENTAL API.
+
+ Local credentials are used by local TCP endpoints (e.g. localhost:10000)
+ and UDS connections.
+
+ The connections created by local channel credentials are not
+ encrypted, but will be checked if they are local or not.
+ The UDS connections are considered secure by providing peer authentication
+ and data confidentiality while TCP connections are considered insecure.
+
+ It is allowed to transmit call credentials over connections created by
+ local channel credentials.
+
+ Local channel credentials are useful for 1) eliminating insecure_channel usage;
+ 2) enabling unit testing for call credentials without setting up secrets.
+
+ Args:
+ local_connect_type: Local connection type (either
+ grpc.LocalConnectionType.UDS or grpc.LocalConnectionType.LOCAL_TCP)
+
+ Returns:
+ A ChannelCredentials for use with a local Channel
+ """
+ return ChannelCredentials(
+ _cygrpc.channel_credentials_local(local_connect_type.value))
+
+
+def local_server_credentials(local_connect_type=LocalConnectionType.LOCAL_TCP):
+ """Creates a local ServerCredentials used for local connections.
+
+ This is an EXPERIMENTAL API.
+
+ Local credentials are used by local TCP endpoints (e.g. localhost:10000)
+ and UDS connections.
+
+ The connections created by local server credentials are not
+ encrypted, but will be checked if they are local or not.
+ The UDS connections are considered secure by providing peer authentication
+ and data confidentiality while TCP connections are considered insecure.
+
+ It is allowed to transmit call credentials over connections created by local
+ server credentials.
+
+ Local server credentials are useful for 1) eliminating insecure_channel usage;
+ 2) enabling unit testing for call credentials without setting up secrets.
+
+ Args:
+ local_connect_type: Local connection type (either
+ grpc.LocalConnectionType.UDS or grpc.LocalConnectionType.LOCAL_TCP)
+
+ Returns:
+ A ServerCredentials for use with a local Server
+ """
+ return ServerCredentials(
+ _cygrpc.server_credentials_local(local_connect_type.value))
+
+
+def alts_channel_credentials(service_accounts=None):
+ """Creates a ChannelCredentials for use with an ALTS-enabled Channel.
+
+ This is an EXPERIMENTAL API.
+ ALTS credentials API can only be used in GCP environment as it relies on
+ handshaker service being available. For more info about ALTS see
+ https://cloud.google.com/security/encryption-in-transit/application-layer-transport-security
+
+ Args:
+ service_accounts: A list of server identities accepted by the client.
+ If target service accounts are provided and none of them matches the
+ peer identity of the server, handshake will fail. The arg can be empty
+ if the client does not have any information about trusted server
+ identity.
+ Returns:
+ A ChannelCredentials for use with an ALTS-enabled Channel
+ """
+ return ChannelCredentials(
+ _cygrpc.channel_credentials_alts(service_accounts or []))
+
+
+def alts_server_credentials():
+ """Creates a ServerCredentials for use with an ALTS-enabled connection.
+
+ This is an EXPERIMENTAL API.
+ ALTS credentials API can only be used in GCP environment as it relies on
+ handshaker service being available. For more info about ALTS see
+ https://cloud.google.com/security/encryption-in-transit/application-layer-transport-security
+
+ Returns:
+ A ServerCredentials for use with an ALTS-enabled Server
+ """
+ return ServerCredentials(_cygrpc.server_credentials_alts())
+
+
+def compute_engine_channel_credentials(call_credentials):
+ """Creates a compute engine channel credential.
+
+ This credential can only be used in a GCP environment as it relies on
+ a handshaker service. For more info about ALTS, see
+ https://cloud.google.com/security/encryption-in-transit/application-layer-transport-security
+
+ This channel credential is expected to be used as part of a composite
+ credential in conjunction with a call credentials that authenticates the
+ VM's default service account. If used with any other sort of call
+ credential, the connection may suddenly and unexpectedly begin failing RPCs.
+ """
+ return ChannelCredentials(
+ _cygrpc.channel_credentials_compute_engine(
+ call_credentials._credentials))
+
+
+def channel_ready_future(channel):
+ """Creates a Future that tracks when a Channel is ready.
+
+ Cancelling the Future does not affect the channel's state machine.
+ It merely decouples the Future from channel state machine.
+
+ Args:
+ channel: A Channel object.
+
+ Returns:
+ A Future object that matures when the channel connectivity is
+ ChannelConnectivity.READY.
+ """
+ from grpc import _utilities # pylint: disable=cyclic-import
+ return _utilities.channel_ready_future(channel)
+
+
+def insecure_channel(target, options=None, compression=None):
+ """Creates an insecure Channel to a server.
+
+ The returned Channel is thread-safe.
+
+ Args:
+ target: The server address
+ options: An optional list of key-value pairs (:term:`channel_arguments`
+ in gRPC Core runtime) to configure the channel.
+ compression: An optional value indicating the compression method to be
+ used over the lifetime of the channel. This is an EXPERIMENTAL option.
+
+ Returns:
+ A Channel.
+ """
+ from grpc import _channel # pylint: disable=cyclic-import
+ return _channel.Channel(target, () if options is None else options, None,
+ compression)
+
+
+def secure_channel(target, credentials, options=None, compression=None):
+ """Creates a secure Channel to a server.
+
+ The returned Channel is thread-safe.
+
+ Args:
+ target: The server address.
+ credentials: A ChannelCredentials instance.
+ options: An optional list of key-value pairs (:term:`channel_arguments`
+ in gRPC Core runtime) to configure the channel.
+ compression: An optional value indicating the compression method to be
+ used over the lifetime of the channel. This is an EXPERIMENTAL option.
+
+ Returns:
+ A Channel.
+ """
+ from grpc import _channel # pylint: disable=cyclic-import
+ from grpc.experimental import _insecure_channel_credentials
+ if credentials._credentials is _insecure_channel_credentials:
+ raise ValueError(
+ "secure_channel cannot be called with insecure credentials." +
+ " Call insecure_channel instead.")
+ return _channel.Channel(target, () if options is None else options,
+ credentials._credentials, compression)
+
+
+def intercept_channel(channel, *interceptors):
+ """Intercepts a channel through a set of interceptors.
+
+ Args:
+ channel: A Channel.
+ interceptors: Zero or more objects of type
+ UnaryUnaryClientInterceptor,
+ UnaryStreamClientInterceptor,
+ StreamUnaryClientInterceptor, or
+ StreamStreamClientInterceptor.
+ Interceptors are given control in the order they are listed.
+
+ Returns:
+ A Channel that intercepts each invocation via the provided interceptors.
+
+ Raises:
+ TypeError: If interceptor does not derive from any of
+ UnaryUnaryClientInterceptor,
+ UnaryStreamClientInterceptor,
+ StreamUnaryClientInterceptor, or
+ StreamStreamClientInterceptor.
+ """
+ from grpc import _interceptor # pylint: disable=cyclic-import
+ return _interceptor.intercept_channel(channel, *interceptors)
+
+
+def server(thread_pool,
+ handlers=None,
+ interceptors=None,
+ options=None,
+ maximum_concurrent_rpcs=None,
+ compression=None,
+ xds=False):
+ """Creates a Server with which RPCs can be serviced.
+
+ Args:
+ thread_pool: A futures.ThreadPoolExecutor to be used by the Server
+ to execute RPC handlers.
+ handlers: An optional list of GenericRpcHandlers used for executing RPCs.
+ More handlers may be added by calling add_generic_rpc_handlers any time
+ before the server is started.
+ interceptors: An optional list of ServerInterceptor objects that observe
+ and optionally manipulate the incoming RPCs before handing them over to
+ handlers. The interceptors are given control in the order they are
+ specified. This is an EXPERIMENTAL API.
+ options: An optional list of key-value pairs (:term:`channel_arguments` in gRPC runtime)
+ to configure the channel.
+ maximum_concurrent_rpcs: The maximum number of concurrent RPCs this server
+ will service before returning RESOURCE_EXHAUSTED status, or None to
+ indicate no limit.
+ compression: An element of grpc.compression, e.g.
+ grpc.compression.Gzip. This compression algorithm will be used for the
+ lifetime of the server unless overridden. This is an EXPERIMENTAL option.
+ xds: If set to true, retrieves server configuration via xDS. This is an
+ EXPERIMENTAL option.
+
+ Returns:
+ A Server object.
+ """
+ from grpc import _server # pylint: disable=cyclic-import
+ return _server.create_server(thread_pool,
+ () if handlers is None else handlers,
+ () if interceptors is None else interceptors,
+ () if options is None else options,
+ maximum_concurrent_rpcs, compression, xds)
+
+
+@contextlib.contextmanager
+def _create_servicer_context(rpc_event, state, request_deserializer):
+ from grpc import _server # pylint: disable=cyclic-import
+ context = _server._Context(rpc_event, state, request_deserializer)
+ yield context
+ context._finalize_state() # pylint: disable=protected-access
+
+
+@enum.unique
+class Compression(enum.IntEnum):
+ """Indicates the compression method to be used for an RPC.
+
+ This enumeration is part of an EXPERIMENTAL API.
+
+ Attributes:
+ NoCompression: Do not use compression algorithm.
+ Deflate: Use "Deflate" compression algorithm.
+ Gzip: Use "Gzip" compression algorithm.
+ """
+ NoCompression = _compression.NoCompression
+ Deflate = _compression.Deflate
+ Gzip = _compression.Gzip
+
+
+################################### __all__ #################################
+
+__all__ = (
+ 'FutureTimeoutError',
+ 'FutureCancelledError',
+ 'Future',
+ 'ChannelConnectivity',
+ 'StatusCode',
+ 'Status',
+ 'RpcError',
+ 'RpcContext',
+ 'Call',
+ 'ChannelCredentials',
+ 'CallCredentials',
+ 'AuthMetadataContext',
+ 'AuthMetadataPluginCallback',
+ 'AuthMetadataPlugin',
+ 'Compression',
+ 'ClientCallDetails',
+ 'ServerCertificateConfiguration',
+ 'ServerCredentials',
+ 'LocalConnectionType',
+ 'UnaryUnaryMultiCallable',
+ 'UnaryStreamMultiCallable',
+ 'StreamUnaryMultiCallable',
+ 'StreamStreamMultiCallable',
+ 'UnaryUnaryClientInterceptor',
+ 'UnaryStreamClientInterceptor',
+ 'StreamUnaryClientInterceptor',
+ 'StreamStreamClientInterceptor',
+ 'Channel',
+ 'ServicerContext',
+ 'RpcMethodHandler',
+ 'HandlerCallDetails',
+ 'GenericRpcHandler',
+ 'ServiceRpcHandler',
+ 'Server',
+ 'ServerInterceptor',
+ 'unary_unary_rpc_method_handler',
+ 'unary_stream_rpc_method_handler',
+ 'stream_unary_rpc_method_handler',
+ 'stream_stream_rpc_method_handler',
+ 'method_handlers_generic_handler',
+ 'ssl_channel_credentials',
+ 'metadata_call_credentials',
+ 'access_token_call_credentials',
+ 'composite_call_credentials',
+ 'composite_channel_credentials',
+ 'compute_engine_channel_credentials',
+ 'local_channel_credentials',
+ 'local_server_credentials',
+ 'alts_channel_credentials',
+ 'alts_server_credentials',
+ 'ssl_server_credentials',
+ 'ssl_server_certificate_configuration',
+ 'dynamic_ssl_server_credentials',
+ 'channel_ready_future',
+ 'insecure_channel',
+ 'secure_channel',
+ 'intercept_channel',
+ 'server',
+ 'protos',
+ 'services',
+ 'protos_and_services',
+ 'xds_channel_credentials',
+ 'xds_server_credentials',
+ 'insecure_server_credentials',
+)
+
+############################### Extension Shims ################################
+
+# Here to maintain backwards compatibility; avoid using these in new code!
+try:
+ import grpc_tools
+ sys.modules.update({'grpc.tools': grpc_tools})
+except ImportError:
+ pass
+try:
+ import grpc_health
+ sys.modules.update({'grpc.health': grpc_health})
+except ImportError:
+ pass
+try:
+ import grpc_reflection
+ sys.modules.update({'grpc.reflection': grpc_reflection})
+except ImportError:
+ pass
+
+# Prevents import order issue in the case of renamed path.
+if sys.version_info >= (3, 6) and __name__ == "grpc":
+ from grpc import aio # pylint: disable=ungrouped-imports
+ sys.modules.update({'grpc.aio': aio})
diff --git a/contrib/python/grpcio/py2/grpc/_auth.py b/contrib/python/grpcio/py2/grpc/_auth.py
new file mode 100644
index 0000000000..1398251ccf
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/_auth.py
@@ -0,0 +1,62 @@
+# Copyright 2016 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""GRPCAuthMetadataPlugins for standard authentication."""
+
+import inspect
+import sys
+
+import grpc
+
+
+def _sign_request(callback, token, error):
+ metadata = (('authorization', 'Bearer {}'.format(token)),)
+ callback(metadata, error)
+
+
+class GoogleCallCredentials(grpc.AuthMetadataPlugin):
+ """Metadata wrapper for GoogleCredentials from the oauth2client library."""
+
+ def __init__(self, credentials):
+ self._credentials = credentials
+ # Hack to determine if these are JWT creds and we need to pass
+ # additional_claims when getting a token
+ if sys.version_info[0] == 2:
+ args = inspect.getargspec(credentials.get_access_token).args
+ else:
+ args = inspect.getfullargspec(credentials.get_access_token).args
+ self._is_jwt = 'additional_claims' in args
+
+ def __call__(self, context, callback):
+ try:
+ if self._is_jwt:
+ access_token = self._credentials.get_access_token(
+ additional_claims={
+ 'aud': context.service_url
+ }).access_token
+ else:
+ access_token = self._credentials.get_access_token().access_token
+ except Exception as exception: # pylint: disable=broad-except
+ _sign_request(callback, None, exception)
+ else:
+ _sign_request(callback, access_token, None)
+
+
+class AccessTokenAuthMetadataPlugin(grpc.AuthMetadataPlugin):
+ """Metadata wrapper for raw access token credentials."""
+
+ def __init__(self, access_token):
+ self._access_token = access_token
+
+ def __call__(self, context, callback):
+ _sign_request(callback, self._access_token, None)
diff --git a/contrib/python/grpcio/py2/grpc/_channel.py b/contrib/python/grpcio/py2/grpc/_channel.py
new file mode 100644
index 0000000000..b36e70f4a9
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/_channel.py
@@ -0,0 +1,1585 @@
+# Copyright 2016 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Invocation-side implementation of gRPC Python."""
+
+import copy
+import functools
+import logging
+import os
+import sys
+import threading
+import time
+
+import grpc
+from grpc import _common
+from grpc import _compression
+from grpc import _grpcio_metadata
+from grpc._cython import cygrpc
+import grpc.experimental
+
_LOGGER = logging.getLogger(__name__)

# Version-stamped identifier for this client library.
_USER_AGENT = 'grpc-python/{}'.format(_grpcio_metadata.__version__)

# No special flags on any of the batch operations submitted in this module.
_EMPTY_FLAGS = 0

# NOTE(rbellevi): No guarantees are given about the maintenance of this
# environment variable.
_DEFAULT_SINGLE_THREADED_UNARY_STREAM = os.getenv(
    "GRPC_SINGLE_THREADED_UNARY_STREAM") is not None

# Operation types initially expected ("due") from the completion queue for
# each RPC cardinality. Streaming directions omit the per-message operations,
# which are enqueued later one at a time.
_UNARY_UNARY_INITIAL_DUE = (
    cygrpc.OperationType.send_initial_metadata,
    cygrpc.OperationType.send_message,
    cygrpc.OperationType.send_close_from_client,
    cygrpc.OperationType.receive_initial_metadata,
    cygrpc.OperationType.receive_message,
    cygrpc.OperationType.receive_status_on_client,
)
_UNARY_STREAM_INITIAL_DUE = (
    cygrpc.OperationType.send_initial_metadata,
    cygrpc.OperationType.send_message,
    cygrpc.OperationType.send_close_from_client,
    cygrpc.OperationType.receive_initial_metadata,
    cygrpc.OperationType.receive_status_on_client,
)
_STREAM_UNARY_INITIAL_DUE = (
    cygrpc.OperationType.send_initial_metadata,
    cygrpc.OperationType.receive_initial_metadata,
    cygrpc.OperationType.receive_message,
    cygrpc.OperationType.receive_status_on_client,
)
_STREAM_STREAM_INITIAL_DUE = (
    cygrpc.OperationType.send_initial_metadata,
    cygrpc.OperationType.receive_initial_metadata,
    cygrpc.OperationType.receive_status_on_client,
)

_CHANNEL_SUBSCRIPTION_CALLBACK_ERROR_LOG_MESSAGE = (
    'Exception calling channel subscription callback!')

# repr()/str() templates for terminated RPCs; the non-OK variant also carries
# the debug error string supplied by Core.
_OK_RENDEZVOUS_REPR_FORMAT = ('<{} of RPC that terminated with:\n'
                              '\tstatus = {}\n'
                              '\tdetails = "{}"\n'
                              '>')

_NON_OK_RENDEZVOUS_REPR_FORMAT = ('<{} of RPC that terminated with:\n'
                                  '\tstatus = {}\n'
                                  '\tdetails = "{}"\n'
                                  '\tdebug_error_string = "{}"\n'
                                  '>')
+
+
+def _deadline(timeout):
+ return None if timeout is None else time.time() + timeout
+
+
+def _unknown_code_details(unknown_cygrpc_code, details):
+ return 'Server sent unknown code {} and details "{}"'.format(
+ unknown_cygrpc_code, details)
+
+
class _RPCState(object):
    """Mutable client-side state of a single RPC, guarded by `condition`."""

    def __init__(self, due, initial_metadata, trailing_metadata, code, details):
        # Every member of this object is read and written under `condition`;
        # notify_all is invoked on it whenever the RPC's state changes.
        self.condition = threading.Condition()

        # Operation types still expected back from the RPC's completion
        # queue. Membership here guarantees `operate()` was called for a
        # corresponding operation — but not the converse: after a failed
        # `operate()` an entry may briefly remain with no matching
        # operation submitted to Core.
        self.due = set(due)
        self.initial_metadata = initial_metadata
        self.response = None
        self.trailing_metadata = trailing_metadata
        self.code = code
        self.details = details
        self.debug_error_string = None

        # grpc.Future.cancel/cancelled have semantics slightly removed from
        # the rest of the RPC result, so cancellation-requested-before-
        # termination is tracked as its own flag.
        self.cancelled = False
        self.callbacks = []
        self.fork_epoch = cygrpc.get_fork_epoch()

    def reset_postfork_child(self):
        """Install a fresh condition object in a freshly forked child."""
        self.condition = threading.Condition()
+
+
+def _abort(state, code, details):
+ if state.code is None:
+ state.code = code
+ state.details = details
+ if state.initial_metadata is None:
+ state.initial_metadata = ()
+ state.trailing_metadata = ()
+
+
def _handle_event(event, state, response_deserializer):
    """Apply a completion-queue event to `state`.

    The caller is expected to hold state.condition. Completed operation
    types are removed from state.due; received metadata, messages, and
    status are recorded on `state`.

    Returns:
      The list of done-callbacks to run (state.callbacks is cleared)
      once the terminal status has been received; an empty list
      otherwise.
    """
    callbacks = []
    for batch_operation in event.batch_operations:
        operation_type = batch_operation.type()
        state.due.remove(operation_type)
        if operation_type == cygrpc.OperationType.receive_initial_metadata:
            state.initial_metadata = batch_operation.initial_metadata()
        elif operation_type == cygrpc.OperationType.receive_message:
            serialized_response = batch_operation.message()
            if serialized_response is not None:
                response = _common.deserialize(serialized_response,
                                               response_deserializer)
                if response is None:
                    # A deserializer failure is surfaced as INTERNAL.
                    details = 'Exception deserializing response!'
                    _abort(state, grpc.StatusCode.INTERNAL, details)
                else:
                    state.response = response
        elif operation_type == cygrpc.OperationType.receive_status_on_client:
            state.trailing_metadata = batch_operation.trailing_metadata()
            if state.code is None:
                code = _common.CYGRPC_STATUS_CODE_TO_STATUS_CODE.get(
                    batch_operation.code())
                if code is None:
                    state.code = grpc.StatusCode.UNKNOWN
                    # Report the raw cygrpc code; previously the (always
                    # None) mapped `code` was formatted into the message,
                    # hiding the actual unrecognized value.
                    state.details = _unknown_code_details(
                        batch_operation.code(), batch_operation.details())
                else:
                    state.code = code
                    state.details = batch_operation.details()
                    state.debug_error_string = batch_operation.error_string()
            callbacks.extend(state.callbacks)
            state.callbacks = None
    return callbacks
+
+
def _event_handler(state, response_deserializer):
    """Create a completion-queue event handler bound to `state`.

    The returned callable applies the event to `state` under its
    condition, wakes all waiters, runs any done-callbacks, and reports
    whether the channel spin thread may retire this RPC (all operations
    done and no fork has happened since the RPC started).
    """

    def handle_event(event):
        with state.condition:
            callbacks = _handle_event(event, state, response_deserializer)
            state.condition.notify_all()
            done = not state.due
        for callback in callbacks:
            try:
                callback()
            except Exception as e:  # pylint: disable=broad-except
                # NOTE(rbellevi): We suppress but log errors here so as not to
                # kill the channel spin thread.
                # repr(callback) rather than repr(callback.func): arbitrary
                # callables (not only functools.partial objects) may be
                # registered, and accessing `.func` on them would itself
                # raise AttributeError inside this handler. Also log via the
                # module _LOGGER (with traceback) instead of the root logger.
                _LOGGER.exception('Exception in callback %s: %s',
                                  repr(callback), repr(e))
        return done and state.fork_epoch >= cygrpc.get_fork_epoch()

    return handle_event
+
+
#pylint: disable=too-many-statements
def _consume_request_iterator(request_iterator, state, call, request_serializer,
                              event_handler):
    """Consume a request iterator supplied by the user.

    Spawns a fork-managed daemon thread that pulls requests from
    `request_iterator`, serializes them, and submits them to Core one at
    a time, then half-closes the call once the iterator is exhausted.
    An exception raised by the iterator or a serialization failure
    cancels the RPC and aborts `state`.
    """

    def consume_request_iterator():  # pylint: disable=too-many-branches
        # Iterate over the request iterator until it is exhausted or an error
        # condition is encountered.
        while True:
            return_from_user_request_generator_invoked = False
            try:
                # The thread may die in user-code. Do not block fork for this.
                cygrpc.enter_user_request_generator()
                request = next(request_iterator)
            except StopIteration:
                break
            except Exception:  # pylint: disable=broad-except
                cygrpc.return_from_user_request_generator()
                return_from_user_request_generator_invoked = True
                code = grpc.StatusCode.UNKNOWN
                details = 'Exception iterating requests!'
                _LOGGER.exception(details)
                call.cancel(_common.STATUS_CODE_TO_CYGRPC_STATUS_CODE[code],
                            details)
                _abort(state, code, details)
                return
            finally:
                # Guard against calling return_from_user_request_generator
                # twice on the exception path above.
                if not return_from_user_request_generator_invoked:
                    cygrpc.return_from_user_request_generator()
            serialized_request = _common.serialize(request, request_serializer)
            with state.condition:
                if state.code is None and not state.cancelled:
                    if serialized_request is None:
                        code = grpc.StatusCode.INTERNAL
                        details = 'Exception serializing request!'
                        call.cancel(
                            _common.STATUS_CODE_TO_CYGRPC_STATUS_CODE[code],
                            details)
                        _abort(state, code, details)
                        return
                    else:
                        # Mark the send as due before operate() so the spin
                        # thread always finds it in state.due; roll back if
                        # the enqueue fails.
                        state.due.add(cygrpc.OperationType.send_message)
                        operations = (cygrpc.SendMessageOperation(
                            serialized_request, _EMPTY_FLAGS),)
                        operating = call.operate(operations, event_handler)
                        if not operating:
                            state.due.remove(cygrpc.OperationType.send_message)
                            return

                        def _done():
                            return (state.code is not None or
                                    cygrpc.OperationType.send_message
                                    not in state.due)

                        # Wait for this message's send to complete before
                        # pulling the next request; the spin callback keeps
                        # fork support working while blocked.
                        _common.wait(state.condition.wait,
                                     _done,
                                     spin_cb=functools.partial(
                                         cygrpc.block_if_fork_in_progress,
                                         state))
                        if state.code is not None:
                            return
                else:
                    return
        # Iterator exhausted: half-close the call unless it already
        # terminated.
        with state.condition:
            if state.code is None:
                state.due.add(cygrpc.OperationType.send_close_from_client)
                operations = (
                    cygrpc.SendCloseFromClientOperation(_EMPTY_FLAGS),)
                operating = call.operate(operations, event_handler)
                if not operating:
                    state.due.remove(
                        cygrpc.OperationType.send_close_from_client)

    # Daemon thread: must never keep the interpreter alive on exit.
    consumption_thread = cygrpc.ForkManagedThread(
        target=consume_request_iterator)
    consumption_thread.setDaemon(True)
    consumption_thread.start()
+
+
+def _rpc_state_string(class_name, rpc_state):
+ """Calculates error string for RPC."""
+ with rpc_state.condition:
+ if rpc_state.code is None:
+ return '<{} object>'.format(class_name)
+ elif rpc_state.code is grpc.StatusCode.OK:
+ return _OK_RENDEZVOUS_REPR_FORMAT.format(class_name, rpc_state.code,
+ rpc_state.details)
+ else:
+ return _NON_OK_RENDEZVOUS_REPR_FORMAT.format(
+ class_name, rpc_state.code, rpc_state.details,
+ rpc_state.debug_error_string)
+
+
class _InactiveRpcError(grpc.RpcError, grpc.Call, grpc.Future):
    """An RPC error not tied to the execution of a particular RPC.

    The object doubles as the (already-terminated) call and future: the
    Future methods report an immediately-completed computation whose
    exception is this object itself.

    The RPC represented by the state object must not be in-progress or
    cancelled.

    Attributes:
      _state: An instance of _RPCState.
    """

    def __init__(self, state):
        # Snapshot the terminal state under its lock so this error is
        # self-contained and independent of later mutation of `state`.
        with state.condition:
            self._state = _RPCState((), copy.deepcopy(state.initial_metadata),
                                    copy.deepcopy(state.trailing_metadata),
                                    state.code, copy.deepcopy(state.details))
            self._state.response = copy.copy(state.response)
            self._state.debug_error_string = copy.copy(state.debug_error_string)

    def initial_metadata(self):
        """See grpc.Call.initial_metadata."""
        return self._state.initial_metadata

    def trailing_metadata(self):
        """See grpc.Call.trailing_metadata."""
        return self._state.trailing_metadata

    def code(self):
        """See grpc.Call.code."""
        return self._state.code

    def details(self):
        """See grpc.Call.details."""
        return _common.decode(self._state.details)

    def debug_error_string(self):
        """Return the decoded Core debug error string, if any."""
        return _common.decode(self._state.debug_error_string)

    def _repr(self):
        return _rpc_state_string(self.__class__.__name__, self._state)

    def __repr__(self):
        return self._repr()

    def __str__(self):
        return self._repr()

    def cancel(self):
        """See grpc.Future.cancel."""
        return False

    def cancelled(self):
        """See grpc.Future.cancelled."""
        return False

    def running(self):
        """See grpc.Future.running."""
        return False

    def done(self):
        """See grpc.Future.done."""
        return True

    def result(self, timeout=None):  # pylint: disable=unused-argument
        """See grpc.Future.result."""
        # The "result" of a failed RPC is its exception: this object.
        raise self

    def exception(self, timeout=None):  # pylint: disable=unused-argument
        """See grpc.Future.exception."""
        return self

    def traceback(self, timeout=None):  # pylint: disable=unused-argument
        """See grpc.Future.traceback."""
        # Raise and catch self to materialize a traceback object for it.
        try:
            raise self
        except grpc.RpcError:
            return sys.exc_info()[2]

    def add_done_callback(self, fn, timeout=None):  # pylint: disable=unused-argument
        """See grpc.Future.add_done_callback."""
        # The RPC is already terminated, so the callback runs immediately.
        fn(self)
+
+
class _Rendezvous(grpc.RpcError, grpc.RpcContext):
    """An RPC iterator.

    Attributes:
      _state: An instance of _RPCState.
      _call: An instance of SegregatedCall or IntegratedCall.
        In either case, the _call object is expected to have operate, cancel,
        and next_event methods.
      _response_deserializer: A callable taking bytes and return a Python
        object.
      _deadline: A float representing the deadline of the RPC in seconds. Or
        possibly None, to represent an RPC with no deadline at all.
    """

    def __init__(self, state, call, response_deserializer, deadline):
        super(_Rendezvous, self).__init__()
        self._state = state
        self._call = call
        self._response_deserializer = response_deserializer
        self._deadline = deadline

    def is_active(self):
        """See grpc.RpcContext.is_active"""
        with self._state.condition:
            return self._state.code is None

    def time_remaining(self):
        """See grpc.RpcContext.time_remaining"""
        with self._state.condition:
            if self._deadline is None:
                return None
            else:
                # Never negative, even if the deadline has already passed.
                return max(self._deadline - time.time(), 0)

    def cancel(self):
        """See grpc.RpcContext.cancel"""
        with self._state.condition:
            if self._state.code is None:
                code = grpc.StatusCode.CANCELLED
                details = 'Locally cancelled by application!'
                self._call.cancel(
                    _common.STATUS_CODE_TO_CYGRPC_STATUS_CODE[code], details)
                self._state.cancelled = True
                _abort(self._state, code, details)
                self._state.condition.notify_all()
                return True
            else:
                # Already terminated; nothing to cancel.
                return False

    def add_callback(self, callback):
        """See grpc.RpcContext.add_callback"""
        with self._state.condition:
            if self._state.callbacks is None:
                # Terminal status already received; too late to register.
                return False
            else:
                self._state.callbacks.append(callback)
                return True

    def __iter__(self):
        return self

    def next(self):
        # Python 2 iterator protocol.
        return self._next()

    def __next__(self):
        return self._next()

    def _next(self):
        # Implemented by _SingleThreadedRendezvous/_MultiThreadedRendezvous.
        raise NotImplementedError()

    def debug_error_string(self):
        raise NotImplementedError()

    def _repr(self):
        return _rpc_state_string(self.__class__.__name__, self._state)

    def __repr__(self):
        return self._repr()

    def __str__(self):
        return self._repr()

    def __del__(self):
        # Cancel the RPC if it is still running when garbage collected so
        # Core resources are released.
        with self._state.condition:
            if self._state.code is None:
                self._state.code = grpc.StatusCode.CANCELLED
                self._state.details = 'Cancelled upon garbage collection!'
                self._state.cancelled = True
                self._call.cancel(
                    _common.STATUS_CODE_TO_CYGRPC_STATUS_CODE[self._state.code],
                    self._state.details)
                self._state.condition.notify_all()
+
+
class _SingleThreadedRendezvous(_Rendezvous, grpc.Call, grpc.Future):  # pylint: disable=too-many-ancestors
    """An RPC iterator operating entirely on a single thread.

    The __next__ method of _SingleThreadedRendezvous does not depend on the
    existence of any other thread, including the "channel spin thread".
    However, this means that its interface is entirely synchronous. So this
    class cannot completely fulfill the grpc.Future interface. The result,
    exception, and traceback methods will never block and will instead raise
    an exception if calling the method would result in blocking.

    This means that these methods are safe to call from add_done_callback
    handlers.
    """

    def _is_complete(self):
        # Caller is expected to hold self._state.condition.
        return self._state.code is not None

    def cancelled(self):
        """See grpc.Future.cancelled."""
        with self._state.condition:
            return self._state.cancelled

    def running(self):
        """See grpc.Future.running."""
        with self._state.condition:
            return self._state.code is None

    def done(self):
        """See grpc.Future.done."""
        with self._state.condition:
            return self._state.code is not None

    def result(self, timeout=None):
        """Returns the result of the computation or raises its exception.

        This method will never block. Instead, it will raise an exception
        if calling this method would otherwise result in blocking.

        Since this method will never block, any `timeout` argument passed will
        be ignored.
        """
        del timeout
        with self._state.condition:
            if not self._is_complete():
                raise grpc.experimental.UsageError(
                    "_SingleThreadedRendezvous only supports result() when the RPC is complete."
                )
            if self._state.code is grpc.StatusCode.OK:
                return self._state.response
            elif self._state.cancelled:
                raise grpc.FutureCancelledError()
            else:
                raise self

    def exception(self, timeout=None):
        """Return the exception raised by the computation.

        This method will never block. Instead, it will raise an exception
        if calling this method would otherwise result in blocking.

        Since this method will never block, any `timeout` argument passed will
        be ignored.
        """
        del timeout
        with self._state.condition:
            if not self._is_complete():
                raise grpc.experimental.UsageError(
                    "_SingleThreadedRendezvous only supports exception() when the RPC is complete."
                )
            if self._state.code is grpc.StatusCode.OK:
                return None
            elif self._state.cancelled:
                raise grpc.FutureCancelledError()
            else:
                return self

    def traceback(self, timeout=None):
        """Access the traceback of the exception raised by the computation.

        This method will never block. Instead, it will raise an exception
        if calling this method would otherwise result in blocking.

        Since this method will never block, any `timeout` argument passed will
        be ignored.
        """
        del timeout
        with self._state.condition:
            if not self._is_complete():
                raise grpc.experimental.UsageError(
                    "_SingleThreadedRendezvous only supports traceback() when the RPC is complete."
                )
            if self._state.code is grpc.StatusCode.OK:
                return None
            elif self._state.cancelled:
                raise grpc.FutureCancelledError()
            else:
                # Raise and catch self to materialize a traceback object.
                try:
                    raise self
                except grpc.RpcError:
                    return sys.exc_info()[2]

    def add_done_callback(self, fn):
        """See grpc.Future.add_done_callback."""
        with self._state.condition:
            if self._state.code is None:
                self._state.callbacks.append(functools.partial(fn, self))
                return

        # Already terminated: run the callback immediately (outside the
        # lock).
        fn(self)

    def initial_metadata(self):
        """See grpc.Call.initial_metadata"""
        with self._state.condition:
            # NOTE(gnossen): Based on our initial call batch, we are guaranteed
            # to receive initial metadata before any messages.
            while self._state.initial_metadata is None:
                self._consume_next_event()
            return self._state.initial_metadata

    def trailing_metadata(self):
        """See grpc.Call.trailing_metadata"""
        with self._state.condition:
            if self._state.trailing_metadata is None:
                raise grpc.experimental.UsageError(
                    "Cannot get trailing metadata until RPC is completed.")
            return self._state.trailing_metadata

    def code(self):
        """See grpc.Call.code"""
        with self._state.condition:
            if self._state.code is None:
                raise grpc.experimental.UsageError(
                    "Cannot get code until RPC is completed.")
            return self._state.code

    def details(self):
        """See grpc.Call.details"""
        with self._state.condition:
            if self._state.details is None:
                raise grpc.experimental.UsageError(
                    "Cannot get details until RPC is completed.")
            return _common.decode(self._state.details)

    def _consume_next_event(self):
        # Blocks on the call's own event queue (no spin thread involved).
        event = self._call.next_event()
        with self._state.condition:
            callbacks = _handle_event(event, self._state,
                                      self._response_deserializer)
            for callback in callbacks:
                # NOTE(gnossen): We intentionally allow exceptions to bubble up
                # to the user when running on a single thread.
                callback()
        return event

    def _next_response(self):
        # Consume events until a response message arrives or the RPC ends.
        while True:
            self._consume_next_event()
            with self._state.condition:
                if self._state.response is not None:
                    response = self._state.response
                    self._state.response = None
                    return response
                elif cygrpc.OperationType.receive_message not in self._state.due:
                    if self._state.code is grpc.StatusCode.OK:
                        raise StopIteration()
                    elif self._state.code is not None:
                        raise self

    def _next(self):
        with self._state.condition:
            if self._state.code is None:
                # We tentatively add the operation as expected and remove
                # it if the enqueue operation fails. This allows us to guarantee that
                # if an event has been submitted to the core completion queue,
                # it is in `due`. If we waited until after a successful
                # enqueue operation then a signal could interrupt this
                # thread between the enqueue operation and the addition of the
                # operation to `due`. This would cause an exception on the
                # channel spin thread when the operation completes and no
                # corresponding operation would be present in state.due.
                # Note that, since `condition` is held through this block, there is
                # no data race on `due`.
                self._state.due.add(cygrpc.OperationType.receive_message)
                operating = self._call.operate(
                    (cygrpc.ReceiveMessageOperation(_EMPTY_FLAGS),), None)
                if not operating:
                    self._state.due.remove(cygrpc.OperationType.receive_message)
            elif self._state.code is grpc.StatusCode.OK:
                raise StopIteration()
            else:
                raise self
        return self._next_response()

    def debug_error_string(self):
        """Return the decoded Core debug error string once the RPC is done."""
        with self._state.condition:
            if self._state.debug_error_string is None:
                raise grpc.experimental.UsageError(
                    "Cannot get debug error string until RPC is completed.")
            return _common.decode(self._state.debug_error_string)
+
+
class _MultiThreadedRendezvous(_Rendezvous, grpc.Call, grpc.Future):  # pylint: disable=too-many-ancestors
    """An RPC iterator that depends on a channel spin thread.

    This iterator relies upon a per-channel thread running in the background,
    dequeueing events from the completion queue, and notifying threads waiting
    on the threading.Condition object in the _RPCState object.

    This extra thread allows _MultiThreadedRendezvous to fulfill the grpc.Future interface
    and to mediate a bidirection streaming RPC.
    """

    def initial_metadata(self):
        """See grpc.Call.initial_metadata"""
        with self._state.condition:

            def _done():
                return self._state.initial_metadata is not None

            # Block until the spin thread records the initial metadata.
            _common.wait(self._state.condition.wait, _done)
            return self._state.initial_metadata

    def trailing_metadata(self):
        """See grpc.Call.trailing_metadata"""
        with self._state.condition:

            def _done():
                return self._state.trailing_metadata is not None

            _common.wait(self._state.condition.wait, _done)
            return self._state.trailing_metadata

    def code(self):
        """See grpc.Call.code"""
        with self._state.condition:

            def _done():
                return self._state.code is not None

            _common.wait(self._state.condition.wait, _done)
            return self._state.code

    def details(self):
        """See grpc.Call.details"""
        with self._state.condition:

            def _done():
                return self._state.details is not None

            _common.wait(self._state.condition.wait, _done)
            return _common.decode(self._state.details)

    def debug_error_string(self):
        """Block until termination, then return the decoded debug string."""
        with self._state.condition:

            def _done():
                return self._state.debug_error_string is not None

            _common.wait(self._state.condition.wait, _done)
            return _common.decode(self._state.debug_error_string)

    def cancelled(self):
        """See grpc.Future.cancelled."""
        with self._state.condition:
            return self._state.cancelled

    def running(self):
        """See grpc.Future.running."""
        with self._state.condition:
            return self._state.code is None

    def done(self):
        """See grpc.Future.done."""
        with self._state.condition:
            return self._state.code is not None

    def _is_complete(self):
        # Caller is expected to hold self._state.condition.
        return self._state.code is not None

    def result(self, timeout=None):
        """Returns the result of the computation or raises its exception.

        See grpc.Future.result for the full API contract.
        """
        with self._state.condition:
            timed_out = _common.wait(self._state.condition.wait,
                                     self._is_complete,
                                     timeout=timeout)
            if timed_out:
                raise grpc.FutureTimeoutError()
            else:
                if self._state.code is grpc.StatusCode.OK:
                    return self._state.response
                elif self._state.cancelled:
                    raise grpc.FutureCancelledError()
                else:
                    raise self

    def exception(self, timeout=None):
        """Return the exception raised by the computation.

        See grpc.Future.exception for the full API contract.
        """
        with self._state.condition:
            timed_out = _common.wait(self._state.condition.wait,
                                     self._is_complete,
                                     timeout=timeout)
            if timed_out:
                raise grpc.FutureTimeoutError()
            else:
                if self._state.code is grpc.StatusCode.OK:
                    return None
                elif self._state.cancelled:
                    raise grpc.FutureCancelledError()
                else:
                    return self

    def traceback(self, timeout=None):
        """Access the traceback of the exception raised by the computation.

        See grpc.future.traceback for the full API contract.
        """
        with self._state.condition:
            timed_out = _common.wait(self._state.condition.wait,
                                     self._is_complete,
                                     timeout=timeout)
            if timed_out:
                raise grpc.FutureTimeoutError()
            else:
                if self._state.code is grpc.StatusCode.OK:
                    return None
                elif self._state.cancelled:
                    raise grpc.FutureCancelledError()
                else:
                    # Raise and catch self to materialize a traceback object.
                    try:
                        raise self
                    except grpc.RpcError:
                        return sys.exc_info()[2]

    def add_done_callback(self, fn):
        """See grpc.Future.add_done_callback."""
        with self._state.condition:
            if self._state.code is None:
                self._state.callbacks.append(functools.partial(fn, self))
                return

        # Already terminated: run the callback immediately (outside the
        # lock).
        fn(self)

    def _next(self):
        with self._state.condition:
            if self._state.code is None:
                # Tentatively mark the receive as due before operate() so the
                # spin thread always finds it in state.due; roll back if the
                # enqueue fails.
                event_handler = _event_handler(self._state,
                                               self._response_deserializer)
                self._state.due.add(cygrpc.OperationType.receive_message)
                operating = self._call.operate(
                    (cygrpc.ReceiveMessageOperation(_EMPTY_FLAGS),),
                    event_handler)
                if not operating:
                    self._state.due.remove(cygrpc.OperationType.receive_message)
            elif self._state.code is grpc.StatusCode.OK:
                raise StopIteration()
            else:
                raise self

            def _response_ready():
                return (self._state.response is not None or
                        (cygrpc.OperationType.receive_message
                         not in self._state.due and
                         self._state.code is not None))

            # Wait for either a message or termination.
            _common.wait(self._state.condition.wait, _response_ready)
            if self._state.response is not None:
                response = self._state.response
                self._state.response = None
                return response
            elif cygrpc.OperationType.receive_message not in self._state.due:
                if self._state.code is grpc.StatusCode.OK:
                    raise StopIteration()
                elif self._state.code is not None:
                    raise self
+
+
def _start_unary_request(request, timeout, request_serializer):
    """Serialize `request` and compute the deadline for a unary call.

    Returns:
      A (deadline, serialized_request, error) triple in which exactly
      one of serialized_request / error is None; `error` is an
      _InactiveRpcError describing a serialization failure.
    """
    deadline = _deadline(timeout)
    serialized_request = _common.serialize(request, request_serializer)
    if serialized_request is not None:
        return deadline, serialized_request, None
    state = _RPCState((), (), (), grpc.StatusCode.INTERNAL,
                      'Exception serializing request!')
    return deadline, None, _InactiveRpcError(state)
+
+
def _end_unary_response_blocking(state, call, with_call, deadline):
    """Produce the result of a completed blocking unary-response RPC.

    Raises an _InactiveRpcError for any non-OK terminal status; otherwise
    returns the response, paired with a rendezvous when `with_call` is
    requested.
    """
    if state.code is not grpc.StatusCode.OK:
        raise _InactiveRpcError(state)
    if with_call:
        rendezvous = _MultiThreadedRendezvous(state, call, None, deadline)
        return state.response, rendezvous
    return state.response
+
+
def _stream_unary_invocation_operationses(metadata, initial_metadata_flags):
    """Build the two operation batches used to invoke a stream-unary RPC."""
    main_batch = (
        cygrpc.SendInitialMetadataOperation(metadata, initial_metadata_flags),
        cygrpc.ReceiveMessageOperation(_EMPTY_FLAGS),
        cygrpc.ReceiveStatusOnClientOperation(_EMPTY_FLAGS),
    )
    metadata_batch = (cygrpc.ReceiveInitialMetadataOperation(_EMPTY_FLAGS),)
    return (main_batch, metadata_batch)
+
+
def _stream_unary_invocation_operationses_and_tags(metadata,
                                                   initial_metadata_flags):
    """Pair each stream-unary operation batch with a None tag."""
    operationses = _stream_unary_invocation_operationses(
        metadata, initial_metadata_flags)
    return tuple((operations, None) for operations in operationses)
+
+
def _determine_deadline(user_deadline):
    """Combine the user deadline with any deadline inherited from context.

    When both are set the earlier of the two wins; when neither is set
    the result is None.
    """
    parent_deadline = cygrpc.get_deadline_from_context()
    if parent_deadline is None:
        return user_deadline
    if user_deadline is None:
        return parent_deadline
    return min(parent_deadline, user_deadline)
+
+
class _UnaryUnaryMultiCallable(grpc.UnaryUnaryMultiCallable):
    """Client-side invoker for a unary-request, unary-response method."""

    # pylint: disable=too-many-arguments
    def __init__(self, channel, managed_call, method, request_serializer,
                 response_deserializer):
        self._channel = channel
        self._managed_call = managed_call
        self._method = method
        self._request_serializer = request_serializer
        self._response_deserializer = response_deserializer
        self._context = cygrpc.build_census_context()

    def _prepare(self, request, timeout, metadata, wait_for_ready, compression):
        """Serialize the request and assemble the unary-unary batch.

        Returns (state, operations, deadline, error); on serialization
        failure the first three are None and `error` carries the
        _InactiveRpcError to raise.
        """
        deadline, serialized_request, rendezvous = _start_unary_request(
            request, timeout, self._request_serializer)
        initial_metadata_flags = _InitialMetadataFlags().with_wait_for_ready(
            wait_for_ready)
        augmented_metadata = _compression.augment_metadata(
            metadata, compression)
        if serialized_request is None:
            return None, None, None, rendezvous
        else:
            state = _RPCState(_UNARY_UNARY_INITIAL_DUE, None, None, None, None)
            operations = (
                cygrpc.SendInitialMetadataOperation(augmented_metadata,
                                                    initial_metadata_flags),
                cygrpc.SendMessageOperation(serialized_request, _EMPTY_FLAGS),
                cygrpc.SendCloseFromClientOperation(_EMPTY_FLAGS),
                cygrpc.ReceiveInitialMetadataOperation(_EMPTY_FLAGS),
                cygrpc.ReceiveMessageOperation(_EMPTY_FLAGS),
                cygrpc.ReceiveStatusOnClientOperation(_EMPTY_FLAGS),
            )
            return state, operations, deadline, None

    def _blocking(self, request, timeout, metadata, credentials, wait_for_ready,
                  compression):
        """Run the RPC synchronously on a segregated call; returns (state, call)."""
        state, operations, deadline, rendezvous = self._prepare(
            request, timeout, metadata, wait_for_ready, compression)
        if state is None:
            raise rendezvous  # pylint: disable-msg=raising-bad-type
        else:
            call = self._channel.segregated_call(
                cygrpc.PropagationConstants.GRPC_PROPAGATE_DEFAULTS,
                self._method, None, _determine_deadline(deadline), metadata,
                None if credentials is None else credentials._credentials, ((
                    operations,
                    None,
                ),), self._context)
            # Single event carries the whole unary-unary batch.
            event = call.next_event()
            _handle_event(event, state, self._response_deserializer)
            return state, call

    def __call__(self,
                 request,
                 timeout=None,
                 metadata=None,
                 credentials=None,
                 wait_for_ready=None,
                 compression=None):
        """See grpc.UnaryUnaryMultiCallable.__call__."""
        state, call, = self._blocking(request, timeout, metadata, credentials,
                                      wait_for_ready, compression)
        return _end_unary_response_blocking(state, call, False, None)

    def with_call(self,
                  request,
                  timeout=None,
                  metadata=None,
                  credentials=None,
                  wait_for_ready=None,
                  compression=None):
        """See grpc.UnaryUnaryMultiCallable.with_call."""
        state, call, = self._blocking(request, timeout, metadata, credentials,
                                      wait_for_ready, compression)
        return _end_unary_response_blocking(state, call, True, None)

    def future(self,
               request,
               timeout=None,
               metadata=None,
               credentials=None,
               wait_for_ready=None,
               compression=None):
        """See grpc.UnaryUnaryMultiCallable.future."""
        state, operations, deadline, rendezvous = self._prepare(
            request, timeout, metadata, wait_for_ready, compression)
        if state is None:
            raise rendezvous  # pylint: disable-msg=raising-bad-type
        else:
            # Asynchronous path: the channel spin thread drives the RPC.
            event_handler = _event_handler(state, self._response_deserializer)
            call = self._managed_call(
                cygrpc.PropagationConstants.GRPC_PROPAGATE_DEFAULTS,
                self._method, None, deadline, metadata,
                None if credentials is None else credentials._credentials,
                (operations,), event_handler, self._context)
            return _MultiThreadedRendezvous(state, call,
                                            self._response_deserializer,
                                            deadline)
+
+
class _SingleThreadedUnaryStreamMultiCallable(grpc.UnaryStreamMultiCallable):
    """Unary-stream invoker that avoids the channel spin thread.

    Produces a _SingleThreadedRendezvous driven entirely by the calling
    thread.
    """

    # pylint: disable=too-many-arguments
    def __init__(self, channel, method, request_serializer,
                 response_deserializer):
        self._channel = channel
        self._method = method
        self._request_serializer = request_serializer
        self._response_deserializer = response_deserializer
        self._context = cygrpc.build_census_context()

    def __call__(  # pylint: disable=too-many-locals
            self,
            request,
            timeout=None,
            metadata=None,
            credentials=None,
            wait_for_ready=None,
            compression=None):
        """See grpc.UnaryStreamMultiCallable.__call__."""
        deadline = _deadline(timeout)
        serialized_request = _common.serialize(request,
                                               self._request_serializer)
        if serialized_request is None:
            state = _RPCState((), (), (), grpc.StatusCode.INTERNAL,
                              'Exception serializing request!')
            raise _InactiveRpcError(state)

        state = _RPCState(_UNARY_STREAM_INITIAL_DUE, None, None, None, None)
        call_credentials = None if credentials is None else credentials._credentials
        initial_metadata_flags = _InitialMetadataFlags().with_wait_for_ready(
            wait_for_ready)
        augmented_metadata = _compression.augment_metadata(
            metadata, compression)
        # Three batches: send side (metadata+message+half-close), status,
        # and initial metadata; messages are received one at a time later.
        operations = (
            (cygrpc.SendInitialMetadataOperation(augmented_metadata,
                                                 initial_metadata_flags),
             cygrpc.SendMessageOperation(serialized_request, _EMPTY_FLAGS),
             cygrpc.SendCloseFromClientOperation(_EMPTY_FLAGS)),
            (cygrpc.ReceiveStatusOnClientOperation(_EMPTY_FLAGS),),
            (cygrpc.ReceiveInitialMetadataOperation(_EMPTY_FLAGS),),
        )
        operations_and_tags = tuple((ops, None) for ops in operations)
        call = self._channel.segregated_call(
            cygrpc.PropagationConstants.GRPC_PROPAGATE_DEFAULTS, self._method,
            None, _determine_deadline(deadline), metadata, call_credentials,
            operations_and_tags, self._context)
        return _SingleThreadedRendezvous(state, call,
                                         self._response_deserializer, deadline)
+
+
+class _UnaryStreamMultiCallable(grpc.UnaryStreamMultiCallable):
+
+ # pylint: disable=too-many-arguments
+ def __init__(self, channel, managed_call, method, request_serializer,
+ response_deserializer):
+ self._channel = channel
+ self._managed_call = managed_call
+ self._method = method
+ self._request_serializer = request_serializer
+ self._response_deserializer = response_deserializer
+ self._context = cygrpc.build_census_context()
+
+ def __call__( # pylint: disable=too-many-locals
+ self,
+ request,
+ timeout=None,
+ metadata=None,
+ credentials=None,
+ wait_for_ready=None,
+ compression=None):
+ deadline, serialized_request, rendezvous = _start_unary_request(
+ request, timeout, self._request_serializer)
+ initial_metadata_flags = _InitialMetadataFlags().with_wait_for_ready(
+ wait_for_ready)
+ if serialized_request is None:
+ raise rendezvous # pylint: disable-msg=raising-bad-type
+ else:
+ augmented_metadata = _compression.augment_metadata(
+ metadata, compression)
+ state = _RPCState(_UNARY_STREAM_INITIAL_DUE, None, None, None, None)
+ operationses = (
+ (
+ cygrpc.SendInitialMetadataOperation(augmented_metadata,
+ initial_metadata_flags),
+ cygrpc.SendMessageOperation(serialized_request,
+ _EMPTY_FLAGS),
+ cygrpc.SendCloseFromClientOperation(_EMPTY_FLAGS),
+ cygrpc.ReceiveStatusOnClientOperation(_EMPTY_FLAGS),
+ ),
+ (cygrpc.ReceiveInitialMetadataOperation(_EMPTY_FLAGS),),
+ )
+ call = self._managed_call(
+ cygrpc.PropagationConstants.GRPC_PROPAGATE_DEFAULTS,
+ self._method, None, _determine_deadline(deadline), metadata,
+ None if credentials is None else credentials._credentials,
+ operationses, _event_handler(state,
+ self._response_deserializer),
+ self._context)
+ return _MultiThreadedRendezvous(state, call,
+ self._response_deserializer,
+ deadline)
+
+
+class _StreamUnaryMultiCallable(grpc.StreamUnaryMultiCallable):
+
+ # pylint: disable=too-many-arguments
+ def __init__(self, channel, managed_call, method, request_serializer,
+ response_deserializer):
+ self._channel = channel
+ self._managed_call = managed_call
+ self._method = method
+ self._request_serializer = request_serializer
+ self._response_deserializer = response_deserializer
+ self._context = cygrpc.build_census_context()
+
+ def _blocking(self, request_iterator, timeout, metadata, credentials,
+ wait_for_ready, compression):
+ deadline = _deadline(timeout)
+ state = _RPCState(_STREAM_UNARY_INITIAL_DUE, None, None, None, None)
+ initial_metadata_flags = _InitialMetadataFlags().with_wait_for_ready(
+ wait_for_ready)
+ augmented_metadata = _compression.augment_metadata(
+ metadata, compression)
+ call = self._channel.segregated_call(
+ cygrpc.PropagationConstants.GRPC_PROPAGATE_DEFAULTS, self._method,
+ None, _determine_deadline(deadline), augmented_metadata,
+ None if credentials is None else credentials._credentials,
+ _stream_unary_invocation_operationses_and_tags(
+ augmented_metadata, initial_metadata_flags), self._context)
+ _consume_request_iterator(request_iterator, state, call,
+ self._request_serializer, None)
+ while True:
+ event = call.next_event()
+ with state.condition:
+ _handle_event(event, state, self._response_deserializer)
+ state.condition.notify_all()
+ if not state.due:
+ break
+ return state, call
+
+ def __call__(self,
+ request_iterator,
+ timeout=None,
+ metadata=None,
+ credentials=None,
+ wait_for_ready=None,
+ compression=None):
+ state, call, = self._blocking(request_iterator, timeout, metadata,
+ credentials, wait_for_ready, compression)
+ return _end_unary_response_blocking(state, call, False, None)
+
+ def with_call(self,
+ request_iterator,
+ timeout=None,
+ metadata=None,
+ credentials=None,
+ wait_for_ready=None,
+ compression=None):
+ state, call, = self._blocking(request_iterator, timeout, metadata,
+ credentials, wait_for_ready, compression)
+ return _end_unary_response_blocking(state, call, True, None)
+
+ def future(self,
+ request_iterator,
+ timeout=None,
+ metadata=None,
+ credentials=None,
+ wait_for_ready=None,
+ compression=None):
+ deadline = _deadline(timeout)
+ state = _RPCState(_STREAM_UNARY_INITIAL_DUE, None, None, None, None)
+ event_handler = _event_handler(state, self._response_deserializer)
+ initial_metadata_flags = _InitialMetadataFlags().with_wait_for_ready(
+ wait_for_ready)
+ augmented_metadata = _compression.augment_metadata(
+ metadata, compression)
+ call = self._managed_call(
+ cygrpc.PropagationConstants.GRPC_PROPAGATE_DEFAULTS, self._method,
+ None, deadline, augmented_metadata,
+ None if credentials is None else credentials._credentials,
+ _stream_unary_invocation_operationses(metadata,
+ initial_metadata_flags),
+ event_handler, self._context)
+ _consume_request_iterator(request_iterator, state, call,
+ self._request_serializer, event_handler)
+ return _MultiThreadedRendezvous(state, call,
+ self._response_deserializer, deadline)
+
+
+class _StreamStreamMultiCallable(grpc.StreamStreamMultiCallable):
+
+ # pylint: disable=too-many-arguments
+ def __init__(self, channel, managed_call, method, request_serializer,
+ response_deserializer):
+ self._channel = channel
+ self._managed_call = managed_call
+ self._method = method
+ self._request_serializer = request_serializer
+ self._response_deserializer = response_deserializer
+ self._context = cygrpc.build_census_context()
+
+ def __call__(self,
+ request_iterator,
+ timeout=None,
+ metadata=None,
+ credentials=None,
+ wait_for_ready=None,
+ compression=None):
+ deadline = _deadline(timeout)
+ state = _RPCState(_STREAM_STREAM_INITIAL_DUE, None, None, None, None)
+ initial_metadata_flags = _InitialMetadataFlags().with_wait_for_ready(
+ wait_for_ready)
+ augmented_metadata = _compression.augment_metadata(
+ metadata, compression)
+ operationses = (
+ (
+ cygrpc.SendInitialMetadataOperation(augmented_metadata,
+ initial_metadata_flags),
+ cygrpc.ReceiveStatusOnClientOperation(_EMPTY_FLAGS),
+ ),
+ (cygrpc.ReceiveInitialMetadataOperation(_EMPTY_FLAGS),),
+ )
+ event_handler = _event_handler(state, self._response_deserializer)
+ call = self._managed_call(
+ cygrpc.PropagationConstants.GRPC_PROPAGATE_DEFAULTS, self._method,
+ None, _determine_deadline(deadline), augmented_metadata,
+ None if credentials is None else credentials._credentials,
+ operationses, event_handler, self._context)
+ _consume_request_iterator(request_iterator, state, call,
+ self._request_serializer, event_handler)
+ return _MultiThreadedRendezvous(state, call,
+ self._response_deserializer, deadline)
+
+
+class _InitialMetadataFlags(int):
+ """Stores immutable initial metadata flags"""
+
+ def __new__(cls, value=_EMPTY_FLAGS):
+ value &= cygrpc.InitialMetadataFlags.used_mask
+ return super(_InitialMetadataFlags, cls).__new__(cls, value)
+
+ def with_wait_for_ready(self, wait_for_ready):
+ if wait_for_ready is not None:
+ if wait_for_ready:
+ return self.__class__(self | cygrpc.InitialMetadataFlags.wait_for_ready | \
+ cygrpc.InitialMetadataFlags.wait_for_ready_explicitly_set)
+ elif not wait_for_ready:
+ return self.__class__(self & ~cygrpc.InitialMetadataFlags.wait_for_ready | \
+ cygrpc.InitialMetadataFlags.wait_for_ready_explicitly_set)
+ return self
+
+
+class _ChannelCallState(object):
+
+ def __init__(self, channel):
+ self.lock = threading.Lock()
+ self.channel = channel
+ self.managed_calls = 0
+ self.threading = False
+
+ def reset_postfork_child(self):
+ self.managed_calls = 0
+
+ def __del__(self):
+ try:
+ self.channel.close(cygrpc.StatusCode.cancelled,
+ 'Channel deallocated!')
+ except (TypeError, AttributeError):
+ pass
+
+
+def _run_channel_spin_thread(state):
+
+ def channel_spin():
+ while True:
+ cygrpc.block_if_fork_in_progress(state)
+ event = state.channel.next_call_event()
+ if event.completion_type == cygrpc.CompletionType.queue_timeout:
+ continue
+ call_completed = event.tag(event)
+ if call_completed:
+ with state.lock:
+ state.managed_calls -= 1
+ if state.managed_calls == 0:
+ return
+
+ channel_spin_thread = cygrpc.ForkManagedThread(target=channel_spin)
+ channel_spin_thread.setDaemon(True)
+ channel_spin_thread.start()
+
+
+def _channel_managed_call_management(state):
+
+ # pylint: disable=too-many-arguments
+ def create(flags, method, host, deadline, metadata, credentials,
+ operationses, event_handler, context):
+ """Creates a cygrpc.IntegratedCall.
+
+ Args:
+ flags: An integer bitfield of call flags.
+ method: The RPC method.
+ host: A host string for the created call.
+ deadline: A float to be the deadline of the created call or None if
+ the call is to have an infinite deadline.
+ metadata: The metadata for the call or None.
+ credentials: A cygrpc.CallCredentials or None.
+ operationses: An iterable of iterables of cygrpc.Operations to be
+ started on the call.
+ event_handler: A behavior to call to handle the events resultant from
+ the operations on the call.
+ context: Context object for distributed tracing.
+ Returns:
+ A cygrpc.IntegratedCall with which to conduct an RPC.
+ """
+ operationses_and_tags = tuple((
+ operations,
+ event_handler,
+ ) for operations in operationses)
+ with state.lock:
+ call = state.channel.integrated_call(flags, method, host, deadline,
+ metadata, credentials,
+ operationses_and_tags, context)
+ if state.managed_calls == 0:
+ state.managed_calls = 1
+ _run_channel_spin_thread(state)
+ else:
+ state.managed_calls += 1
+ return call
+
+ return create
+
+
+class _ChannelConnectivityState(object):
+
+ def __init__(self, channel):
+ self.lock = threading.RLock()
+ self.channel = channel
+ self.polling = False
+ self.connectivity = None
+ self.try_to_connect = False
+ self.callbacks_and_connectivities = []
+ self.delivering = False
+
+ def reset_postfork_child(self):
+ self.polling = False
+ self.connectivity = None
+ self.try_to_connect = False
+ self.callbacks_and_connectivities = []
+ self.delivering = False
+
+
+def _deliveries(state):
+ callbacks_needing_update = []
+ for callback_and_connectivity in state.callbacks_and_connectivities:
+ callback, callback_connectivity, = callback_and_connectivity
+ if callback_connectivity is not state.connectivity:
+ callbacks_needing_update.append(callback)
+ callback_and_connectivity[1] = state.connectivity
+ return callbacks_needing_update
+
+
+def _deliver(state, initial_connectivity, initial_callbacks):
+ connectivity = initial_connectivity
+ callbacks = initial_callbacks
+ while True:
+ for callback in callbacks:
+ cygrpc.block_if_fork_in_progress(state)
+ try:
+ callback(connectivity)
+ except Exception: # pylint: disable=broad-except
+ _LOGGER.exception(
+ _CHANNEL_SUBSCRIPTION_CALLBACK_ERROR_LOG_MESSAGE)
+ with state.lock:
+ callbacks = _deliveries(state)
+ if callbacks:
+ connectivity = state.connectivity
+ else:
+ state.delivering = False
+ return
+
+
+def _spawn_delivery(state, callbacks):
+ delivering_thread = cygrpc.ForkManagedThread(target=_deliver,
+ args=(
+ state,
+ state.connectivity,
+ callbacks,
+ ))
+ delivering_thread.setDaemon(True)
+ delivering_thread.start()
+ state.delivering = True
+
+
+# NOTE(https://github.com/grpc/grpc/issues/3064): We'd rather not poll.
+def _poll_connectivity(state, channel, initial_try_to_connect):
+ try_to_connect = initial_try_to_connect
+ connectivity = channel.check_connectivity_state(try_to_connect)
+ with state.lock:
+ state.connectivity = (
+ _common.
+ CYGRPC_CONNECTIVITY_STATE_TO_CHANNEL_CONNECTIVITY[connectivity])
+ callbacks = tuple(
+ callback for callback, unused_but_known_to_be_none_connectivity in
+ state.callbacks_and_connectivities)
+ for callback_and_connectivity in state.callbacks_and_connectivities:
+ callback_and_connectivity[1] = state.connectivity
+ if callbacks:
+ _spawn_delivery(state, callbacks)
+ while True:
+ event = channel.watch_connectivity_state(connectivity,
+ time.time() + 0.2)
+ cygrpc.block_if_fork_in_progress(state)
+ with state.lock:
+ if not state.callbacks_and_connectivities and not state.try_to_connect:
+ state.polling = False
+ state.connectivity = None
+ break
+ try_to_connect = state.try_to_connect
+ state.try_to_connect = False
+ if event.success or try_to_connect:
+ connectivity = channel.check_connectivity_state(try_to_connect)
+ with state.lock:
+ state.connectivity = (
+ _common.CYGRPC_CONNECTIVITY_STATE_TO_CHANNEL_CONNECTIVITY[
+ connectivity])
+ if not state.delivering:
+ callbacks = _deliveries(state)
+ if callbacks:
+ _spawn_delivery(state, callbacks)
+
+
+def _subscribe(state, callback, try_to_connect):
+ with state.lock:
+ if not state.callbacks_and_connectivities and not state.polling:
+ polling_thread = cygrpc.ForkManagedThread(
+ target=_poll_connectivity,
+ args=(state, state.channel, bool(try_to_connect)))
+ polling_thread.setDaemon(True)
+ polling_thread.start()
+ state.polling = True
+ state.callbacks_and_connectivities.append([callback, None])
+ elif not state.delivering and state.connectivity is not None:
+ _spawn_delivery(state, (callback,))
+ state.try_to_connect |= bool(try_to_connect)
+ state.callbacks_and_connectivities.append(
+ [callback, state.connectivity])
+ else:
+ state.try_to_connect |= bool(try_to_connect)
+ state.callbacks_and_connectivities.append([callback, None])
+
+
+def _unsubscribe(state, callback):
+ with state.lock:
+ for index, (subscribed_callback, unused_connectivity) in enumerate(
+ state.callbacks_and_connectivities):
+ if callback == subscribed_callback:
+ state.callbacks_and_connectivities.pop(index)
+ break
+
+
+def _augment_options(base_options, compression):
+ compression_option = _compression.create_channel_option(compression)
+ return tuple(base_options) + compression_option + ((
+ cygrpc.ChannelArgKey.primary_user_agent_string,
+ _USER_AGENT,
+ ),)
+
+
+def _separate_channel_options(options):
+ """Separates core channel options from Python channel options."""
+ core_options = []
+ python_options = []
+ for pair in options:
+ if pair[0] == grpc.experimental.ChannelOptions.SingleThreadedUnaryStream:
+ python_options.append(pair)
+ else:
+ core_options.append(pair)
+ return python_options, core_options
+
+
+class Channel(grpc.Channel):
+ """A cygrpc.Channel-backed implementation of grpc.Channel."""
+
+ def __init__(self, target, options, credentials, compression):
+ """Constructor.
+
+ Args:
+ target: The target to which to connect.
+ options: Configuration options for the channel.
+ credentials: A cygrpc.ChannelCredentials or None.
+ compression: An optional value indicating the compression method to be
+ used over the lifetime of the channel.
+ """
+ python_options, core_options = _separate_channel_options(options)
+ self._single_threaded_unary_stream = _DEFAULT_SINGLE_THREADED_UNARY_STREAM
+ self._process_python_options(python_options)
+ self._channel = cygrpc.Channel(
+ _common.encode(target), _augment_options(core_options, compression),
+ credentials)
+ self._call_state = _ChannelCallState(self._channel)
+ self._connectivity_state = _ChannelConnectivityState(self._channel)
+ cygrpc.fork_register_channel(self)
+ if cygrpc.g_gevent_activated:
+ cygrpc.gevent_increment_channel_count()
+
+ def _process_python_options(self, python_options):
+ """Sets channel attributes according to python-only channel options."""
+ for pair in python_options:
+ if pair[0] == grpc.experimental.ChannelOptions.SingleThreadedUnaryStream:
+ self._single_threaded_unary_stream = True
+
+ def subscribe(self, callback, try_to_connect=None):
+ _subscribe(self._connectivity_state, callback, try_to_connect)
+
+ def unsubscribe(self, callback):
+ _unsubscribe(self._connectivity_state, callback)
+
+ def unary_unary(self,
+ method,
+ request_serializer=None,
+ response_deserializer=None):
+ return _UnaryUnaryMultiCallable(
+ self._channel, _channel_managed_call_management(self._call_state),
+ _common.encode(method), request_serializer, response_deserializer)
+
+ def unary_stream(self,
+ method,
+ request_serializer=None,
+ response_deserializer=None):
+ # NOTE(rbellevi): Benchmarks have shown that running a unary-stream RPC
+ # on a single Python thread results in an appreciable speed-up. However,
+ # due to slight differences in capability, the multi-threaded variant
+ # remains the default.
+ if self._single_threaded_unary_stream:
+ return _SingleThreadedUnaryStreamMultiCallable(
+ self._channel, _common.encode(method), request_serializer,
+ response_deserializer)
+ else:
+ return _UnaryStreamMultiCallable(
+ self._channel,
+ _channel_managed_call_management(self._call_state),
+ _common.encode(method), request_serializer,
+ response_deserializer)
+
+ def stream_unary(self,
+ method,
+ request_serializer=None,
+ response_deserializer=None):
+ return _StreamUnaryMultiCallable(
+ self._channel, _channel_managed_call_management(self._call_state),
+ _common.encode(method), request_serializer, response_deserializer)
+
+ def stream_stream(self,
+ method,
+ request_serializer=None,
+ response_deserializer=None):
+ return _StreamStreamMultiCallable(
+ self._channel, _channel_managed_call_management(self._call_state),
+ _common.encode(method), request_serializer, response_deserializer)
+
+ def _unsubscribe_all(self):
+ state = self._connectivity_state
+ if state:
+ with state.lock:
+ del state.callbacks_and_connectivities[:]
+
+ def _close(self):
+ self._unsubscribe_all()
+ self._channel.close(cygrpc.StatusCode.cancelled, 'Channel closed!')
+ cygrpc.fork_unregister_channel(self)
+ if cygrpc.g_gevent_activated:
+ cygrpc.gevent_decrement_channel_count()
+
+ def _close_on_fork(self):
+ self._unsubscribe_all()
+ self._channel.close_on_fork(cygrpc.StatusCode.cancelled,
+ 'Channel closed due to fork')
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ self._close()
+ return False
+
+ def close(self):
+ self._close()
+
+ def __del__(self):
+ # TODO(https://github.com/grpc/grpc/issues/12531): Several releases
+ # after 1.12 (1.16 or thereabouts?) add a "self._channel.close" call
+ # here (or more likely, call self._close() here). We don't do this today
+ # because many valid use cases today allow the channel to be deleted
+ # immediately after stubs are created. After a sufficient period of time
+ # has passed for all users to be trusted to freeze out to their channels
+ # for as long as they are in use and to close them after using them,
+ # then deletion of this grpc._channel.Channel instance can be made to
+ # effect closure of the underlying cygrpc.Channel instance.
+ try:
+ self._unsubscribe_all()
+ except: # pylint: disable=bare-except
+ # Exceptions in __del__ are ignored by Python anyway, but they can
+ # keep spamming logs. Just silence them.
+ pass
diff --git a/contrib/python/grpcio/py2/grpc/_common.py b/contrib/python/grpcio/py2/grpc/_common.py
new file mode 100644
index 0000000000..d8f951456e
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/_common.py
@@ -0,0 +1,168 @@
+# Copyright 2016 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Shared implementation."""
+
+import logging
+import time
+
+import grpc
+from grpc._cython import cygrpc
+import six
+
+_LOGGER = logging.getLogger(__name__)
+
+CYGRPC_CONNECTIVITY_STATE_TO_CHANNEL_CONNECTIVITY = {
+ cygrpc.ConnectivityState.idle:
+ grpc.ChannelConnectivity.IDLE,
+ cygrpc.ConnectivityState.connecting:
+ grpc.ChannelConnectivity.CONNECTING,
+ cygrpc.ConnectivityState.ready:
+ grpc.ChannelConnectivity.READY,
+ cygrpc.ConnectivityState.transient_failure:
+ grpc.ChannelConnectivity.TRANSIENT_FAILURE,
+ cygrpc.ConnectivityState.shutdown:
+ grpc.ChannelConnectivity.SHUTDOWN,
+}
+
+CYGRPC_STATUS_CODE_TO_STATUS_CODE = {
+ cygrpc.StatusCode.ok: grpc.StatusCode.OK,
+ cygrpc.StatusCode.cancelled: grpc.StatusCode.CANCELLED,
+ cygrpc.StatusCode.unknown: grpc.StatusCode.UNKNOWN,
+ cygrpc.StatusCode.invalid_argument: grpc.StatusCode.INVALID_ARGUMENT,
+ cygrpc.StatusCode.deadline_exceeded: grpc.StatusCode.DEADLINE_EXCEEDED,
+ cygrpc.StatusCode.not_found: grpc.StatusCode.NOT_FOUND,
+ cygrpc.StatusCode.already_exists: grpc.StatusCode.ALREADY_EXISTS,
+ cygrpc.StatusCode.permission_denied: grpc.StatusCode.PERMISSION_DENIED,
+ cygrpc.StatusCode.unauthenticated: grpc.StatusCode.UNAUTHENTICATED,
+ cygrpc.StatusCode.resource_exhausted: grpc.StatusCode.RESOURCE_EXHAUSTED,
+ cygrpc.StatusCode.failed_precondition: grpc.StatusCode.FAILED_PRECONDITION,
+ cygrpc.StatusCode.aborted: grpc.StatusCode.ABORTED,
+ cygrpc.StatusCode.out_of_range: grpc.StatusCode.OUT_OF_RANGE,
+ cygrpc.StatusCode.unimplemented: grpc.StatusCode.UNIMPLEMENTED,
+ cygrpc.StatusCode.internal: grpc.StatusCode.INTERNAL,
+ cygrpc.StatusCode.unavailable: grpc.StatusCode.UNAVAILABLE,
+ cygrpc.StatusCode.data_loss: grpc.StatusCode.DATA_LOSS,
+}
+STATUS_CODE_TO_CYGRPC_STATUS_CODE = {
+ grpc_code: cygrpc_code for cygrpc_code, grpc_code in six.iteritems(
+ CYGRPC_STATUS_CODE_TO_STATUS_CODE)
+}
+
+MAXIMUM_WAIT_TIMEOUT = 0.1
+
+_ERROR_MESSAGE_PORT_BINDING_FAILED = 'Failed to bind to address %s; set ' \
+ 'GRPC_VERBOSITY=debug environment variable to see detailed error message.'
+
+
+def encode(s):
+ if isinstance(s, bytes):
+ return s
+ else:
+ return s.encode('utf8')
+
+
+def decode(b):
+ if isinstance(b, bytes):
+ return b.decode('utf-8', 'replace')
+ return b
+
+
+def _transform(message, transformer, exception_message):
+ if transformer is None:
+ return message
+ else:
+ try:
+ return transformer(message)
+ except Exception: # pylint: disable=broad-except
+ _LOGGER.exception(exception_message)
+ return None
+
+
+def serialize(message, serializer):
+ return _transform(message, serializer, 'Exception serializing message!')
+
+
+def deserialize(serialized_message, deserializer):
+ return _transform(serialized_message, deserializer,
+ 'Exception deserializing message!')
+
+
+def fully_qualified_method(group, method):
+ return '/{}/{}'.format(group, method)
+
+
+def _wait_once(wait_fn, timeout, spin_cb):
+ wait_fn(timeout=timeout)
+ if spin_cb is not None:
+ spin_cb()
+
+
+def wait(wait_fn, wait_complete_fn, timeout=None, spin_cb=None):
+ """Blocks waiting for an event without blocking the thread indefinitely.
+
+ See https://github.com/grpc/grpc/issues/19464 for full context. CPython's
+ `threading.Event.wait` and `threading.Condition.wait` methods, if invoked
+ without a timeout kwarg, may block the calling thread indefinitely. If the
+ call is made from the main thread, this means that signal handlers may not
+ run for an arbitrarily long period of time.
+
+ This wrapper calls the supplied wait function with an arbitrary short
+ timeout to ensure that no signal handler has to wait longer than
+ MAXIMUM_WAIT_TIMEOUT before executing.
+
+ Args:
+ wait_fn: A callable acceptable a single float-valued kwarg named
+ `timeout`. This function is expected to be one of `threading.Event.wait`
+ or `threading.Condition.wait`.
+ wait_complete_fn: A callable taking no arguments and returning a bool.
+ When this function returns true, it indicates that waiting should cease.
+ timeout: An optional float-valued number of seconds after which the wait
+ should cease.
+ spin_cb: An optional Callable taking no arguments and returning nothing.
+ This callback will be called on each iteration of the spin. This may be
+ used for, e.g. work related to forking.
+
+ Returns:
+ True if a timeout was supplied and it was reached. False otherwise.
+ """
+ if timeout is None:
+ while not wait_complete_fn():
+ _wait_once(wait_fn, MAXIMUM_WAIT_TIMEOUT, spin_cb)
+ else:
+ end = time.time() + timeout
+ while not wait_complete_fn():
+ remaining = min(end - time.time(), MAXIMUM_WAIT_TIMEOUT)
+ if remaining < 0:
+ return True
+ _wait_once(wait_fn, remaining, spin_cb)
+ return False
+
+
+def validate_port_binding_result(address, port):
+ """Validates if the port binding succeed.
+
+ If the port returned by Core is 0, the binding is failed. However, in that
+ case, the Core API doesn't return a detailed failing reason. The best we
+ can do is raising an exception to prevent further confusion.
+
+ Args:
+ address: The address string to be bound.
+ port: An int returned by core
+ """
+ if port == 0:
+ # The Core API doesn't return a failure message. The best we can do
+ # is raising an exception to prevent further confusion.
+ raise RuntimeError(_ERROR_MESSAGE_PORT_BINDING_FAILED % address)
+ else:
+ return port
diff --git a/contrib/python/grpcio/py2/grpc/_compression.py b/contrib/python/grpcio/py2/grpc/_compression.py
new file mode 100644
index 0000000000..45339c3afe
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/_compression.py
@@ -0,0 +1,55 @@
+# Copyright 2019 The gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from grpc._cython import cygrpc
+
+NoCompression = cygrpc.CompressionAlgorithm.none
+Deflate = cygrpc.CompressionAlgorithm.deflate
+Gzip = cygrpc.CompressionAlgorithm.gzip
+
+_METADATA_STRING_MAPPING = {
+ NoCompression: 'identity',
+ Deflate: 'deflate',
+ Gzip: 'gzip',
+}
+
+
+def _compression_algorithm_to_metadata_value(compression):
+ return _METADATA_STRING_MAPPING[compression]
+
+
+def compression_algorithm_to_metadata(compression):
+ return (cygrpc.GRPC_COMPRESSION_REQUEST_ALGORITHM_MD_KEY,
+ _compression_algorithm_to_metadata_value(compression))
+
+
+def create_channel_option(compression):
+ return ((cygrpc.GRPC_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM,
+ int(compression)),) if compression else ()
+
+
+def augment_metadata(metadata, compression):
+ if not metadata and not compression:
+ return None
+ base_metadata = tuple(metadata) if metadata else ()
+ compression_metadata = (
+ compression_algorithm_to_metadata(compression),) if compression else ()
+ return base_metadata + compression_metadata
+
+
+__all__ = (
+ "NoCompression",
+ "Deflate",
+ "Gzip",
+)
diff --git a/contrib/python/grpcio/py2/grpc/_cython/__init__.py b/contrib/python/grpcio/py2/grpc/_cython/__init__.py
new file mode 100644
index 0000000000..5fb4f3c3cf
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/_cython/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/__init__.py b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/__init__.py
new file mode 100644
index 0000000000..5fb4f3c3cf
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/_hooks.pxd.pxi b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/_hooks.pxd.pxi
new file mode 100644
index 0000000000..3eb10f5275
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/_hooks.pxd.pxi
@@ -0,0 +1,16 @@
+# Copyright 2018 The gRPC Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+cdef object _custom_op_on_c_call(int op, grpc_call *call)
diff --git a/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/_hooks.pyx.pxi b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/_hooks.pyx.pxi
new file mode 100644
index 0000000000..de4d71b819
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/_hooks.pyx.pxi
@@ -0,0 +1,35 @@
+# Copyright 2018 The gRPC Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+cdef object _custom_op_on_c_call(int op, grpc_call *call):
+ raise NotImplementedError("No custom hooks are implemented")
+
+def install_context_from_request_call_event(RequestCallEvent event):
+ pass
+
+def uninstall_context():
+ pass
+
+def build_census_context():
+ pass
+
+cdef class CensusContext:
+ pass
+
+def set_census_context_on_call(_CallState call_state, CensusContext census_ctx):
+ pass
+
+def get_deadline_from_context():
+ return None
diff --git a/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/aio/call.pxd.pxi b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/aio/call.pxd.pxi
new file mode 100644
index 0000000000..867245a694
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/aio/call.pxd.pxi
@@ -0,0 +1,47 @@
+# Copyright 2019 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+cdef class _AioCall(GrpcCallWrapper):
+ cdef:
+ readonly AioChannel _channel
+ list _references
+ object _deadline
+ list _done_callbacks
+
+ # Caches the picked event loop, so we can avoid the 30ns overhead each
+ # time we need access to the event loop.
+ object _loop
+
+ # Flag indicates whether cancel being called or not. Cancellation from
+ # Core or peer works perfectly fine with normal procedure. However, we
+ # need this flag to clean up resources for cancellation from the
+ # application layer. Directly cancelling tasks might cause segfault
+ # because Core is holding a pointer for the callback handler.
+ bint _is_locally_cancelled
+
+ # Following attributes are used for storing the status of the call and
+ # the initial metadata. Waiters are used for pausing the execution of
+ # tasks that are asking for one of the field when they are not yet
+ # available.
+ readonly AioRpcStatus _status
+ readonly tuple _initial_metadata
+ list _waiters_status
+ list _waiters_initial_metadata
+
+ int _send_initial_metadata_flags
+
+ cdef void _create_grpc_call(self, object timeout, bytes method, CallCredentials credentials) except *
+ cdef void _set_status(self, AioRpcStatus status) except *
+ cdef void _set_initial_metadata(self, tuple initial_metadata) except *
diff --git a/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/aio/call.pyx.pxi b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/aio/call.pyx.pxi
new file mode 100644
index 0000000000..7bce1850dc
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/aio/call.pyx.pxi
@@ -0,0 +1,508 @@
+# Copyright 2019 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+_EMPTY_FLAGS = 0
+_EMPTY_MASK = 0
+_IMMUTABLE_EMPTY_METADATA = tuple()
+
+_UNKNOWN_CANCELLATION_DETAILS = 'RPC cancelled for unknown reason.'
+_OK_CALL_REPRESENTATION = ('<{} of RPC that terminated with:\n'
+ '\tstatus = {}\n'
+ '\tdetails = "{}"\n'
+ '>')
+
+_NON_OK_CALL_REPRESENTATION = ('<{} of RPC that terminated with:\n'
+ '\tstatus = {}\n'
+ '\tdetails = "{}"\n'
+ '\tdebug_error_string = "{}"\n'
+ '>')
+
+
+cdef int _get_send_initial_metadata_flags(object wait_for_ready) except *:
+ cdef int flags = 0
+ # Wait-for-ready can be None, which means using default value in Core.
+ if wait_for_ready is not None:
+ flags |= InitialMetadataFlags.wait_for_ready_explicitly_set
+ if wait_for_ready:
+ flags |= InitialMetadataFlags.wait_for_ready
+
+ flags &= InitialMetadataFlags.used_mask
+ return flags
+
+
+cdef class _AioCall(GrpcCallWrapper):
+
+ def __cinit__(self, AioChannel channel, object deadline,
+ bytes method, CallCredentials call_credentials, object wait_for_ready):
+ init_grpc_aio()
+ self.call = NULL
+ self._channel = channel
+ self._loop = channel.loop
+ self._references = []
+ self._status = None
+ self._initial_metadata = None
+ self._waiters_status = []
+ self._waiters_initial_metadata = []
+ self._done_callbacks = []
+ self._is_locally_cancelled = False
+ self._deadline = deadline
+ self._send_initial_metadata_flags = _get_send_initial_metadata_flags(wait_for_ready)
+ self._create_grpc_call(deadline, method, call_credentials)
+
+ def __dealloc__(self):
+ if self.call:
+ grpc_call_unref(self.call)
+ shutdown_grpc_aio()
+
+ def _repr(self) -> str:
+ """Assembles the RPC representation string."""
+ # This needs to be loaded at run time once everything
+ # has been loaded.
+ from grpc import _common
+
+ if not self.done():
+ return '<{} object>'.format(self.__class__.__name__)
+
+ if self._status.code() is StatusCode.ok:
+ return _OK_CALL_REPRESENTATION.format(
+ self.__class__.__name__,
+ _common.CYGRPC_STATUS_CODE_TO_STATUS_CODE[self._status.code()],
+ self._status.details())
+ else:
+ return _NON_OK_CALL_REPRESENTATION.format(
+ self.__class__.__name__,
+ self._status.details(),
+ _common.CYGRPC_STATUS_CODE_TO_STATUS_CODE[self._status.code()],
+ self._status.debug_error_string())
+
+ def __repr__(self) -> str:
+ return self._repr()
+
+ def __str__(self) -> str:
+ return self._repr()
+
+ cdef void _create_grpc_call(self,
+ object deadline,
+ bytes method,
+ CallCredentials credentials) except *:
+ """Creates the corresponding Core object for this RPC.
+
+ For unary calls, the grpc_call lives shortly and can be destroyed after
+ invoke start_batch. However, if either side is streaming, the grpc_call
+ life span will be longer than one function. So, it would better save it
+ as an instance variable than a stack variable, which reflects its
+ nature in Core.
+ """
+ cdef grpc_slice method_slice
+ cdef gpr_timespec c_deadline = _timespec_from_time(deadline)
+ cdef grpc_call_error set_credentials_error
+
+ method_slice = grpc_slice_from_copied_buffer(
+ <const char *> method,
+ <size_t> len(method)
+ )
+ self.call = grpc_channel_create_call(
+ self._channel.channel,
+ NULL,
+ _EMPTY_MASK,
+ global_completion_queue(),
+ method_slice,
+ NULL,
+ c_deadline,
+ NULL
+ )
+
+ if credentials is not None:
+ set_credentials_error = grpc_call_set_credentials(self.call, credentials.c())
+ if set_credentials_error != GRPC_CALL_OK:
+ raise InternalError("Credentials couldn't have been set: {0}".format(set_credentials_error))
+
+ grpc_slice_unref(method_slice)
+
+ cdef void _set_status(self, AioRpcStatus status) except *:
+ cdef list waiters
+
+ # No more waiters should be expected since status has been set.
+ self._status = status
+
+ if self._initial_metadata is None:
+ self._set_initial_metadata(_IMMUTABLE_EMPTY_METADATA)
+
+ for waiter in self._waiters_status:
+ if not waiter.done():
+ waiter.set_result(None)
+ self._waiters_status = []
+
+ for callback in self._done_callbacks:
+ callback()
+
+ cdef void _set_initial_metadata(self, tuple initial_metadata) except *:
+ if self._initial_metadata is not None:
+ # Some gRPC calls might end before the initial metadata arrived in
+ # the Call object. That causes this method to be invoked twice: 1.
+ # filled with an empty metadata; 2. updated with the actual user
+ # provided metadata.
+ return
+
+ cdef list waiters
+
+ # No more waiters should be expected since initial metadata has been
+ # set.
+ self._initial_metadata = initial_metadata
+
+ for waiter in self._waiters_initial_metadata:
+ if not waiter.done():
+ waiter.set_result(None)
+ self._waiters_initial_metadata = []
+
+ def add_done_callback(self, callback):
+ if self.done():
+ callback()
+ else:
+ self._done_callbacks.append(callback)
+
+ def time_remaining(self):
+ if self._deadline is None:
+ return None
+ else:
+ return max(0, self._deadline - time.time())
+
+ def cancel(self, str details):
+ """Cancels the RPC in Core with given RPC status.
+
+ Above abstractions must invoke this method to set Core objects into
+ proper state.
+ """
+ self._is_locally_cancelled = True
+
+ cdef object details_bytes
+ cdef char *c_details
+ cdef grpc_call_error error
+
+ self._set_status(AioRpcStatus(
+ StatusCode.cancelled,
+ details,
+ None,
+ None,
+ ))
+
+ details_bytes = str_to_bytes(details)
+ self._references.append(details_bytes)
+ c_details = <char *>details_bytes
+        # By implementation, grpc_call_cancel_with_status always returns OK
+ error = grpc_call_cancel_with_status(
+ self.call,
+ StatusCode.cancelled,
+ c_details,
+ NULL,
+ )
+ assert error == GRPC_CALL_OK
+
+ def done(self):
+ """Returns if the RPC call has finished.
+
+ Checks if the status has been provided, either
+        because the RPC finished or because it was cancelled.
+
+ Returns:
+ True if the RPC can be considered finished.
+ """
+ return self._status is not None
+
+ def cancelled(self):
+ """Returns if the RPC was cancelled.
+
+ Returns:
+ True if the RPC was cancelled.
+ """
+ if not self.done():
+ return False
+
+ return self._status.code() == StatusCode.cancelled
+
+ async def status(self):
+ """Returns the status of the RPC call.
+
+        It returns the finished status of the RPC. If the RPC
+ has not finished yet this function will wait until the RPC
+ gets finished.
+
+ Returns:
+ Finished status of the RPC as an AioRpcStatus object.
+ """
+ if self._status is not None:
+ return self._status
+
+ future = self._loop.create_future()
+ self._waiters_status.append(future)
+ await future
+
+ return self._status
+
+ def is_ok(self):
+ """Returns if the RPC is ended with ok."""
+ return self.done() and self._status.code() == StatusCode.ok
+
+ async def initial_metadata(self):
+ """Returns the initial metadata of the RPC call.
+
+ If the initial metadata has not been received yet this function will
+ wait until the RPC gets finished.
+
+ Returns:
+ The tuple object with the initial metadata.
+ """
+ if self._initial_metadata is not None:
+ return self._initial_metadata
+
+ future = self._loop.create_future()
+ self._waiters_initial_metadata.append(future)
+ await future
+
+ return self._initial_metadata
+
+ def is_locally_cancelled(self):
+ """Returns if the RPC was cancelled locally.
+
+ Returns:
+          True when it was cancelled locally, False when it was cancelled
+          remotely or is still ongoing.
+ """
+ if self._is_locally_cancelled:
+ return True
+
+ return False
+
+ async def unary_unary(self,
+ bytes request,
+ tuple outbound_initial_metadata):
+ """Performs a unary unary RPC.
+
+ Args:
+ request: the serialized requests in bytes.
+ outbound_initial_metadata: optional outbound metadata.
+ """
+ cdef tuple ops
+
+ cdef SendInitialMetadataOperation initial_metadata_op = SendInitialMetadataOperation(
+ outbound_initial_metadata,
+ self._send_initial_metadata_flags)
+ cdef SendMessageOperation send_message_op = SendMessageOperation(request, _EMPTY_FLAGS)
+ cdef SendCloseFromClientOperation send_close_op = SendCloseFromClientOperation(_EMPTY_FLAGS)
+ cdef ReceiveInitialMetadataOperation receive_initial_metadata_op = ReceiveInitialMetadataOperation(_EMPTY_FLAGS)
+ cdef ReceiveMessageOperation receive_message_op = ReceiveMessageOperation(_EMPTY_FLAGS)
+ cdef ReceiveStatusOnClientOperation receive_status_on_client_op = ReceiveStatusOnClientOperation(_EMPTY_FLAGS)
+
+ ops = (initial_metadata_op, send_message_op, send_close_op,
+ receive_initial_metadata_op, receive_message_op,
+ receive_status_on_client_op)
+
+ # Executes all operations in one batch.
+ # Might raise CancelledError, handling it in Python UnaryUnaryCall.
+ await execute_batch(self,
+ ops,
+ self._loop)
+
+ self._set_initial_metadata(receive_initial_metadata_op.initial_metadata())
+
+ cdef grpc_status_code code
+ code = receive_status_on_client_op.code()
+
+ self._set_status(AioRpcStatus(
+ code,
+ receive_status_on_client_op.details(),
+ receive_status_on_client_op.trailing_metadata(),
+ receive_status_on_client_op.error_string(),
+ ))
+
+ if code == StatusCode.ok:
+ return receive_message_op.message()
+ else:
+ return None
+
+ async def _handle_status_once_received(self):
+ """Handles the status sent by peer once received."""
+ cdef ReceiveStatusOnClientOperation op = ReceiveStatusOnClientOperation(_EMPTY_FLAGS)
+ cdef tuple ops = (op,)
+ await execute_batch(self, ops, self._loop)
+
+ # Halts if the RPC is locally cancelled
+ if self._is_locally_cancelled:
+ return
+
+ self._set_status(AioRpcStatus(
+ op.code(),
+ op.details(),
+ op.trailing_metadata(),
+ op.error_string(),
+ ))
+
+ async def receive_serialized_message(self):
+ """Receives one single raw message in bytes."""
+ cdef bytes received_message
+
+ # Receives a message. Returns None when failed:
+ # * EOF, no more messages to read;
+ # * The client application cancels;
+ # * The server sends final status.
+ received_message = await _receive_message(
+ self,
+ self._loop
+ )
+ if received_message is not None:
+ return received_message
+ else:
+ return EOF
+
+ async def send_serialized_message(self, bytes message):
+ """Sends one single raw message in bytes."""
+ await _send_message(self,
+ message,
+ None,
+ False,
+ self._loop)
+
+ async def send_receive_close(self):
+ """Half close the RPC on the client-side."""
+ cdef SendCloseFromClientOperation op = SendCloseFromClientOperation(_EMPTY_FLAGS)
+ cdef tuple ops = (op,)
+ await execute_batch(self, ops, self._loop)
+
+ async def initiate_unary_stream(self,
+ bytes request,
+ tuple outbound_initial_metadata):
+ """Implementation of the start of a unary-stream call."""
+        # Peer may prematurely end this RPC at any point. We need a coroutine
+ # that watches if the server sends the final status.
+ status_task = self._loop.create_task(self._handle_status_once_received())
+
+ cdef tuple outbound_ops
+ cdef Operation initial_metadata_op = SendInitialMetadataOperation(
+ outbound_initial_metadata,
+ self._send_initial_metadata_flags)
+ cdef Operation send_message_op = SendMessageOperation(
+ request,
+ _EMPTY_FLAGS)
+ cdef Operation send_close_op = SendCloseFromClientOperation(
+ _EMPTY_FLAGS)
+
+ outbound_ops = (
+ initial_metadata_op,
+ send_message_op,
+ send_close_op,
+ )
+
+ try:
+ # Sends out the request message.
+ await execute_batch(self,
+ outbound_ops,
+ self._loop)
+
+ # Receives initial metadata.
+ self._set_initial_metadata(
+ await _receive_initial_metadata(self,
+ self._loop),
+ )
+ except ExecuteBatchError as batch_error:
+ # Core should explain why this batch failed
+ await status_task
+
+ async def stream_unary(self,
+ tuple outbound_initial_metadata,
+ object metadata_sent_observer):
+        """Actual implementation of the complete stream-unary call.
+
+ Needs to pay extra attention to the raise mechanism. If we want to
+ propagate the final status exception, then we have to raise it.
+        Otherwise, it would end normally and raise `StopAsyncIteration()`.
+ """
+ try:
+ # Sends out initial_metadata ASAP.
+ await _send_initial_metadata(self,
+ outbound_initial_metadata,
+ self._send_initial_metadata_flags,
+ self._loop)
+ # Notify upper level that sending messages are allowed now.
+ metadata_sent_observer()
+
+ # Receives initial metadata.
+ self._set_initial_metadata(
+ await _receive_initial_metadata(self, self._loop)
+ )
+ except ExecuteBatchError:
+ # Core should explain why this batch failed
+ await self._handle_status_once_received()
+
+ # Allow upper layer to proceed only if the status is set
+ metadata_sent_observer()
+ return None
+
+ cdef tuple inbound_ops
+ cdef ReceiveMessageOperation receive_message_op = ReceiveMessageOperation(_EMPTY_FLAGS)
+ cdef ReceiveStatusOnClientOperation receive_status_on_client_op = ReceiveStatusOnClientOperation(_EMPTY_FLAGS)
+ inbound_ops = (receive_message_op, receive_status_on_client_op)
+
+ # Executes all operations in one batch.
+ await execute_batch(self,
+ inbound_ops,
+ self._loop)
+
+ cdef grpc_status_code code
+ code = receive_status_on_client_op.code()
+
+ self._set_status(AioRpcStatus(
+ code,
+ receive_status_on_client_op.details(),
+ receive_status_on_client_op.trailing_metadata(),
+ receive_status_on_client_op.error_string(),
+ ))
+
+ if code == StatusCode.ok:
+ return receive_message_op.message()
+ else:
+ return None
+
+ async def initiate_stream_stream(self,
+ tuple outbound_initial_metadata,
+ object metadata_sent_observer):
+ """Actual implementation of the complete stream-stream call.
+
+ Needs to pay extra attention to the raise mechanism. If we want to
+ propagate the final status exception, then we have to raise it.
+        Otherwise, it would end normally and raise `StopAsyncIteration()`.
+ """
+        # Peer may prematurely end this RPC at any point. We need a coroutine
+ # that watches if the server sends the final status.
+ status_task = self._loop.create_task(self._handle_status_once_received())
+
+ try:
+ # Sends out initial_metadata ASAP.
+ await _send_initial_metadata(self,
+ outbound_initial_metadata,
+ self._send_initial_metadata_flags,
+ self._loop)
+ # Notify upper level that sending messages are allowed now.
+ metadata_sent_observer()
+
+ # Receives initial metadata.
+ self._set_initial_metadata(
+ await _receive_initial_metadata(self, self._loop)
+ )
+ except ExecuteBatchError as batch_error:
+ # Core should explain why this batch failed
+ await status_task
+
+ # Allow upper layer to proceed only if the status is set
+ metadata_sent_observer()
diff --git a/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/aio/callback_common.pxd.pxi b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/aio/callback_common.pxd.pxi
new file mode 100644
index 0000000000..e54e510754
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/aio/callback_common.pxd.pxi
@@ -0,0 +1,57 @@
+# Copyright 2019 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+cdef class CallbackFailureHandler:
+ cdef str _core_function_name
+ cdef object _error_details
+ cdef object _exception_type
+
+ cdef handle(self, object future)
+
+
+cdef struct CallbackContext:
+ # C struct to store callback context in the form of pointers.
+ #
+ # Attributes:
+ # functor: A grpc_completion_queue_functor represents the
+ # callback function in the only way Core understands.
+ # waiter: An asyncio.Future object that fulfills when the callback is
+ # invoked by Core.
+    #   failure_handler: A CallbackFailureHandler object that is called when Core
+ # returns 'success == 0' state.
+ # wrapper: A self-reference to the CallbackWrapper to help life cycle
+ # management.
+ grpc_completion_queue_functor functor
+ cpython.PyObject *waiter
+ cpython.PyObject *loop
+ cpython.PyObject *failure_handler
+ cpython.PyObject *callback_wrapper
+
+
+cdef class CallbackWrapper:
+ cdef CallbackContext context
+ cdef object _reference_of_future
+ cdef object _reference_of_failure_handler
+
+ @staticmethod
+ cdef void functor_run(
+ grpc_completion_queue_functor* functor,
+ int succeed)
+
+ cdef grpc_completion_queue_functor *c_functor(self)
+
+
+cdef class GrpcCallWrapper:
+ cdef grpc_call* call
diff --git a/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/aio/callback_common.pyx.pxi b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/aio/callback_common.pyx.pxi
new file mode 100644
index 0000000000..f2d94a96e8
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/aio/callback_common.pyx.pxi
@@ -0,0 +1,184 @@
+# Copyright 2019 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+cdef class CallbackFailureHandler:
+
+ def __cinit__(self,
+ str core_function_name,
+ object error_details,
+ object exception_type):
+ """Handles failure by raising exception."""
+ self._core_function_name = core_function_name
+ self._error_details = error_details
+ self._exception_type = exception_type
+
+ cdef handle(self, object future):
+ future.set_exception(self._exception_type(
+ 'Failed "%s": %s' % (self._core_function_name, self._error_details)
+ ))
+
+
+cdef class CallbackWrapper:
+
+ def __cinit__(self, object future, object loop, CallbackFailureHandler failure_handler):
+ self.context.functor.functor_run = self.functor_run
+ self.context.waiter = <cpython.PyObject*>future
+ self.context.loop = <cpython.PyObject*>loop
+ self.context.failure_handler = <cpython.PyObject*>failure_handler
+ self.context.callback_wrapper = <cpython.PyObject*>self
+ # NOTE(lidiz) Not using a list here, because this class is critical in
+ # data path. We should make it as efficient as possible.
+ self._reference_of_future = future
+ self._reference_of_failure_handler = failure_handler
+ # NOTE(lidiz) We need to ensure when Core invokes our callback, the
+        # callback function itself is not deallocated. Otherwise, we will get
+ # a segfault. We can view this as Core holding a ref.
+ cpython.Py_INCREF(self)
+
+ @staticmethod
+ cdef void functor_run(
+ grpc_completion_queue_functor* functor,
+ int success):
+ cdef CallbackContext *context = <CallbackContext *>functor
+ cdef object waiter = <object>context.waiter
+ if not waiter.cancelled():
+ if success == 0:
+ (<CallbackFailureHandler>context.failure_handler).handle(waiter)
+ else:
+ waiter.set_result(None)
+ cpython.Py_DECREF(<object>context.callback_wrapper)
+
+ cdef grpc_completion_queue_functor *c_functor(self):
+ return &self.context.functor
+
+
+cdef CallbackFailureHandler CQ_SHUTDOWN_FAILURE_HANDLER = CallbackFailureHandler(
+ 'grpc_completion_queue_shutdown',
+ 'Unknown',
+ InternalError)
+
+
+class ExecuteBatchError(InternalError):
+ """Raised when execute batch returns a failure from Core."""
+
+
+async def execute_batch(GrpcCallWrapper grpc_call_wrapper,
+ tuple operations,
+ object loop):
+ """The callback version of start batch operations."""
+ cdef _BatchOperationTag batch_operation_tag = _BatchOperationTag(None, operations, None)
+ batch_operation_tag.prepare()
+
+ cdef object future = loop.create_future()
+ cdef CallbackWrapper wrapper = CallbackWrapper(
+ future,
+ loop,
+ CallbackFailureHandler('execute_batch', operations, ExecuteBatchError))
+ cdef grpc_call_error error = grpc_call_start_batch(
+ grpc_call_wrapper.call,
+ batch_operation_tag.c_ops,
+ batch_operation_tag.c_nops,
+ wrapper.c_functor(), NULL)
+
+ if error != GRPC_CALL_OK:
+ raise ExecuteBatchError("Failed grpc_call_start_batch: {}".format(error))
+
+ await future
+
+ cdef grpc_event c_event
+ # Tag.event must be called, otherwise messages won't be parsed from C
+ batch_operation_tag.event(c_event)
+
+
+cdef prepend_send_initial_metadata_op(tuple ops, tuple metadata):
+ # Eventually, this function should be the only function that produces
+ # SendInitialMetadataOperation. So we have more control over the flag.
+ return (SendInitialMetadataOperation(
+ metadata,
+ _EMPTY_FLAG
+ ),) + ops
+
+
+async def _receive_message(GrpcCallWrapper grpc_call_wrapper,
+ object loop):
+    """Retrieves parsed messages from Core.
+
+    The messages may already be in Core's buffer, so there isn't a 1-to-1
+ mapping between this and the underlying "socket.read()". Also, eventually,
+ this function will end with an EOF, which reads empty message.
+ """
+ cdef ReceiveMessageOperation receive_op = ReceiveMessageOperation(_EMPTY_FLAG)
+ cdef tuple ops = (receive_op,)
+ try:
+ await execute_batch(grpc_call_wrapper, ops, loop)
+ except ExecuteBatchError as e:
+ # NOTE(lidiz) The receive message operation has two ways to indicate
+ # finish state : 1) returns empty message due to EOF; 2) fails inside
+ # the callback (e.g. cancelled).
+ #
+        # Since they all indicate finish, they are better merged.
+ _LOGGER.debug('Failed to receive any message from Core')
+ # NOTE(lidiz) The returned message might be an empty bytes (aka. b'').
+ # Please explicitly check if it is None or falsey string object!
+ return receive_op.message()
+
+
+async def _send_message(GrpcCallWrapper grpc_call_wrapper,
+ bytes message,
+ Operation send_initial_metadata_op,
+ int write_flag,
+ object loop):
+ cdef SendMessageOperation op = SendMessageOperation(message, write_flag)
+ cdef tuple ops = (op,)
+ if send_initial_metadata_op is not None:
+ ops = (send_initial_metadata_op,) + ops
+ await execute_batch(grpc_call_wrapper, ops, loop)
+
+
+async def _send_initial_metadata(GrpcCallWrapper grpc_call_wrapper,
+ tuple metadata,
+ int flags,
+ object loop):
+ cdef SendInitialMetadataOperation op = SendInitialMetadataOperation(
+ metadata,
+ flags)
+ cdef tuple ops = (op,)
+ await execute_batch(grpc_call_wrapper, ops, loop)
+
+
+async def _receive_initial_metadata(GrpcCallWrapper grpc_call_wrapper,
+ object loop):
+ cdef ReceiveInitialMetadataOperation op = ReceiveInitialMetadataOperation(_EMPTY_FLAGS)
+ cdef tuple ops = (op,)
+ await execute_batch(grpc_call_wrapper, ops, loop)
+ return op.initial_metadata()
+
+async def _send_error_status_from_server(GrpcCallWrapper grpc_call_wrapper,
+ grpc_status_code code,
+ str details,
+ tuple trailing_metadata,
+ Operation send_initial_metadata_op,
+ object loop):
+ assert code != StatusCode.ok, 'Expecting non-ok status code.'
+ cdef SendStatusFromServerOperation op = SendStatusFromServerOperation(
+ trailing_metadata,
+ code,
+ details,
+ _EMPTY_FLAGS,
+ )
+ cdef tuple ops = (op,)
+ if send_initial_metadata_op is not None:
+ ops = (send_initial_metadata_op,) + ops
+ await execute_batch(grpc_call_wrapper, ops, loop)
diff --git a/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/aio/channel.pxd.pxi b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/aio/channel.pxd.pxi
new file mode 100644
index 0000000000..03b4990e48
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/aio/channel.pxd.pxi
@@ -0,0 +1,27 @@
+# Copyright 2019 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+cdef enum AioChannelStatus:
+ AIO_CHANNEL_STATUS_UNKNOWN
+ AIO_CHANNEL_STATUS_READY
+ AIO_CHANNEL_STATUS_CLOSING
+ AIO_CHANNEL_STATUS_DESTROYED
+
+cdef class AioChannel:
+ cdef:
+ grpc_channel * channel
+ object loop
+ bytes _target
+ AioChannelStatus _status
+ bint _is_secure
diff --git a/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/aio/channel.pyx.pxi b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/aio/channel.pyx.pxi
new file mode 100644
index 0000000000..0b97c2ba37
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/aio/channel.pyx.pxi
@@ -0,0 +1,133 @@
+# Copyright 2019 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+class _WatchConnectivityFailed(Exception):
+    """Dedicated exception class for a failed connectivity watch.
+
+    The watch might fail because the deadline was exceeded.
+    """
+cdef CallbackFailureHandler _WATCH_CONNECTIVITY_FAILURE_HANDLER = CallbackFailureHandler(
+ 'watch_connectivity_state',
+ 'Timed out',
+ _WatchConnectivityFailed)
+
+
+cdef class AioChannel:
+ def __cinit__(self, bytes target, tuple options, ChannelCredentials credentials, object loop):
+ init_grpc_aio()
+ if options is None:
+ options = ()
+ cdef _ChannelArgs channel_args = _ChannelArgs(options)
+ self._target = target
+ self.loop = loop
+ self._status = AIO_CHANNEL_STATUS_READY
+
+ if credentials is None:
+ self._is_secure = False
+ creds = grpc_insecure_credentials_create();
+ self.channel = grpc_channel_create(<char *>target,
+ creds,
+ channel_args.c_args())
+ grpc_channel_credentials_release(creds)
+ else:
+ self._is_secure = True
+ self.channel = grpc_channel_create(<char *>target,
+ <grpc_channel_credentials *> credentials.c(),
+ channel_args.c_args())
+
+ def __dealloc__(self):
+ shutdown_grpc_aio()
+
+ def __repr__(self):
+ class_name = self.__class__.__name__
+ id_ = id(self)
+ return f"<{class_name} {id_}>"
+
+ def check_connectivity_state(self, bint try_to_connect):
+ """A Cython wrapper for Core's check connectivity state API."""
+ if self._status == AIO_CHANNEL_STATUS_DESTROYED:
+ return ConnectivityState.shutdown
+ else:
+ return grpc_channel_check_connectivity_state(
+ self.channel,
+ try_to_connect,
+ )
+
+ async def watch_connectivity_state(self,
+ grpc_connectivity_state last_observed_state,
+ object deadline):
+ """Watch for one connectivity state change.
+
+ Keeps mirroring the behavior from Core, so we can easily switch to
+ other design of API if necessary.
+ """
+ if self._status in (AIO_CHANNEL_STATUS_DESTROYED, AIO_CHANNEL_STATUS_CLOSING):
+ raise UsageError('Channel is closed.')
+
+ cdef gpr_timespec c_deadline = _timespec_from_time(deadline)
+
+ cdef object future = self.loop.create_future()
+ cdef CallbackWrapper wrapper = CallbackWrapper(
+ future,
+ self.loop,
+ _WATCH_CONNECTIVITY_FAILURE_HANDLER)
+ grpc_channel_watch_connectivity_state(
+ self.channel,
+ last_observed_state,
+ c_deadline,
+ global_completion_queue(),
+ wrapper.c_functor())
+
+ try:
+ await future
+ except _WatchConnectivityFailed:
+ return False
+ else:
+ return True
+
+ def closing(self):
+ self._status = AIO_CHANNEL_STATUS_CLOSING
+
+ def close(self):
+ self._status = AIO_CHANNEL_STATUS_DESTROYED
+ grpc_channel_destroy(self.channel)
+
+ def closed(self):
+ return self._status in (AIO_CHANNEL_STATUS_CLOSING, AIO_CHANNEL_STATUS_DESTROYED)
+
+ def call(self,
+ bytes method,
+ object deadline,
+ object python_call_credentials,
+ object wait_for_ready):
+ """Assembles a Cython Call object.
+
+ Returns:
+ An _AioCall object.
+ """
+ if self.closed():
+ raise UsageError('Channel is closed.')
+
+ cdef CallCredentials cython_call_credentials
+ if python_call_credentials is not None:
+ if not self._is_secure:
+ raise UsageError("Call credentials are only valid on secure channels")
+
+ cython_call_credentials = python_call_credentials._credentials
+ else:
+ cython_call_credentials = None
+
+ return _AioCall(self, deadline, method, cython_call_credentials, wait_for_ready)
diff --git a/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/aio/common.pyx.pxi b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/aio/common.pyx.pxi
new file mode 100644
index 0000000000..2bbe549890
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/aio/common.pyx.pxi
@@ -0,0 +1,202 @@
+# Copyright 2019 The gRPC Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from cpython.version cimport PY_MAJOR_VERSION, PY_MINOR_VERSION
+
+TYPE_METADATA_STRING = "Tuple[Tuple[str, Union[str, bytes]]...]"
+
+
+cdef grpc_status_code get_status_code(object code) except *:
+    """Coerces a Python status-code object into a grpc_status_code.
+
+    Integers within [StatusCode.ok, StatusCode.data_loss] pass through
+    unchanged; out-of-range integers collapse to StatusCode.unknown.
+    Non-integers are assumed to be enum-like objects whose ``.value[0]``
+    holds the numeric code (as grpc.StatusCode members do — verify); any
+    lookup failure also collapses to StatusCode.unknown.
+    """
+    if isinstance(code, int):
+        if code >= StatusCode.ok and code <= StatusCode.data_loss:
+            return code
+        else:
+            return StatusCode.unknown
+    else:
+        try:
+            return code.value[0]
+        except (KeyError, AttributeError):
+            return StatusCode.unknown
+
+
+cdef object deserialize(object deserializer, bytes raw_message):
+    """Perform deserialization on raw bytes.
+
+    Failure to deserialize is a fatal error.
+
+    A falsy ``deserializer`` means the raw bytes are passed through as-is.
+    """
+    if deserializer:
+        return deserializer(raw_message)
+    else:
+        return raw_message
+
+
+cdef bytes serialize(object serializer, object message):
+    """Perform serialization on a message.
+
+    Failure to serialize is a fatal error.
+
+    A falsy ``serializer`` means the message is passed through unchanged
+    (after text-to-bytes conversion).
+    """
+    # The wire needs bytes; transparently encode text messages first.
+    if isinstance(message, str):
+        message = message.encode('utf-8')
+    if serializer:
+        return serializer(message)
+    else:
+        return message
+
+
+class _EOF:
+    """Sentinel type marking end-of-stream; compared by identity via EOF.
+
+    Instances are falsy and have length 0 so they behave like an "empty"
+    value in boolean and len() contexts.
+    """
+
+    def __bool__(self):
+        return False
+
+    def __len__(self):
+        return 0
+
+    def _repr(self) -> str:
+        # Shared implementation for both __repr__ and __str__.
+        return '<grpc.aio.EOF>'
+
+    def __repr__(self) -> str:
+        return self._repr()
+
+    def __str__(self) -> str:
+        return self._repr()
+
+
+# Module-level end-of-stream singleton; consumers compare against it with
+# ``is`` (see async_generator_to_generator / generator_to_async_generator).
+EOF = _EOF()
+
+# Maps CompressionAlgorithm enum members to the metadata string placed in
+# the compression-request metadata entry (see _augment_metadata on the
+# server side).
+_COMPRESSION_METADATA_STRING_MAPPING = {
+    CompressionAlgorithm.none: 'identity',
+    CompressionAlgorithm.deflate: 'deflate',
+    CompressionAlgorithm.gzip: 'gzip',
+}
+
+# Exception hierarchy of the aio stack; all aio-specific errors derive from
+# BaseError so applications can catch them with a single except clause.
+class BaseError(Exception):
+    """The base class for exceptions generated by gRPC AsyncIO stack."""
+
+
+class UsageError(BaseError):
+    """Raised when the usage of API by applications is inappropriate.
+
+    For example, trying to invoke RPC on a closed channel, mixing two styles
+    of streaming API on the client side. This exception should not be
+    suppressed.
+    """
+
+
+class AbortError(BaseError):
+    """Raised when calling abort in servicer methods.
+
+    This exception should not be suppressed. Applications may catch it to
+    perform certain clean-up logic, and then re-raise it.
+    """
+
+
+class InternalError(BaseError):
+    """Raised upon unexpected errors in native code."""
+
+
+def schedule_coro_threadsafe(object coro, object loop):
+    """Schedules a coroutine onto ``loop``, tolerating cross-thread calls.
+
+    Tries the cheap in-loop ``loop.create_task`` first; if asyncio rejects
+    it because we are on a different thread, falls back to the thread-safe
+    ``asyncio.run_coroutine_threadsafe``. Any other RuntimeError is
+    re-raised unchanged.
+
+    Returns:
+      An asyncio.Task (same-thread path) or a concurrent.futures.Future
+      (cross-thread path).
+    """
+    try:
+        return loop.create_task(coro)
+    except RuntimeError as runtime_error:
+        # asyncio raises RuntimeError('Non-thread-safe operation ...') when
+        # create_task is invoked from outside the loop's thread.
+        if 'Non-thread-safe operation' in str(runtime_error):
+            return asyncio.run_coroutine_threadsafe(
+                coro,
+                loop,
+            )
+        else:
+            raise
+
+
+def async_generator_to_generator(object agen, object loop):
+    """Converts an async generator into a plain blocking generator.
+
+    Each item is produced by scheduling ``agen.__anext__()`` onto ``loop``
+    and blocking on the result, so this must be iterated from a thread
+    other than the loop's own thread (blocking inside the loop thread
+    would deadlock). Iteration ends on either the EOF sentinel or
+    StopAsyncIteration from the async generator.
+    """
+    try:
+        while True:
+            future = asyncio.run_coroutine_threadsafe(
+                agen.__anext__(),
+                loop
+            )
+            response = future.result()
+            if response is EOF:
+                break
+            else:
+                yield response
+    except StopAsyncIteration:
+        # If StopAsyncIteration is raised, end this generator.
+        pass
+
+
+async def generator_to_async_generator(object gen, object loop, object thread_pool):
+    """Converts a generator into async generator.
+
+    The generator might block, so we need to delegate the iteration to thread
+    pool. Also, we can't simply delegate __next__ to the thread pool, otherwise
+    we will see following error:
+
+        TypeError: StopIteration interacts badly with generators and cannot be
+        raised into a Future
+
+    Args:
+      gen: The (possibly blocking) synchronous generator to wrap.
+      loop: The asyncio loop that consumes the async generator.
+      thread_pool: Executor that runs the blocking iteration.
+    """
+    # maxsize=1 provides backpressure: the producer thread can be at most
+    # one item ahead of the async consumer.
+    queue = asyncio.Queue(maxsize=1)
+
+    def yield_to_queue():
+        try:
+            for item in gen:
+                asyncio.run_coroutine_threadsafe(queue.put(item), loop).result()
+        finally:
+            # Always enqueue the EOF sentinel — even if ``gen`` raises — so
+            # the consuming loop below is guaranteed to terminate.
+            asyncio.run_coroutine_threadsafe(queue.put(EOF), loop).result()
+
+    future = loop.run_in_executor(
+        thread_pool,
+        yield_to_queue,
+    )
+
+    while True:
+        response = await queue.get()
+        if response is EOF:
+            break
+        else:
+            yield response
+
+    # Port the exception if there is any: awaiting the executor future
+    # re-raises whatever the producer thread raised.
+    await future
+
+
+# Pick the get_working_loop implementation at import time based on the
+# interpreter version (get_running_loop only exists on 3.7+).
+# NOTE(review): the minor-version check assumes a 3.x interpreter.
+if PY_MAJOR_VERSION >= 3 and PY_MINOR_VERSION >= 7:
+    def get_working_loop():
+        """Returns a running event loop.
+
+        Due to a defect of asyncio.get_event_loop, its returned event loop might
+        not be set as the default event loop for the main thread.
+        """
+        try:
+            return asyncio.get_running_loop()
+        except RuntimeError:
+            # No loop is running on this thread; fall back to the default.
+            return asyncio.get_event_loop()
+else:
+    def get_working_loop():
+        """Returns a running event loop."""
+        return asyncio.get_event_loop()
+
+
+def raise_if_not_valid_trailing_metadata(object metadata):
+    """Validates the structure of trailing metadata.
+
+    Accepted shape: a non-dict iterable of 2-tuples whose first element is
+    str and whose second element is str or bytes.
+
+    Raises:
+      TypeError: On the first structural violation found.
+    """
+    if not hasattr(metadata, '__iter__') or isinstance(metadata, dict):
+        raise TypeError(f'Invalid trailing metadata type, expected {TYPE_METADATA_STRING}: {metadata}')
+    for item in metadata:
+        if not isinstance(item, tuple):
+            raise TypeError(f'Invalid trailing metadata type, expected {TYPE_METADATA_STRING}: {metadata}')
+        if len(item) != 2:
+            raise TypeError(f'Invalid trailing metadata type, expected {TYPE_METADATA_STRING}: {metadata}')
+        if not isinstance(item[0], str):
+            raise TypeError(f'Invalid trailing metadata type, expected {TYPE_METADATA_STRING}: {metadata}')
+        if not isinstance(item[1], str) and not isinstance(item[1], bytes):
+            raise TypeError(f'Invalid trailing metadata type, expected {TYPE_METADATA_STRING}: {metadata}')
diff --git a/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/aio/completion_queue.pxd.pxi b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/aio/completion_queue.pxd.pxi
new file mode 100644
index 0000000000..578131f7ee
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/aio/completion_queue.pxd.pxi
@@ -0,0 +1,52 @@
+# Copyright 2020 The gRPC Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# C++ std::queue of Core events, shared between the poller thread and
+# event-loop threads (guarded by PollerCompletionQueue._queue_mutex).
+ctypedef queue[grpc_event] cpp_event_queue
+
+
+IF UNAME_SYSNAME == "Windows":
+    # Windows sockets are not plain fds, so declare just the winsock pieces
+    # _unified_socket_write needs to send the wake-up byte.
+    cdef extern from "winsock2.h" nogil:
+        ctypedef uint32_t WIN_SOCKET "SOCKET"
+        WIN_SOCKET win_socket "socket" (int af, int type, int protocol)
+        int win_socket_send "send" (WIN_SOCKET s, const char *buf, int len, int flags)
+
+
+# Writes one byte to fd using the platform-appropriate primitive.
+cdef void _unified_socket_write(int fd) nogil
+
+
+cdef class BaseCompletionQueue:
+    cdef grpc_completion_queue *_cq
+
+    cdef grpc_completion_queue* c_ptr(self)
+
+
+cdef class _BoundEventLoop:
+    cdef readonly object loop
+    cdef readonly object read_socket  # socket.socket
+    # True when loop.add_reader accepted the read socket.
+    cdef bint _has_reader
+
+
+cdef class PollerCompletionQueue(BaseCompletionQueue):
+    cdef bint _shutdown
+    cdef cpp_event_queue _queue
+    cdef mutex _queue_mutex
+    cdef object _poller_thread  # threading.Thread
+    cdef int _write_fd
+    cdef object _read_socket  # socket.socket
+    cdef object _write_socket  # socket.socket
+    cdef dict _loops  # Mapping[asyncio.AbstractLoop, _BoundEventLoop]
+
+    cdef void _poll(self) nogil
+    cdef shutdown(self)
diff --git a/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/aio/completion_queue.pyx.pxi b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/aio/completion_queue.pyx.pxi
new file mode 100644
index 0000000000..b9132c8560
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/aio/completion_queue.pyx.pxi
@@ -0,0 +1,174 @@
+# Copyright 2020 The gRPC Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import socket
+
+# Sentinel deadline: makes grpc_completion_queue_next block until an event
+# or queue shutdown arrives.
+cdef gpr_timespec _GPR_INF_FUTURE = gpr_inf_future(GPR_CLOCK_REALTIME)
+# How often shutdown() re-checks whether the poller thread has exited.
+cdef float _POLL_AWAKE_INTERVAL_S = 0.2
+
+# This bool indicates if the event loop impl can monitor a given fd, or has
+# loop.add_reader method.
+cdef bint _has_fd_monitoring = True
+
+IF UNAME_SYSNAME == "Windows":
+ cdef void _unified_socket_write(int fd) nogil:
+ win_socket_send(<WIN_SOCKET>fd, b"1", 1, 0)
+ELSE:
+ cimport posix.unistd as unistd
+
+ cdef void _unified_socket_write(int fd) nogil:
+ unistd.write(fd, b"1", 1)
+
+
+def _handle_callback_wrapper(CallbackWrapper callback_wrapper, int success):
+    # Module-level trampoline so the poller can hand a completion to a
+    # foreign event loop via loop.call_soon_threadsafe.
+    CallbackWrapper.functor_run(callback_wrapper.c_functor(), success)
+
+
+cdef class BaseCompletionQueue:
+    """Base holder of the Core completion queue pointer."""
+
+    cdef grpc_completion_queue* c_ptr(self):
+        # Raw pointer accessor for Core API calls.
+        return self._cq
+
+
+cdef class _BoundEventLoop:
+    """Binds one asyncio loop to the poller's read socket.
+
+    If the loop supports fd monitoring, ``handler`` (partially applied with
+    the loop) runs whenever the poller writes its wake-up byte.
+    """
+
+    def __cinit__(self, object loop, object read_socket, object handler):
+        global _has_fd_monitoring
+        self.loop = loop
+        self.read_socket = read_socket
+        reader_function = functools.partial(
+            handler,
+            loop
+        )
+        # NOTE(lidiz) There isn't a way to cleanly pre-check if fd monitoring
+        # support is available or not. Checking the event loop policy is not
+        # good enough. The application can have its own loop implementation, or
+        # use different types of event loops (e.g., 1 Proactor, 3 Selectors).
+        if _has_fd_monitoring:
+            try:
+                self.loop.add_reader(self.read_socket, reader_function)
+                self._has_reader = True
+            except NotImplementedError:
+                # Flip the module-level flag: the poller falls back to
+                # dispatching events itself for every loop from now on.
+                _has_fd_monitoring = False
+                self._has_reader = False
+
+    def close(self):
+        if self.loop:
+            if self._has_reader:
+                self.loop.remove_reader(self.read_socket)
+
+
+cdef class PollerCompletionQueue(BaseCompletionQueue):
+    """Completion queue drained by a dedicated background poller thread.
+
+    The poller thread blocks in grpc_completion_queue_next, pushes events
+    onto a mutex-protected C++ queue, and wakes bound asyncio loops through
+    a socketpair; each loop then drains the queue on its own thread.
+    """
+
+    def __cinit__(self):
+        self._cq = grpc_completion_queue_create_for_next(NULL)
+        self._shutdown = False
+        self._poller_thread = threading.Thread(target=self._poll_wrapper, daemon=True)
+        self._poller_thread.start()
+
+        self._read_socket, self._write_socket = socket.socketpair()
+        self._write_fd = self._write_socket.fileno()
+        self._loops = {}
+
+        # The read socket might be read by multiple threads. But only one of them will
+        # read the 1 byte sent by the poller thread. This setting is essential to allow
+        # multiple loops in multiple threads bound to the same poller.
+        self._read_socket.setblocking(False)
+
+        self._queue = cpp_event_queue()
+
+    def bind_loop(self, object loop):
+        # Idempotently registers a loop so its thread is woken on new events.
+        if loop in self._loops:
+            return
+        else:
+            self._loops[loop] = _BoundEventLoop(loop, self._read_socket, self._handle_events)
+
+    cdef void _poll(self) nogil:
+        # Runs on the poller thread without the GIL; only re-acquires it to
+        # raise or to dispatch events when fd monitoring is unavailable.
+        cdef grpc_event event
+        cdef CallbackContext *context
+
+        while not self._shutdown:
+            event = grpc_completion_queue_next(self._cq,
+                                               _GPR_INF_FUTURE,
+                                               NULL)
+
+            if event.type == GRPC_QUEUE_TIMEOUT:
+                with gil:
+                    raise AssertionError("Core should not return GRPC_QUEUE_TIMEOUT!")
+            elif event.type == GRPC_QUEUE_SHUTDOWN:
+                self._shutdown = True
+            else:
+                self._queue_mutex.lock()
+                self._queue.push(event)
+                self._queue_mutex.unlock()
+                if _has_fd_monitoring:
+                    # Wake one bound loop via the socketpair.
+                    _unified_socket_write(self._write_fd)
+                else:
+                    with gil:
+                        # Event loops can be paused or killed at any time. So,
+                        # instead of delegating to any thread, the polling thread
+                        # should handle the distribution of the event.
+                        self._handle_events(None)
+
+    def _poll_wrapper(self):
+        # Python-callable entry for the poller thread; drops the GIL for the
+        # blocking poll loop.
+        with nogil:
+            self._poll()
+
+    cdef shutdown(self):
+        # Removes the socket hook from loops
+        for loop in self._loops:
+            self._loops.get(loop).close()
+
+        # TODO(https://github.com/grpc/grpc/issues/22365) perform graceful shutdown
+        grpc_completion_queue_shutdown(self._cq)
+        # Wait for the poller thread to observe GRPC_QUEUE_SHUTDOWN and exit.
+        while not self._shutdown:
+            self._poller_thread.join(timeout=_POLL_AWAKE_INTERVAL_S)
+        grpc_completion_queue_destroy(self._cq)
+
+        # Clean up socket resources
+        self._read_socket.close()
+        self._write_socket.close()
+
+    def _handle_events(self, object context_loop):
+        """Drains queued events, running each callback on its owning loop."""
+        cdef bytes data
+        if _has_fd_monitoring:
+            # If fd monitoring is working, clean the socket without blocking.
+            data = self._read_socket.recv(1)
+        cdef grpc_event event
+        cdef CallbackContext *context
+
+        while True:
+            self._queue_mutex.lock()
+            if self._queue.empty():
+                self._queue_mutex.unlock()
+                break
+            else:
+                event = self._queue.front()
+                self._queue.pop()
+                self._queue_mutex.unlock()
+
+            context = <CallbackContext *>event.tag
+            loop = <object>context.loop
+            if loop is context_loop:
+                # Executes callbacks: complete the future
+                CallbackWrapper.functor_run(
+                    <grpc_completion_queue_functor *>event.tag,
+                    event.success
+                )
+            else:
+                # Event belongs to a different loop: hop threads safely.
+                loop.call_soon_threadsafe(
+                    _handle_callback_wrapper,
+                    <CallbackWrapper>context.callback_wrapper,
+                    event.success
+                )
diff --git a/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/aio/grpc_aio.pxd.pxi b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/aio/grpc_aio.pxd.pxi
new file mode 100644
index 0000000000..ebf0660174
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/aio/grpc_aio.pxd.pxi
@@ -0,0 +1,43 @@
+# Copyright 2019 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# distutils: language=c++
+
+cdef class _AioState:
+    cdef object lock  # threading.RLock
+    # Number of live aio objects keeping the stack initialized.
+    cdef int refcount
+    cdef object engine  # AsyncIOEngine
+    cdef BaseCompletionQueue cq
+
+
+# Accessor for the single process-wide completion queue held by _AioState.
+cdef grpc_completion_queue *global_completion_queue()
+
+
+cpdef init_grpc_aio()
+
+
+cpdef shutdown_grpc_aio()
+
+
+cdef extern from "src/core/lib/iomgr/timer_manager.h":
+    void grpc_timer_manager_set_threading(bint enabled)
+
+
+cdef extern from "src/core/lib/iomgr/iomgr_internal.h":
+    void grpc_set_default_iomgr_platform()
+
+
+cdef extern from "src/core/lib/iomgr/executor.h" namespace "grpc_core":
+    cdef cppclass Executor:
+        @staticmethod
+        void SetThreadingAll(bint enable)
diff --git a/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/aio/grpc_aio.pyx.pxi b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/aio/grpc_aio.pyx.pxi
new file mode 100644
index 0000000000..7f9f52da7c
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/aio/grpc_aio.pyx.pxi
@@ -0,0 +1,114 @@
+# Copyright 2019 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import enum
+
+# Engine selection, overridable via the GRPC_ASYNCIO_ENGINE env var; upper-
+# cased so it can be matched against AsyncIOEngine member names.
+cdef str _GRPC_ASYNCIO_ENGINE = os.environ.get('GRPC_ASYNCIO_ENGINE', 'poller').upper()
+# Process-wide singleton holding the refcount, engine choice and shared CQ.
+cdef _AioState _global_aio_state = _AioState()
+
+
+class AsyncIOEngine(enum.Enum):
+    """Selectable I/O engines for the gRPC AsyncIO stack."""
+    # NOTE(lidiz) the support for custom_io_manager is removed in favor of the
+    # EventEngine project, which will be the only IO platform in Core.
+    CUSTOM_IO_MANAGER = 'custom_io_manager'
+    POLLER = 'poller'
+
+
+cdef _default_asyncio_engine():
+    # POLLER is the only engine still supported; used when the env var does
+    # not name a valid AsyncIOEngine member.
+    return AsyncIOEngine.POLLER
+
+
+cdef grpc_completion_queue *global_completion_queue():
+    # Valid only after init_grpc_aio() has run (cq is None before that).
+    return _global_aio_state.cq.c_ptr()
+
+
+cdef class _AioState:
+    """Mutable process-level state of the aio stack (see module singleton)."""
+
+    def __cinit__(self):
+        self.lock = threading.RLock()
+        self.refcount = 0
+        self.engine = None
+        self.cq = None
+
+
+cdef _initialize_poller():
+    # Initializes gRPC Core, must be called before other Core API
+    grpc_init()
+
+    # Creates the only completion queue
+    _global_aio_state.cq = PollerCompletionQueue()
+
+
+cdef _actual_aio_initialization():
+    # Picks the engine for gRPC AsyncIO Stack; falls back to the default
+    # when GRPC_ASYNCIO_ENGINE names no valid AsyncIOEngine member.
+    _global_aio_state.engine = AsyncIOEngine.__members__.get(
+        _GRPC_ASYNCIO_ENGINE,
+        _default_asyncio_engine(),
+    )
+    _LOGGER.debug('Using %s as I/O engine', _global_aio_state.engine)
+
+    # Initializes the process-level state accordingly. Any engine other
+    # than POLLER (including CUSTOM_IO_MANAGER) is rejected here.
+    if _global_aio_state.engine is AsyncIOEngine.POLLER:
+        _initialize_poller()
+    else:
+        raise ValueError('Unsupported engine type [%s]' % _global_aio_state.engine)
+
+
+def _grpc_shutdown_wrapper(_):
+    """A thin Python wrapper of Core's shutdown function.
+
+    ``def`` functions are not allowed inside "cdef" functions, and Cython
+    complains about a simple lambda wrapping a C function.
+    """
+    grpc_shutdown()
+
+
+cdef _actual_aio_shutdown():
+    # Tears down the engine-specific state, then Core itself. Mirrors
+    # _actual_aio_initialization's engine dispatch.
+    if _global_aio_state.engine is AsyncIOEngine.POLLER:
+        (<PollerCompletionQueue>_global_aio_state.cq).shutdown()
+        grpc_shutdown()
+    else:
+        raise ValueError('Unsupported engine type [%s]' % _global_aio_state.engine)
+
+
+cdef _initialize_per_loop():
+    # Binds the calling thread's working loop to the shared poller CQ so the
+    # loop gets woken when events for it arrive.
+    cdef object loop = get_working_loop()
+    if _global_aio_state.engine is AsyncIOEngine.POLLER:
+        _global_aio_state.cq.bind_loop(loop)
+
+
+cpdef init_grpc_aio():
+    """Initializes the gRPC AsyncIO module.
+
+    Expected to be invoked on critical class constructors.
+    E.g., AioChannel, AioServer.
+
+    Reference-counted: only the first call performs the process-level
+    initialization; every call registers the current loop.
+    """
+    with _global_aio_state.lock:
+        _global_aio_state.refcount += 1
+        if _global_aio_state.refcount == 1:
+            _actual_aio_initialization()
+        _initialize_per_loop()
+
+
+cpdef shutdown_grpc_aio():
+    """Shuts down the gRPC AsyncIO module.
+
+    Expected to be invoked on critical class destructors.
+    E.g., AioChannel, AioServer.
+
+    Counterpart of init_grpc_aio: the real teardown happens only when the
+    refcount drops back to zero.
+    """
+    with _global_aio_state.lock:
+        assert _global_aio_state.refcount > 0
+        _global_aio_state.refcount -= 1
+        if not _global_aio_state.refcount:
+            _actual_aio_shutdown()
diff --git a/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/aio/rpc_status.pxd.pxi b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/aio/rpc_status.pxd.pxi
new file mode 100644
index 0000000000..3780d8ddf2
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/aio/rpc_status.pxd.pxi
@@ -0,0 +1,29 @@
+# Copyright 2019 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Exceptions for the aio version of the RPC calls."""
+
+
+# Declaration of the terminal RPC status carrier (see rpc_status.pyx.pxi).
+cdef class AioRpcStatus(Exception):
+    cdef readonly:
+        grpc_status_code _code
+        str _details
+        # Per the spec, only client-side status has trailing metadata.
+        tuple _trailing_metadata
+        str _debug_error_string
+
+    cpdef grpc_status_code code(self)
+    cpdef str details(self)
+    cpdef tuple trailing_metadata(self)
+    cpdef str debug_error_string(self)
+    # Same code, typed for direct use in Core calls.
+    cdef grpc_status_code c_code(self)
diff --git a/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/aio/rpc_status.pyx.pxi b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/aio/rpc_status.pyx.pxi
new file mode 100644
index 0000000000..07669fc157
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/aio/rpc_status.pyx.pxi
@@ -0,0 +1,44 @@
+# Copyright 2019 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Exceptions for the aio version of the RPC calls."""
+
+
+cdef class AioRpcStatus(Exception):
+    """Immutable carrier of an RPC's final status (code, details, metadata)."""
+
+    # The final status of gRPC is represented by three trailing metadata:
+    # `grpc-status`, `grpc-status-message`, and `grpc-status-details`.
+    def __cinit__(self,
+                  grpc_status_code code,
+                  str details,
+                  tuple trailing_metadata,
+                  str debug_error_string):
+        self._code = code
+        self._details = details
+        self._trailing_metadata = trailing_metadata
+        self._debug_error_string = debug_error_string
+
+    cpdef grpc_status_code code(self):
+        return self._code
+
+    cpdef str details(self):
+        return self._details
+
+    cpdef tuple trailing_metadata(self):
+        return self._trailing_metadata
+
+    cpdef str debug_error_string(self):
+        return self._debug_error_string
+
+    cdef grpc_status_code c_code(self):
+        # Explicitly typed accessor for passing straight into Core APIs.
+        return <grpc_status_code>self._code
diff --git a/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/aio/server.pxd.pxi b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/aio/server.pxd.pxi
new file mode 100644
index 0000000000..fe10c3883c
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/aio/server.pxd.pxi
@@ -0,0 +1,92 @@
+# Copyright 2019 The gRPC Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Container for (method, invocation_metadata) passed during handler lookup.
+cdef class _HandlerCallDetails:
+    cdef readonly str method
+    cdef readonly tuple invocation_metadata
+
+
+cdef class RPCState(GrpcCallWrapper):
+    cdef grpc_call_details details
+    cdef grpc_metadata_array request_metadata
+    cdef AioServer server
+    # NOTE(lidiz) Under certain corner cases, receiving the client close
+    # operation won't immediately fail ongoing RECV_MESSAGE operations. Here I
+    # added a flag to workaround this unexpected behavior.
+    cdef bint client_closed
+    cdef object abort_exception
+    cdef bint metadata_sent
+    cdef bint status_sent
+    cdef grpc_status_code status_code
+    cdef str status_details
+    cdef tuple trailing_metadata
+    cdef object compression_algorithm
+    cdef bint disable_next_compression
+    cdef object callbacks
+
+    cdef bytes method(self)
+    cdef tuple invocation_metadata(self)
+    cdef void raise_for_termination(self) except *
+    cdef int get_write_flag(self)
+    cdef Operation create_send_initial_metadata_op_if_not_sent(self)
+
+
+cdef class _ServicerContext:
+    cdef RPCState _rpc_state
+    cdef object _loop  # asyncio.AbstractEventLoop
+    cdef object _request_deserializer  # Callable[[bytes], Any]
+    cdef object _response_serializer  # Callable[[Any], bytes]
+
+
+cdef class _SyncServicerContext:
+    cdef _ServicerContext _context
+    cdef list _callbacks
+    cdef object _loop  # asyncio.AbstractEventLoop
+
+
+cdef class _MessageReceiver:
+    cdef _ServicerContext _servicer_context
+    cdef object _agen
+
+
+# Lifecycle states of AioServer.
+cdef enum AioServerStatus:
+    AIO_SERVER_STATUS_UNKNOWN
+    AIO_SERVER_STATUS_READY
+    AIO_SERVER_STATUS_RUNNING
+    AIO_SERVER_STATUS_STOPPED
+    AIO_SERVER_STATUS_STOPPING
+
+
+cdef class _ConcurrentRpcLimiter:
+    cdef int _maximum_concurrent_rpcs
+    cdef int _active_rpcs
+    cdef object _active_rpcs_condition  # asyncio.Condition
+    cdef object _loop  # asyncio.EventLoop
+
+
+cdef class AioServer:
+    cdef Server _server
+    cdef list _generic_handlers
+    cdef AioServerStatus _status
+    cdef object _loop  # asyncio.EventLoop
+    cdef object _serving_task  # asyncio.Task
+    cdef object _shutdown_lock  # asyncio.Lock
+    cdef object _shutdown_completed  # asyncio.Future
+    cdef CallbackWrapper _shutdown_callback_wrapper
+    cdef object _crash_exception  # Exception
+    cdef tuple _interceptors
+    cdef object _thread_pool  # concurrent.futures.ThreadPoolExecutor
+    cdef _ConcurrentRpcLimiter _limiter
+
+    cdef thread_pool(self)
diff --git a/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/aio/server.pyx.pxi b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/aio/server.pyx.pxi
new file mode 100644
index 0000000000..86406484b3
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/aio/server.pyx.pxi
@@ -0,0 +1,1093 @@
+# Copyright 2019 The gRPC Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import inspect
+import traceback
+import functools
+
+
+cdef int _EMPTY_FLAG = 0
+# Detail strings for the errors raised by RPCState.raise_for_termination().
+cdef str _RPC_FINISHED_DETAILS = 'RPC already finished.'
+cdef str _SERVER_STOPPED_DETAILS = 'Server already stopped.'
+
+cdef _augment_metadata(tuple metadata, object compression):
+    # Prepends the compression-request metadata entry so Core picks up the
+    # chosen algorithm; returns metadata unchanged when compression is None.
+    if compression is None:
+        return metadata
+    else:
+        return ((
+            GRPC_COMPRESSION_REQUEST_ALGORITHM_MD_KEY,
+            _COMPRESSION_METADATA_STRING_MAPPING[compression]
+        ),) + metadata
+
+
+cdef class _HandlerCallDetails:
+    """Read-only (method, invocation_metadata) pair for handler lookup."""
+    def __cinit__(self, str method, tuple invocation_metadata):
+        self.method = method
+        self.invocation_metadata = invocation_metadata
+
+
+# Internal-only error (note the leading underscore); derives from the aio
+# BaseError hierarchy.
+class _ServerStoppedError(BaseError):
+    """Raised if the server is stopped."""
+
+
+cdef class RPCState:
+    """Per-RPC server-side state wrapping the Core call object.
+
+    Owns the aio-stack refcount for its lifetime (init_grpc_aio in
+    __cinit__, shutdown_grpc_aio in __dealloc__).
+    """
+
+    def __cinit__(self, AioServer server):
+        init_grpc_aio()
+        self.call = NULL
+        self.server = server
+        grpc_metadata_array_init(&self.request_metadata)
+        grpc_call_details_init(&self.details)
+        self.client_closed = False
+        self.abort_exception = None
+        self.metadata_sent = False
+        self.status_sent = False
+        self.status_code = StatusCode.ok
+        self.status_details = ''
+        self.trailing_metadata = _IMMUTABLE_EMPTY_METADATA
+        self.compression_algorithm = None
+        self.disable_next_compression = False
+        self.callbacks = []
+
+    cdef bytes method(self):
+        # Fully-qualified method name as recorded in the Core call details.
+        return _slice_bytes(self.details.method)
+
+    cdef tuple invocation_metadata(self):
+        return _metadata(&self.request_metadata)
+
+    cdef void raise_for_termination(self) except *:
+        """Raise exceptions if RPC is not running.
+
+        Server method handlers may suppress the abort exception. We need to halt
+        the RPC execution in that case. This function needs to be called after
+        running application code.
+
+        Also, the server may stop unexpectedly. We need to check before calling
+        into Core functions, otherwise, segfault.
+        """
+        if self.abort_exception is not None:
+            raise self.abort_exception
+        if self.status_sent:
+            raise UsageError(_RPC_FINISHED_DETAILS)
+        if self.server._status == AIO_SERVER_STATUS_STOPPED:
+            raise _ServerStoppedError(_SERVER_STOPPED_DETAILS)
+
+    cdef int get_write_flag(self):
+        # One-shot flag: consuming it re-enables compression for later writes.
+        if self.disable_next_compression:
+            self.disable_next_compression = False
+            return WriteFlag.no_compress
+        else:
+            return _EMPTY_FLAG
+
+    cdef Operation create_send_initial_metadata_op_if_not_sent(self):
+        # Returns None when initial metadata already went out; caller is
+        # responsible for setting metadata_sent after the op completes.
+        cdef SendInitialMetadataOperation op
+        if self.metadata_sent:
+            return None
+        else:
+            op = SendInitialMetadataOperation(
+                _augment_metadata(_IMMUTABLE_EMPTY_METADATA, self.compression_algorithm),
+                _EMPTY_FLAG
+            )
+            return op
+
+    def __dealloc__(self):
+        """Cleans the Core objects."""
+        grpc_call_details_destroy(&self.details)
+        grpc_metadata_array_destroy(&self.request_metadata)
+        if self.call:
+            grpc_call_unref(self.call)
+        shutdown_grpc_aio()
+
+
+cdef class _ServicerContext:
+
+ def __cinit__(self,
+ RPCState rpc_state,
+ object request_deserializer,
+ object response_serializer,
+ object loop):
+ self._rpc_state = rpc_state
+ self._request_deserializer = request_deserializer
+ self._response_serializer = response_serializer
+ self._loop = loop
+
+ async def read(self):
+ cdef bytes raw_message
+ self._rpc_state.raise_for_termination()
+
+ raw_message = await _receive_message(self._rpc_state, self._loop)
+ self._rpc_state.raise_for_termination()
+
+ if raw_message is None:
+ return EOF
+ else:
+ return deserialize(self._request_deserializer,
+ raw_message)
+
+ async def write(self, object message):
+ self._rpc_state.raise_for_termination()
+
+ await _send_message(self._rpc_state,
+ serialize(self._response_serializer, message),
+ self._rpc_state.create_send_initial_metadata_op_if_not_sent(),
+ self._rpc_state.get_write_flag(),
+ self._loop)
+ self._rpc_state.metadata_sent = True
+
+ async def send_initial_metadata(self, object metadata):
+ self._rpc_state.raise_for_termination()
+
+ if self._rpc_state.metadata_sent:
+ raise UsageError('Send initial metadata failed: already sent')
+ else:
+ await _send_initial_metadata(
+ self._rpc_state,
+ _augment_metadata(tuple(metadata), self._rpc_state.compression_algorithm),
+ _EMPTY_FLAG,
+ self._loop
+ )
+ self._rpc_state.metadata_sent = True
+
+ async def abort(self,
+ object code,
+ str details='',
+ tuple trailing_metadata=_IMMUTABLE_EMPTY_METADATA):
+ if self._rpc_state.abort_exception is not None:
+ raise UsageError('Abort already called!')
+ else:
+ # Keeps track of the exception object. After abort happen, the RPC
+ # should stop execution. However, if users decided to suppress it, it
+ # could lead to undefined behavior.
+ self._rpc_state.abort_exception = AbortError('Locally aborted.')
+
+ if trailing_metadata == _IMMUTABLE_EMPTY_METADATA and self._rpc_state.trailing_metadata:
+ trailing_metadata = self._rpc_state.trailing_metadata
+ else:
+ raise_if_not_valid_trailing_metadata(trailing_metadata)
+ self._rpc_state.trailing_metadata = trailing_metadata
+
+ if details == '' and self._rpc_state.status_details:
+ details = self._rpc_state.status_details
+ else:
+ self._rpc_state.status_details = details
+
+ actual_code = get_status_code(code)
+ self._rpc_state.status_code = actual_code
+
+ self._rpc_state.status_sent = True
+ await _send_error_status_from_server(
+ self._rpc_state,
+ actual_code,
+ details,
+ trailing_metadata,
+ self._rpc_state.create_send_initial_metadata_op_if_not_sent(),
+ self._loop
+ )
+
+ raise self._rpc_state.abort_exception
+
+ async def abort_with_status(self, object status):
+ await self.abort(status.code, status.details, status.trailing_metadata)
+
+ def set_trailing_metadata(self, object metadata):
+ raise_if_not_valid_trailing_metadata(metadata)
+ self._rpc_state.trailing_metadata = tuple(metadata)
+
+ def trailing_metadata(self):
+ return self._rpc_state.trailing_metadata
+
+ def invocation_metadata(self):
+ return self._rpc_state.invocation_metadata()
+
+ def set_code(self, object code):
+ self._rpc_state.status_code = get_status_code(code)
+
+ def code(self):
+ return self._rpc_state.status_code
+
+ def set_details(self, str details):
+ self._rpc_state.status_details = details
+
+ def details(self):
+ return self._rpc_state.status_details
+
+ def set_compression(self, object compression):
+ if self._rpc_state.metadata_sent:
+ raise RuntimeError('Compression setting must be specified before sending initial metadata')
+ else:
+ self._rpc_state.compression_algorithm = compression
+
+ def disable_next_message_compression(self):
+ self._rpc_state.disable_next_compression = True
+
+ def peer(self):
+ cdef char *c_peer = NULL
+ c_peer = grpc_call_get_peer(self._rpc_state.call)
+ peer = (<bytes>c_peer).decode('utf8')
+ gpr_free(c_peer)
+ return peer
+
+    def peer_identities(self):
+        # Wraps the raw call in a temporary Call object so the shared
+        # peer_identities helper can be reused; the wrapper's pointer is
+        # cleared afterwards so it never owns (or frees) the call.
+        cdef Call query_call = Call()
+        query_call.c_call = self._rpc_state.call
+        identities = peer_identities(query_call)
+        query_call.c_call = NULL
+        return identities
+
+    def peer_identity_key(self):
+        # Same temporary-Call pattern as peer_identities; decodes the key to
+        # str, or returns None when the peer has no identity.
+        cdef Call query_call = Call()
+        query_call.c_call = self._rpc_state.call
+        identity_key = peer_identity_key(query_call)
+        query_call.c_call = NULL
+        if identity_key:
+            return identity_key.decode('utf8')
+        else:
+            return None
+
+    def auth_context(self):
+        # Returns the auth context as a dict with str keys (values are kept
+        # as returned by the helper); empty dict when there is no context.
+        cdef Call query_call = Call()
+        query_call.c_call = self._rpc_state.call
+        bytes_ctx = auth_context(query_call)
+        query_call.c_call = NULL
+        if bytes_ctx:
+            ctx = {}
+            for key in bytes_ctx:
+                ctx[key.decode('utf8')] = bytes_ctx[key]
+            return ctx
+        else:
+            return {}
+
+    def time_remaining(self):
+        # None when the deadline is infinite; otherwise seconds left,
+        # clamped at zero for already-expired deadlines.
+        if self._rpc_state.details.deadline.seconds == _GPR_INF_FUTURE.seconds:
+            return None
+        else:
+            return max(_time_from_timespec(self._rpc_state.details.deadline) - time.time(), 0)
+
+    def add_done_callback(self, callback):
+        # Binds this context as the callback's argument now; the callbacks
+        # are invoked later by _add_callback_handler when the RPC task ends.
+        cb = functools.partial(callback, self)
+        self._rpc_state.callbacks.append(cb)
+
+    def done(self):
+        # The RPC counts as done once its final status has been sent.
+        return self._rpc_state.status_sent
+
+    def cancelled(self):
+        # True when the staged final status is CANCELLED.
+        return self._rpc_state.status_code == StatusCode.cancelled
+
+
+cdef class _SyncServicerContext:
+    """Sync servicer context for sync handler compatibility.
+
+    Sync method handlers run inside the server's thread pool, so the
+    coroutine-based operations of the wrapped async _ServicerContext are
+    dispatched onto the event loop via run_coroutine_threadsafe; purely
+    state-mutating operations are forwarded directly.
+    """
+
+    def __cinit__(self,
+                  _ServicerContext context):
+        self._context = context
+        # Sync-stack callbacks registered via add_callback; invoked by the
+        # unary-response finisher after the handler returns.
+        self._callbacks = []
+        self._loop = context._loop
+
+    def abort(self,
+              object code,
+              str details='',
+              tuple trailing_metadata=_IMMUTABLE_EMPTY_METADATA):
+        future = asyncio.run_coroutine_threadsafe(
+            self._context.abort(code, details, trailing_metadata),
+            self._loop)
+        # Abort should raise an AbortError
+        future.exception()
+
+    def send_initial_metadata(self, object metadata):
+        future = asyncio.run_coroutine_threadsafe(
+            self._context.send_initial_metadata(metadata),
+            self._loop)
+        # Blocks the thread-pool thread until the metadata is on the wire.
+        future.result()
+
+    def set_trailing_metadata(self, object metadata):
+        self._context.set_trailing_metadata(metadata)
+
+    def invocation_metadata(self):
+        return self._context.invocation_metadata()
+
+    def set_code(self, object code):
+        self._context.set_code(code)
+
+    def set_details(self, str details):
+        self._context.set_details(details)
+
+    def set_compression(self, object compression):
+        self._context.set_compression(compression)
+
+    def disable_next_message_compression(self):
+        self._context.disable_next_message_compression()
+
+    def add_callback(self, object callback):
+        self._callbacks.append(callback)
+
+    def peer(self):
+        return self._context.peer()
+
+    def peer_identities(self):
+        return self._context.peer_identities()
+
+    def peer_identity_key(self):
+        return self._context.peer_identity_key()
+
+    def auth_context(self):
+        return self._context.auth_context()
+
+    def time_remaining(self):
+        return self._context.time_remaining()
+
+
+async def _run_interceptor(object interceptors, object query_handler,
+                           object handler_call_details):
+    """Walk the server interceptor chain recursively.
+
+    Each interceptor receives a continuation that resumes the walk; once
+    the iterator is exhausted, the handler lookup runs synchronously.
+    """
+    current = next(interceptors, None)
+    if not current:
+        return query_handler(handler_call_details)
+    continuation = functools.partial(_run_interceptor, interceptors,
+                                     query_handler)
+    return await current.intercept_service(continuation, handler_call_details)
+
+
+def _is_async_handler(object handler):
+    """Inspect if a method handler is async or sync."""
+    predicates = (inspect.isawaitable,
+                  inspect.iscoroutinefunction,
+                  inspect.isasyncgenfunction)
+    return any(predicate(handler) for predicate in predicates)
+
+
+async def _find_method_handler(str method, tuple metadata, list generic_handlers,
+                               tuple interceptors):
+    """Resolve the method handler for an RPC, running interceptors if any."""
+    def query_handlers(handler_call_details):
+        # First registered generic handler that recognizes the method wins.
+        for generic_handler in generic_handlers:
+            method_handler = generic_handler.service(handler_call_details)
+            if method_handler is not None:
+                return method_handler
+        return None
+
+    cdef _HandlerCallDetails handler_call_details = _HandlerCallDetails(method,
+                                                                        metadata)
+    # interceptor
+    if interceptors:
+        return await _run_interceptor(iter(interceptors), query_handlers,
+                                      handler_call_details)
+    else:
+        return query_handlers(handler_call_details)
+
+
+async def _finish_handler_with_unary_response(RPCState rpc_state,
+                                              object unary_handler,
+                                              object request,
+                                              _ServicerContext servicer_context,
+                                              object response_serializer,
+                                              object loop):
+    """Finishes server method handler with a single response.
+
+    This function executes the application handler, and handles response
+    sending, as well as errors. It is shared between unary-unary and
+    stream-unary handlers.
+    """
+    # Executes application logic
+    cdef object response_message
+    # Only populated on the sync path below.
+    cdef _SyncServicerContext sync_servicer_context
+
+    if _is_async_handler(unary_handler):
+        # Run async method handlers in this coroutine
+        response_message = await unary_handler(
+            request,
+            servicer_context,
+        )
+    else:
+        # Run sync method handlers in the thread pool
+        sync_servicer_context = _SyncServicerContext(servicer_context)
+        response_message = await loop.run_in_executor(
+            rpc_state.server.thread_pool(),
+            unary_handler,
+            request,
+            sync_servicer_context,
+        )
+        # Support sync-stack callback
+        for callback in sync_servicer_context._callbacks:
+            callback()
+
+    # Raises exception if aborted
+    rpc_state.raise_for_termination()
+
+    # Serializes the response message
+    cdef bytes response_raw
+    if rpc_state.status_code == StatusCode.ok:
+        response_raw = serialize(
+            response_serializer,
+            response_message,
+        )
+    else:
+        # Discards the response message if the status code is non-OK.
+        response_raw = b''
+
+    # Assembles the batch operations
+    cdef tuple finish_ops
+    finish_ops = (
+        SendMessageOperation(response_raw, rpc_state.get_write_flag()),
+        SendStatusFromServerOperation(
+            rpc_state.trailing_metadata,
+            rpc_state.status_code,
+            rpc_state.status_details,
+            _EMPTY_FLAGS,
+        ),
+    )
+    # Initial metadata must precede the message/status ops if it has not
+    # been sent explicitly by the handler yet.
+    if not rpc_state.metadata_sent:
+        finish_ops = prepend_send_initial_metadata_op(
+            finish_ops,
+            None)
+    # Mark flags before the batch so concurrent observers see a consistent
+    # "finishing" state even while the batch is in flight.
+    rpc_state.metadata_sent = True
+    rpc_state.status_sent = True
+    await execute_batch(rpc_state, finish_ops, loop)
+
+
+async def _finish_handler_with_stream_responses(RPCState rpc_state,
+                                                object stream_handler,
+                                                object request,
+                                                _ServicerContext servicer_context,
+                                                object loop):
+    """Finishes server method handler with multiple responses.
+
+    This function executes the application handler, and handles response
+    sending, as well as errors. It is shared between unary-stream and
+    stream-stream handlers.
+    """
+    cdef object async_response_generator
+    cdef object response_message
+
+    if inspect.iscoroutinefunction(stream_handler):
+        # Case 1: Coroutine async handler - using reader-writer API
+        # The handler uses reader / writer API, returns None.
+        await stream_handler(
+            request,
+            servicer_context,
+        )
+    else:
+        if inspect.isasyncgenfunction(stream_handler):
+            # Case 2: Async handler - async generator
+            # The handler uses async generator API
+            async_response_generator = stream_handler(
+                request,
+                servicer_context,
+            )
+        else:
+            # Case 3: Sync handler - normal generator
+            # NOTE(lidiz) Streaming handler in sync stack is either a generator
+            # function or a function returns a generator.
+            sync_servicer_context = _SyncServicerContext(servicer_context)
+            gen = stream_handler(request, sync_servicer_context)
+            # Bridge the blocking generator onto the event loop via the
+            # server's thread pool.
+            async_response_generator = generator_to_async_generator(gen,
+                                                                    loop,
+                                                                    rpc_state.server.thread_pool())
+
+        # Consumes messages from the generator
+        async for response_message in async_response_generator:
+            # Raises exception if aborted
+            rpc_state.raise_for_termination()
+
+            await servicer_context.write(response_message)
+
+    # Raises exception if aborted
+    rpc_state.raise_for_termination()
+
+    # Sends the final status of this RPC
+    cdef SendStatusFromServerOperation op = SendStatusFromServerOperation(
+        rpc_state.trailing_metadata,
+        rpc_state.status_code,
+        rpc_state.status_details,
+        _EMPTY_FLAGS,
+    )
+
+    cdef tuple finish_ops = (op,)
+    # Initial metadata must precede the status if not yet sent explicitly.
+    if not rpc_state.metadata_sent:
+        finish_ops = prepend_send_initial_metadata_op(
+            finish_ops,
+            None
+        )
+    rpc_state.metadata_sent = True
+    rpc_state.status_sent = True
+    await execute_batch(rpc_state, finish_ops, loop)
+
+
+async def _handle_unary_unary_rpc(object method_handler,
+                                  RPCState rpc_state,
+                                  object loop):
+    """Drives a unary-request, unary-response RPC to completion."""
+    # Receives request message
+    cdef bytes request_raw = await _receive_message(rpc_state, loop)
+    if request_raw is None:
+        # The RPC was cancelled immediately after start on client side.
+        return
+
+    # Deserializes the request message
+    cdef object request_message = deserialize(
+        method_handler.request_deserializer,
+        request_raw,
+    )
+
+    # Creates a dedicated ServicerContext.  No (de)serializers are passed
+    # since the reader/writer API is unused for unary-unary RPCs.
+    cdef _ServicerContext servicer_context = _ServicerContext(
+        rpc_state,
+        None,
+        None,
+        loop,
+    )
+
+    # Finishes the application handler
+    await _finish_handler_with_unary_response(
+        rpc_state,
+        method_handler.unary_unary,
+        request_message,
+        servicer_context,
+        method_handler.response_serializer,
+        loop
+    )
+
+
+async def _handle_unary_stream_rpc(object method_handler,
+                                   RPCState rpc_state,
+                                   object loop):
+    """Drives a unary-request, streaming-response RPC to completion."""
+    # Receives request message
+    cdef bytes request_raw = await _receive_message(rpc_state, loop)
+    if request_raw is None:
+        # The RPC was cancelled immediately after start on client side.
+        return
+
+    # Deserializes the request message
+    cdef object request_message = deserialize(
+        method_handler.request_deserializer,
+        request_raw,
+    )
+
+    # Creates a dedicated ServicerContext
+    cdef _ServicerContext servicer_context = _ServicerContext(
+        rpc_state,
+        method_handler.request_deserializer,
+        method_handler.response_serializer,
+        loop,
+    )
+
+    # Finishes the application handler
+    await _finish_handler_with_stream_responses(
+        rpc_state,
+        method_handler.unary_stream,
+        request_message,
+        servicer_context,
+        loop,
+    )
+
+
+cdef class _MessageReceiver:
+    """Bridge between the async generator API and the reader-writer API."""
+
+    def __cinit__(self, _ServicerContext servicer_context):
+        self._servicer_context = servicer_context
+        # Lazily-created async generator; see __aiter__.
+        self._agen = None
+
+    async def _async_message_receiver(self):
+        """An async generator that receives messages."""
+        cdef object message
+        while True:
+            message = await self._servicer_context.read()
+            # EOF is the sentinel marking end of the client's request stream.
+            if message is not EOF:
+                yield message
+            else:
+                break
+
+    def __aiter__(self):
+        # Prevents never awaited warning if application never used the async generator
+        if self._agen is None:
+            self._agen = self._async_message_receiver()
+        return self._agen
+
+    async def __anext__(self):
+        return await self.__aiter__().__anext__()
+
+
+async def _handle_stream_unary_rpc(object method_handler,
+                                   RPCState rpc_state,
+                                   object loop):
+    """Drives a streaming-request, unary-response RPC to completion."""
+    # Creates a dedicated ServicerContext
+    cdef _ServicerContext servicer_context = _ServicerContext(
+        rpc_state,
+        method_handler.request_deserializer,
+        None,
+        loop,
+    )
+
+    # Prepares the request generator
+    cdef object request_iterator
+    if _is_async_handler(method_handler.stream_unary):
+        request_iterator = _MessageReceiver(servicer_context)
+    else:
+        # Sync handlers need a blocking iterator bridged from the async one.
+        request_iterator = async_generator_to_generator(
+            _MessageReceiver(servicer_context),
+            loop
+        )
+
+    # Finishes the application handler
+    await _finish_handler_with_unary_response(
+        rpc_state,
+        method_handler.stream_unary,
+        request_iterator,
+        servicer_context,
+        method_handler.response_serializer,
+        loop
+    )
+
+
+async def _handle_stream_stream_rpc(object method_handler,
+                                    RPCState rpc_state,
+                                    object loop):
+    """Drives a streaming-request, streaming-response RPC to completion."""
+    # Creates a dedicated ServicerContext
+    cdef _ServicerContext servicer_context = _ServicerContext(
+        rpc_state,
+        method_handler.request_deserializer,
+        method_handler.response_serializer,
+        loop,
+    )
+
+    # Prepares the request generator
+    cdef object request_iterator
+    if _is_async_handler(method_handler.stream_stream):
+        request_iterator = _MessageReceiver(servicer_context)
+    else:
+        # Sync handlers need a blocking iterator bridged from the async one.
+        request_iterator = async_generator_to_generator(
+            _MessageReceiver(servicer_context),
+            loop
+        )
+
+    # Finishes the application handler
+    await _finish_handler_with_stream_responses(
+        rpc_state,
+        method_handler.stream_stream,
+        request_iterator,
+        servicer_context,
+        loop,
+    )
+
+
+async def _handle_exceptions(RPCState rpc_state, object rpc_coro, object loop):
+    """Error boundary around a single RPC coroutine.
+
+    Translates expected failures (abort, cancellation, server stop) into
+    logs, and converts any other exception into an error status for the
+    client when one has not been sent yet.
+    """
+    try:
+        try:
+            await rpc_coro
+        except AbortError as e:
+            # Caught AbortError check if it is the same one
+            assert rpc_state.abort_exception is e, 'Abort error has been replaced!'
+            return
+        else:
+            # Check if the abort exception got suppressed
+            if rpc_state.abort_exception is not None:
+                # NOTE(review): the single-argument form of
+                # traceback.format_exception requires Python >= 3.10 —
+                # confirm against the minimum supported runtime.
+                _LOGGER.error(
+                    'Abort error unexpectedly suppressed: %s',
+                    traceback.format_exception(rpc_state.abort_exception)
+                )
+    except (KeyboardInterrupt, SystemExit):
+        raise
+    except asyncio.CancelledError:
+        _LOGGER.debug('RPC cancelled for servicer method [%s]', _decode(rpc_state.method()))
+    except _ServerStoppedError:
+        _LOGGER.warning('Aborting method [%s] due to server stop.', _decode(rpc_state.method()))
+    except ExecuteBatchError:
+        # If client closed (aka. cancelled), ignore the failed batch operations.
+        if rpc_state.client_closed:
+            return
+        else:
+            raise
+    except Exception as e:
+        _LOGGER.exception('Unexpected [%s] raised by servicer method [%s]' % (
+            type(e).__name__,
+            _decode(rpc_state.method()),
+        ))
+        if not rpc_state.status_sent and rpc_state.server._status != AIO_SERVER_STATUS_STOPPED:
+            # Allows users to raise other types of exception with specified status code
+            if rpc_state.status_code == StatusCode.ok:
+                status_code = StatusCode.unknown
+            else:
+                status_code = rpc_state.status_code
+
+            rpc_state.status_sent = True
+            await _send_error_status_from_server(
+                rpc_state,
+                status_code,
+                'Unexpected %s: %s' % (type(e), e),
+                rpc_state.trailing_metadata,
+                rpc_state.create_send_initial_metadata_op_if_not_sent(),
+                loop
+            )
+
+
+cdef _add_callback_handler(object rpc_task, RPCState rpc_state):
+    # Attaches a done-callback to the RPC task that fires all of the
+    # user-registered callbacks collected via add_done_callback.
+
+    def handle_callbacks(object unused_task):
+        try:
+            for callback in rpc_state.callbacks:
+                # The _ServicerContext object is bound in add_done_callback.
+                callback()
+        except:
+            # A failing user callback must not propagate into asyncio's
+            # done-callback machinery; log and swallow.
+            _LOGGER.exception('Error in callback for method [%s]', _decode(rpc_state.method()))
+
+    rpc_task.add_done_callback(handle_callbacks)
+
+
+async def _handle_cancellation_from_core(object rpc_task,
+                                         RPCState rpc_state,
+                                         object loop):
+    """Listens for client-side closure and cancels the RPC task if needed."""
+    cdef ReceiveCloseOnServerOperation op = ReceiveCloseOnServerOperation(_EMPTY_FLAG)
+    cdef tuple ops = (op,)
+
+    # Awaits cancellation from peer.
+    await execute_batch(rpc_state, ops, loop)
+    rpc_state.client_closed = True
+    # If 1) received cancel signal; 2) the Task is not finished; 3) the server
+    # wasn't replying final status. For condition 3, it might cause inaccurate
+    # log that an RPC is both aborted and cancelled.
+    if op.cancelled() and not rpc_task.done() and not rpc_state.status_sent:
+        # Injects `CancelledError` to halt the RPC coroutine
+        rpc_task.cancel()
+
+
+async def _schedule_rpc_coro(object rpc_coro,
+                             RPCState rpc_state,
+                             object loop):
+    """Schedules the RPC coroutine and then watches for client cancellation."""
+    # Schedules the RPC coroutine.
+    cdef object rpc_task = loop.create_task(_handle_exceptions(
+        rpc_state,
+        rpc_coro,
+        loop,
+    ))
+    _add_callback_handler(rpc_task, rpc_state)
+    # Runs in this coroutine (not a separate task) so the RPC task above is
+    # guaranteed to be scheduled before we start listening for cancellation.
+    await _handle_cancellation_from_core(rpc_task, rpc_state, loop)
+
+
+async def _handle_rpc(list generic_handlers, tuple interceptors,
+                      RPCState rpc_state, object loop):
+    """Resolves the method handler and dispatches to the matching RPC driver."""
+    cdef object method_handler
+    # Finds the method handler (application logic)
+    method_handler = await _find_method_handler(
+        rpc_state.method().decode(),
+        rpc_state.invocation_metadata(),
+        generic_handlers,
+        interceptors,
+    )
+    if method_handler is None:
+        # No handler registered for this method: reply UNIMPLEMENTED.
+        rpc_state.status_sent = True
+        await _send_error_status_from_server(
+            rpc_state,
+            StatusCode.unimplemented,
+            'Method not found!',
+            _IMMUTABLE_EMPTY_METADATA,
+            rpc_state.create_send_initial_metadata_op_if_not_sent(),
+            loop
+        )
+        return
+
+    # Handles unary-unary case
+    if not method_handler.request_streaming and not method_handler.response_streaming:
+        await _handle_unary_unary_rpc(method_handler,
+                                      rpc_state,
+                                      loop)
+        return
+
+    # Handles unary-stream case
+    if not method_handler.request_streaming and method_handler.response_streaming:
+        await _handle_unary_stream_rpc(method_handler,
+                                       rpc_state,
+                                       loop)
+        return
+
+    # Handles stream-unary case
+    if method_handler.request_streaming and not method_handler.response_streaming:
+        await _handle_stream_unary_rpc(method_handler,
+                                       rpc_state,
+                                       loop)
+        return
+
+    # Handles stream-stream case
+    if method_handler.request_streaming and method_handler.response_streaming:
+        await _handle_stream_stream_rpc(method_handler,
+                                        rpc_state,
+                                        loop)
+        return
+
+
+# Raised by the request-call callback when grpc_server_request_call fails;
+# also used to unblock the main loop during shutdown.
+class _RequestCallError(Exception): pass
+
+cdef CallbackFailureHandler REQUEST_CALL_FAILURE_HANDLER = CallbackFailureHandler(
+    'grpc_server_request_call', None, _RequestCallError)
+
+
+# Failure handler for the shutdown-and-notify completion callback.
+cdef CallbackFailureHandler SERVER_SHUTDOWN_FAILURE_HANDLER = CallbackFailureHandler(
+    'grpc_server_shutdown_and_notify',
+    None,
+    InternalError)
+
+
+cdef class _ConcurrentRpcLimiter:
+    """Caps the number of RPCs served concurrently.
+
+    check_before_request_call() blocks until the active-RPC count drops
+    below the configured maximum and then claims a slot;
+    decrease_once_finished() arranges for the slot to be released when the
+    RPC task completes.
+    """
+
+    def __cinit__(self, int maximum_concurrent_rpcs, object loop):
+        if maximum_concurrent_rpcs <= 0:
+            # Fixed typo in user-facing message: "postive" -> "positive".
+            raise ValueError("maximum_concurrent_rpcs should be a positive integer")
+        self._maximum_concurrent_rpcs = maximum_concurrent_rpcs
+        self._active_rpcs = 0
+        self._active_rpcs_condition = asyncio.Condition()
+        self._loop = loop
+
+    async def check_before_request_call(self):
+        # Blocks until a concurrency slot is free, then claims it.
+        async with self._active_rpcs_condition:
+            predicate = lambda: self._active_rpcs < self._maximum_concurrent_rpcs
+            await self._active_rpcs_condition.wait_for(predicate)
+            self._active_rpcs += 1
+
+    async def _decrease_active_rpcs_count_with_lock(self):
+        # Releases a slot and wakes one waiter in check_before_request_call().
+        async with self._active_rpcs_condition:
+            self._active_rpcs -= 1
+            self._active_rpcs_condition.notify()
+
+    def _decrease_active_rpcs_count(self, unused_future):
+        # Task done-callbacks run synchronously, so defer the (async) lock
+        # acquisition to a freshly scheduled task.
+        self._loop.create_task(self._decrease_active_rpcs_count_with_lock())
+
+    def decrease_once_finished(self, object rpc_task):
+        rpc_task.add_done_callback(self._decrease_active_rpcs_count)
+
+
+cdef class AioServer:
+    """asyncio-flavored gRPC server wrapping a Core server.
+
+    Lifecycle: READY -> RUNNING (start) -> STOPPING (shutdown) -> STOPPED.
+    """
+
+    def __init__(self, loop, thread_pool, generic_handlers, interceptors,
+                 options, maximum_concurrent_rpcs):
+        init_grpc_aio()
+        # NOTE(lidiz) Core objects won't be deallocated automatically.
+        # If AioServer.shutdown is not called, those objects will leak.
+        # TODO(rbellevi): Support xDS in aio server.
+        self._server = Server(options, False)
+        grpc_server_register_completion_queue(
+            self._server.c_server,
+            global_completion_queue(),
+            NULL
+        )
+
+        self._loop = loop
+        self._status = AIO_SERVER_STATUS_READY
+        self._generic_handlers = []
+        self.add_generic_rpc_handlers(generic_handlers)
+        self._serving_task = None
+
+        self._shutdown_lock = asyncio.Lock()
+        self._shutdown_completed = self._loop.create_future()
+        self._shutdown_callback_wrapper = CallbackWrapper(
+            self._shutdown_completed,
+            self._loop,
+            SERVER_SHUTDOWN_FAILURE_HANDLER)
+        self._crash_exception = None
+
+        if interceptors:
+            self._interceptors = tuple(interceptors)
+        else:
+            self._interceptors = ()
+
+        self._thread_pool = thread_pool
+        # _limiter stays unset (None) when no concurrency cap is requested.
+        if maximum_concurrent_rpcs is not None:
+            self._limiter = _ConcurrentRpcLimiter(maximum_concurrent_rpcs,
+                                                  loop)
+
+    def add_generic_rpc_handlers(self, object generic_rpc_handlers):
+        self._generic_handlers.extend(generic_rpc_handlers)
+
+    def add_insecure_port(self, address):
+        return self._server.add_http2_port(address)
+
+    def add_secure_port(self, address, server_credentials):
+        return self._server.add_http2_port(address,
+                                           server_credentials._credentials)
+
+    async def _request_call(self):
+        # Asks Core for the next incoming RPC; resolves once one arrives.
+        cdef grpc_call_error error
+        cdef RPCState rpc_state = RPCState(self)
+        cdef object future = self._loop.create_future()
+        cdef CallbackWrapper wrapper = CallbackWrapper(
+            future,
+            self._loop,
+            REQUEST_CALL_FAILURE_HANDLER)
+        error = grpc_server_request_call(
+            self._server.c_server, &rpc_state.call, &rpc_state.details,
+            &rpc_state.request_metadata,
+            global_completion_queue(), global_completion_queue(),
+            wrapper.c_functor()
+        )
+        if error != GRPC_CALL_OK:
+            raise InternalError("Error in grpc_server_request_call: %s" % error)
+
+        await future
+        return rpc_state
+
+    async def _server_main_loop(self,
+                                object server_started):
+        # Accept loop: requests calls from Core and fans each out to a task.
+        self._server.start(backup_queue=False)
+        cdef RPCState rpc_state
+        server_started.set_result(True)
+
+        while True:
+            # When shutdown begins, no more new connections.
+            if self._status != AIO_SERVER_STATUS_RUNNING:
+                break
+
+            if self._limiter is not None:
+                await self._limiter.check_before_request_call()
+
+            # Accepts new request from Core
+            rpc_state = await self._request_call()
+
+            # Creates the dedicated RPC coroutine. If we schedule it right now,
+            # there is no guarantee if the cancellation listening coroutine is
+            # ready or not. So, we should control the ordering by scheduling
+            # the coroutine onto event loop inside of the cancellation
+            # coroutine.
+            rpc_coro = _handle_rpc(self._generic_handlers,
+                                   self._interceptors,
+                                   rpc_state,
+                                   self._loop)
+
+            # Fires off a task that listens on the cancellation from client.
+            rpc_task = self._loop.create_task(
+                _schedule_rpc_coro(
+                    rpc_coro,
+                    rpc_state,
+                    self._loop
+                )
+            )
+
+            if self._limiter is not None:
+                self._limiter.decrease_once_finished(rpc_task)
+
+    def _serving_task_crash_handler(self, object task):
+        """Shutdown the server immediately if unexpectedly exited."""
+        if task.cancelled():
+            return
+        if task.exception() is None:
+            return
+        if self._status != AIO_SERVER_STATUS_STOPPING:
+            self._crash_exception = task.exception()
+            _LOGGER.exception(self._crash_exception)
+            self._loop.create_task(self.shutdown(None))
+
+    async def start(self):
+        # Idempotent when already running; raises on any other non-READY state.
+        if self._status == AIO_SERVER_STATUS_RUNNING:
+            return
+        elif self._status != AIO_SERVER_STATUS_READY:
+            raise UsageError('Server not in ready state')
+
+        self._status = AIO_SERVER_STATUS_RUNNING
+        cdef object server_started = self._loop.create_future()
+        self._serving_task = self._loop.create_task(self._server_main_loop(server_started))
+        self._serving_task.add_done_callback(self._serving_task_crash_handler)
+        # Needs to explicitly wait for the server to start up.
+        # Otherwise, the actual start time of the server is un-controllable.
+        await server_started
+
+    async def _start_shutting_down(self):
+        """Prepares the server to shutting down.
+
+        This coroutine function is NOT coroutine-safe.
+        """
+        # The shutdown callback won't be called until there is no live RPC.
+        grpc_server_shutdown_and_notify(
+            self._server.c_server,
+            global_completion_queue(),
+            self._shutdown_callback_wrapper.c_functor())
+
+        # Ensures the serving task (coroutine) exits.
+        try:
+            await self._serving_task
+        except _RequestCallError:
+            # Expected: the pending request-call fails once shutdown begins.
+            pass
+
+    async def shutdown(self, grace):
+        """Gracefully shutdown the Core server.
+
+        Application should only call shutdown once.
+
+        Args:
+          grace: An optional float indicating the length of grace period in
+            seconds.
+        """
+        if self._status == AIO_SERVER_STATUS_READY or self._status == AIO_SERVER_STATUS_STOPPED:
+            return
+
+        async with self._shutdown_lock:
+            if self._status == AIO_SERVER_STATUS_RUNNING:
+                self._server.is_shutting_down = True
+                self._status = AIO_SERVER_STATUS_STOPPING
+                await self._start_shutting_down()
+
+        if grace is None:
+            # Directly cancels all calls
+            grpc_server_cancel_all_calls(self._server.c_server)
+            await self._shutdown_completed
+        else:
+            try:
+                # shield() keeps _shutdown_completed alive past the timeout.
+                await asyncio.wait_for(
+                    asyncio.shield(self._shutdown_completed),
+                    grace,
+                )
+            except asyncio.TimeoutError:
+                # Cancels all ongoing calls by the end of grace period.
+                grpc_server_cancel_all_calls(self._server.c_server)
+                await self._shutdown_completed
+
+        async with self._shutdown_lock:
+            if self._status == AIO_SERVER_STATUS_STOPPING:
+                grpc_server_destroy(self._server.c_server)
+                self._server.c_server = NULL
+                self._server.is_shutdown = True
+                self._status = AIO_SERVER_STATUS_STOPPED
+
+    async def wait_for_termination(self, object timeout):
+        # Returns True on timeout (still running), False once shut down;
+        # re-raises a crash exception from the serving task if one occurred.
+        if timeout is None:
+            await self._shutdown_completed
+        else:
+            try:
+                await asyncio.wait_for(
+                    asyncio.shield(self._shutdown_completed),
+                    timeout,
+                )
+            except asyncio.TimeoutError:
+                if self._crash_exception is not None:
+                    raise self._crash_exception
+                return True
+        if self._crash_exception is not None:
+            raise self._crash_exception
+        return False
+
+    def __dealloc__(self):
+        """Deallocation of Core objects are ensured by Python layer."""
+        # TODO(lidiz) if users create server, and then dealloc it immediately.
+        # There is a potential memory leak of created Core server.
+        if self._status != AIO_SERVER_STATUS_STOPPED:
+            _LOGGER.debug(
+                '__dealloc__ called on running server %s with status %d',
+                self,
+                self._status
+            )
+        shutdown_grpc_aio()
+
+    cdef thread_pool(self):
+        """Access the thread pool instance."""
+        return self._thread_pool
+
+    def is_running(self):
+        return self._status == AIO_SERVER_STATUS_RUNNING
diff --git a/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/arguments.pxd.pxi b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/arguments.pxd.pxi
new file mode 100644
index 0000000000..251efe15b3
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/arguments.pxd.pxi
@@ -0,0 +1,36 @@
+# Copyright 2018 The gRPC Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Declarations for arguments.pyx.pxi: channel-argument marshalling helpers.
+
+# Wraps a raw grpc_arg in a Python tuple so it can travel through
+# Python-level channel-argument sequences.
+cdef tuple _wrap_grpc_arg(grpc_arg arg)
+
+
+# Inverse of _wrap_grpc_arg.
+cdef grpc_arg _unwrap_grpc_arg(tuple wrapped_arg)
+
+
+cdef class _ChannelArg:
+
+    # The C representation of a single (key, value) channel argument.
+    cdef grpc_arg c_argument
+
+    cdef void c(self, argument, references) except *
+
+
+cdef class _ChannelArgs:
+
+    cdef readonly tuple _arguments
+    cdef list _channel_args
+    cdef readonly list _references
+    # Backing C array; allocated in __cinit__, freed in __dealloc__.
+    cdef grpc_channel_args _c_arguments
+
+    cdef grpc_channel_args *c_args(self) except *
diff --git a/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/arguments.pyx.pxi b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/arguments.pyx.pxi
new file mode 100644
index 0000000000..9df308cdbc
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/arguments.pyx.pxi
@@ -0,0 +1,85 @@
+# Copyright 2018 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+cdef class _GrpcArgWrapper:
+    # Opaque holder that lets a raw grpc_arg travel through Python objects.
+
+    cdef grpc_arg arg
+
+
+cdef tuple _wrap_grpc_arg(grpc_arg arg):
+    # Tags the wrapper with a sentinel key so _ChannelArg.c can recognize it.
+    wrapped = _GrpcArgWrapper()
+    wrapped.arg = arg
+    return ("grpc.python._cygrpc._GrpcArgWrapper", wrapped)
+
+
+cdef grpc_arg _unwrap_grpc_arg(tuple wrapped_arg):
+    # Inverse of _wrap_grpc_arg: extracts the raw grpc_arg from the tuple.
+    cdef _GrpcArgWrapper wrapped = wrapped_arg[1]
+    return wrapped.arg
+
+
+cdef class _ChannelArg:
+    # Converts one Python-level (key, value) channel argument into its C form.
+
+    cdef void c(self, argument, references) except *:
+        key, value = argument
+        cdef bytes encoded_key = _encode(key)
+        # Freshly-encoded bytes must be kept alive for the lifetime of the
+        # C argument, so they are appended to the shared references list.
+        if encoded_key is not key:
+            references.append(encoded_key)
+        self.c_argument.key = encoded_key
+        if isinstance(value, int):
+            self.c_argument.type = GRPC_ARG_INTEGER
+            self.c_argument.value.integer = value
+        elif isinstance(value, (bytes, str, unicode,)):
+            self.c_argument.type = GRPC_ARG_STRING
+            encoded_value = _encode(value)
+            if encoded_value is not value:
+                references.append(encoded_value)
+            self.c_argument.value.string = encoded_value
+        elif isinstance(value, _GrpcArgWrapper):
+            # Pre-built grpc_arg (from _wrap_grpc_arg): copy it verbatim.
+            self.c_argument = (<_GrpcArgWrapper>value).arg
+        elif hasattr(value, '__int__'):
+            # Pointer objects must override __int__() to return
+            # the underlying C address (Python ints are word size). The
+            # lifecycle of the pointer is fixed to the lifecycle of the
+            # python object wrapping it.
+            self.c_argument.type = GRPC_ARG_POINTER
+            self.c_argument.value.pointer.vtable = &default_vtable
+            self.c_argument.value.pointer.address = <void*>(<intptr_t>int(value))
+        else:
+            raise TypeError(
+                'Expected int, bytes, or behavior, got {}'.format(type(value)))
+
+
+cdef class _ChannelArgs:
+    # Owns the C-side grpc_channel_args array for a set of channel arguments.
+
+    def __cinit__(self, arguments):
+        self._arguments = () if arguments is None else tuple(arguments)
+        self._channel_args = []
+        # Keeps encoded bytes objects alive as long as the C array is in use.
+        self._references = []
+        self._c_arguments.arguments_length = len(self._arguments)
+        if self._c_arguments.arguments_length != 0:
+            self._c_arguments.arguments = <grpc_arg *>gpr_malloc(
+                self._c_arguments.arguments_length * sizeof(grpc_arg))
+            for index, argument in enumerate(self._arguments):
+                channel_arg = _ChannelArg()
+                channel_arg.c(argument, self._references)
+                self._c_arguments.arguments[index] = channel_arg.c_argument
+                self._channel_args.append(channel_arg)
+
+    cdef grpc_channel_args *c_args(self) except *:
+        return &self._c_arguments
+
+    def __dealloc__(self):
+        # Releases the array allocated in __cinit__ (NULL-safe check).
+        if self._c_arguments.arguments != NULL:
+            gpr_free(self._c_arguments.arguments)
diff --git a/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/call.pxd.pxi b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/call.pxd.pxi
new file mode 100644
index 0000000000..8babeb4536
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/call.pxd.pxi
@@ -0,0 +1,20 @@
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+cdef class Call:
+    # Declaration for call.pyx.pxi: wraps a Core grpc_call.
+
+    cdef grpc_call *c_call
+    # Python objects that must outlive in-flight batches on this call.
+    cdef list references
+
diff --git a/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/call.pyx.pxi b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/call.pyx.pxi
new file mode 100644
index 0000000000..f68e166b17
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/call.pyx.pxi
@@ -0,0 +1,97 @@
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Thin Python-facing wrapper over a grpc_call handle. Batches of operations
+# are started against the core call and completed via a completion queue
+# elsewhere; this class only owns the handle and start/cancel entry points.
+cdef class Call:
+
+  def __cinit__(self):
+    # Create an *empty* call
+    fork_handlers_and_grpc_init()
+    self.c_call = NULL
+    self.references = []
+
+  def _start_batch(self, operations, tag, retain_self):
+    # Starts a batch of operations on the core call. When retain_self is
+    # true the tag keeps this Call alive until the batch completes.
+    if not self.is_valid:
+      raise ValueError("invalid call object cannot be used from Python")
+    cdef _BatchOperationTag batch_operation_tag = _BatchOperationTag(
+        tag, operations, self if retain_self else None)
+    batch_operation_tag.prepare()
+    # Manual INCREF balances the DECREF performed when the tag is taken off
+    # the completion queue (_interpret_event).
+    cpython.Py_INCREF(batch_operation_tag)
+    cdef grpc_call_error error
+    with nogil:
+      error = grpc_call_start_batch(
+          self.c_call, batch_operation_tag.c_ops, batch_operation_tag.c_nops,
+          <cpython.PyObject *>batch_operation_tag, NULL)
+    return error
+
+  def start_client_batch(self, operations, tag):
+    # We don't reference this call in the operations tag because
+    # it should be cancelled when it goes out of scope
+    return self._start_batch(operations, tag, False)
+
+  def start_server_batch(self, operations, tag):
+    return self._start_batch(operations, tag, True)
+
+  def cancel(
+      self, grpc_status_code error_code=GRPC_STATUS__DO_NOT_USE,
+      details=None):
+    # error_code and details must be given together or not at all; the
+    # sentinel GRPC_STATUS__DO_NOT_USE means "plain cancel".
+    details = str_to_bytes(details)
+    if not self.is_valid:
+      raise ValueError("invalid call object cannot be used from Python")
+    if (details is None) != (error_code == GRPC_STATUS__DO_NOT_USE):
+      raise ValueError("if error_code is specified, so must details "
+                       "(and vice-versa)")
+    cdef grpc_call_error result
+    cdef char *c_details = NULL
+    if error_code != GRPC_STATUS__DO_NOT_USE:
+      # Keep the bytes object alive: c_details borrows its buffer.
+      self.references.append(details)
+      c_details = details
+      with nogil:
+        result = grpc_call_cancel_with_status(
+            self.c_call, error_code, c_details, NULL)
+      return result
+    else:
+      with nogil:
+        result = grpc_call_cancel(self.c_call, NULL)
+      return result
+
+  def set_credentials(self, CallCredentials call_credentials not None):
+    # c() returns an owned credentials object; release after the core call
+    # has taken its own reference.
+    cdef grpc_call_credentials *c_call_credentials = call_credentials.c()
+    cdef grpc_call_error call_error = grpc_call_set_credentials(
+        self.c_call, c_call_credentials)
+    grpc_call_credentials_release(c_call_credentials)
+    return call_error
+
+  def peer(self):
+    # Returns the peer address as bytes; the core-allocated string is freed
+    # after copying into a Python bytes object.
+    cdef char *peer = NULL
+    with nogil:
+      peer = grpc_call_get_peer(self.c_call)
+    result = <bytes>peer
+    with nogil:
+      gpr_free(peer)
+    return result
+
+  def __dealloc__(self):
+    with nogil:
+      if self.c_call != NULL:
+        grpc_call_unref(self.c_call)
+      # Balances the grpc_init performed in __cinit__.
+      grpc_shutdown()
+
+  # The object *should* always be valid from Python. Used for debugging.
+  @property
+  def is_valid(self):
+    return self.c_call != NULL
+
+  def _custom_op_on_c_call(self, int op):
+    return _custom_op_on_c_call(op, self.c_call)
diff --git a/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/channel.pxd.pxi b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/channel.pxd.pxi
new file mode 100644
index 0000000000..eb27f2df7a
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/channel.pxd.pxi
@@ -0,0 +1,74 @@
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Declarations for channel.pyx.pxi: call-error checking helpers and the
+# state/handle classes shared between integrated and segregated calls.
+
+cdef _check_call_error_no_metadata(c_call_error)
+
+
+cdef _check_and_raise_call_error_no_metadata(c_call_error)
+
+
+cdef _check_call_error(c_call_error, metadata)
+
+
+# Per-RPC state: the core call handle plus the set of in-flight batch tags.
+cdef class _CallState:
+
+  cdef grpc_call *c_call
+  cdef set due
+
+
+# Per-channel state shared (under `condition`) by all threads using the
+# channel.
+cdef class _ChannelState:
+
+  cdef object condition
+  cdef grpc_channel *c_channel
+  # A boolean field indicating that the channel is open (if True) or is being
+  # closed (i.e. a call to close is currently executing) or is closed (if
+  # False).
+  # TODO(https://github.com/grpc/grpc/issues/3064): Eliminate "is being closed"
+  # a state in which condition may be acquired by any thread, eliminate this
+  # field and just use the NULLness of c_channel as an indication that the
+  # channel is closed.
+  cdef object open
+  cdef object closed_reason
+
+  # A dict from _BatchOperationTag to _CallState
+  cdef dict integrated_call_states
+  cdef grpc_completion_queue *c_call_completion_queue
+
+  # A set of _CallState
+  cdef set segregated_call_states
+
+  cdef set connectivity_due
+  cdef grpc_completion_queue *c_connectivity_completion_queue
+
+
+# A call multiplexed onto the channel's shared completion queue.
+cdef class IntegratedCall:
+
+  cdef _ChannelState _channel_state
+  cdef _CallState _call_state
+
+
+# A call with its own dedicated completion queue (used e.g. for blocking
+# unary-unary paths).
+cdef class SegregatedCall:
+
+  cdef _ChannelState _channel_state
+  cdef _CallState _call_state
+  cdef grpc_completion_queue *_c_completion_queue
+
+
+cdef class Channel:
+
+  cdef _ChannelState _state
+
+  # TODO(https://github.com/grpc/grpc/issues/15662): Eliminate this.
+  cdef tuple _arguments
diff --git a/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/channel.pyx.pxi b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/channel.pyx.pxi
new file mode 100644
index 0000000000..d49a4210f7
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/channel.pyx.pxi
@@ -0,0 +1,516 @@
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+_INTERNAL_CALL_ERROR_MESSAGE_FORMAT = (
+    'Internal gRPC call error %d. ' +
+    'Please report to https://github.com/grpc/grpc/issues')
+
+
+# Message for the one call error that carries user-relevant context
+# (invalid metadata).
+cdef str _call_error_metadata(metadata):
+  return 'metadata was invalid: %s' % metadata
+
+
+cdef str _call_error_no_metadata(c_call_error):
+  return _INTERNAL_CALL_ERROR_MESSAGE_FORMAT % c_call_error
+
+
+# Chooses between the metadata-specific and generic message for a
+# grpc_call_error value.
+cdef str _call_error(c_call_error, metadata):
+  if c_call_error == GRPC_CALL_ERROR_INVALID_METADATA:
+    return _call_error_metadata(metadata)
+  else:
+    return _call_error_no_metadata(c_call_error)
+
+
+# Returns an error string for a non-OK call error, or None on GRPC_CALL_OK.
+cdef _check_call_error_no_metadata(c_call_error):
+  if c_call_error != GRPC_CALL_OK:
+    return _INTERNAL_CALL_ERROR_MESSAGE_FORMAT % c_call_error
+  else:
+    return None
+
+
+# Raises ValueError if the call error is not GRPC_CALL_OK.
+cdef _check_and_raise_call_error_no_metadata(c_call_error):
+  error = _check_call_error_no_metadata(c_call_error)
+  if error is not None:
+    raise ValueError(error)
+
+
+# Like _check_call_error_no_metadata but reports invalid metadata specially.
+cdef _check_call_error(c_call_error, metadata):
+  if c_call_error == GRPC_CALL_ERROR_INVALID_METADATA:
+    return _call_error_metadata(metadata)
+  else:
+    return _check_call_error_no_metadata(c_call_error)
+
+
+cdef void _raise_call_error_no_metadata(c_call_error) except *:
+  raise ValueError(_call_error_no_metadata(c_call_error))
+
+
+cdef void _raise_call_error(c_call_error, metadata) except *:
+  raise ValueError(_call_error(c_call_error, metadata))
+
+
+# Shuts down and destroys a completion queue. Callers must ensure the queue
+# has been fully drained of pending events first.
+cdef _destroy_c_completion_queue(grpc_completion_queue *c_completion_queue):
+  grpc_completion_queue_shutdown(c_completion_queue)
+  grpc_completion_queue_destroy(c_completion_queue)
+
+
+cdef class _CallState:
+
+  def __cinit__(self):
+    # Tags of batches started but not yet completed; c_call is assigned
+    # later by _call().
+    self.due = set()
+
+
+cdef class _ChannelState:
+
+  def __cinit__(self):
+    # condition guards all mutable fields below; the completion queues and
+    # c_channel are assigned by Channel.__cinit__.
+    self.condition = threading.Condition()
+    self.open = True
+    self.integrated_call_states = {}
+    self.segregated_call_states = set()
+    self.connectivity_due = set()
+    self.closed_reason = None
+
+
+# Starts a batch on c_call wrapped in a _BatchOperationTag. Returns
+# (grpc_call_error, tag); the tag has been INCREF'd and will be DECREF'd
+# when taken off the completion queue, so on failure the caller leaks a
+# reference deliberately rather than risk a double-free.
+cdef tuple _operate(grpc_call *c_call, object operations, object user_tag):
+  cdef grpc_call_error c_call_error
+  cdef _BatchOperationTag tag = _BatchOperationTag(user_tag, operations, None)
+  tag.prepare()
+  cpython.Py_INCREF(tag)
+  with nogil:
+    c_call_error = grpc_call_start_batch(
+        c_call, tag.c_ops, tag.c_nops, <cpython.PyObject *>tag, NULL)
+  return c_call_error, tag
+
+
+# Starts a batch on an integrated call, registering the tag in the channel's
+# tag->call-state map. Returns False if the call has no outstanding batches
+# (i.e. it has already terminated).
+cdef object _operate_from_integrated_call(
+    _ChannelState channel_state, _CallState call_state, object operations,
+    object user_tag):
+  cdef grpc_call_error c_call_error
+  cdef _BatchOperationTag tag
+  with channel_state.condition:
+    if call_state.due:
+      c_call_error, tag = _operate(call_state.c_call, operations, user_tag)
+      if c_call_error == GRPC_CALL_OK:
+        call_state.due.add(tag)
+        channel_state.integrated_call_states[tag] = call_state
+        return True
+      else:
+        _raise_call_error_no_metadata(c_call_error)
+    else:
+      return False
+
+
+# Segregated variant: the tag is tracked only on the call state because the
+# call polls its own completion queue.
+cdef object _operate_from_segregated_call(
+    _ChannelState channel_state, _CallState call_state, object operations,
+    object user_tag):
+  cdef grpc_call_error c_call_error
+  cdef _BatchOperationTag tag
+  with channel_state.condition:
+    if call_state.due:
+      c_call_error, tag = _operate(call_state.c_call, operations, user_tag)
+      if c_call_error == GRPC_CALL_OK:
+        call_state.due.add(tag)
+        return True
+      else:
+        _raise_call_error_no_metadata(c_call_error)
+    else:
+      return False
+
+
+# Cancels the call with the given status; a no-op if the call has no
+# outstanding batches (already terminated).
+cdef _cancel(
+    _ChannelState channel_state, _CallState call_state, grpc_status_code code,
+    str details):
+  cdef grpc_call_error c_call_error
+  with channel_state.condition:
+    if call_state.due:
+      c_call_error = grpc_call_cancel_with_status(
+          call_state.c_call, code, _encode(details), NULL)
+      _check_and_raise_call_error_no_metadata(c_call_error)
+
+
+cdef _next_call_event(
+    _ChannelState channel_state, grpc_completion_queue *c_completion_queue,
+    on_success, on_failure, deadline):
+  """Block on the next event out of the completion queue.
+
+  On success, `on_success` will be invoked with the tag taken from the CQ.
+  In the case of a failure due to an exception raised in a signal handler,
+  `on_failure` will be invoked with no arguments. Note that this situation
+  can only occur on the main thread.
+
+  Args:
+    channel_state: The state for the channel on which the RPC is running.
+    c_completion_queue: The CQ which will be polled.
+    on_success: A callable object to be invoked upon successful receipt of a
+      tag from the CQ.
+    on_failure: A callable object to be invoked in case a Python exception is
+      raised from a signal handler during polling.
+    deadline: The point after which the RPC will time out.
+  """
+  try:
+    tag, event = _latent_event(c_completion_queue, deadline)
+  # NOTE(rbellevi): This broad except enables us to clean up resources before
+  # propagating any exceptions raised by signal handlers to the application.
+  except:
+    if on_failure is not None:
+      on_failure()
+    raise
+  else:
+    # on_success runs under the channel lock; waiting closers are woken so
+    # they can re-check whether all calls have drained.
+    with channel_state.condition:
+      on_success(tag)
+      channel_state.condition.notify_all()
+    return event
+
+
+# TODO(https://github.com/grpc/grpc/issues/14569): This could be a lot simpler.
+# TODO(https://github.com/grpc/grpc/issues/14569): This could be a lot simpler.
+cdef void _call(
+    _ChannelState channel_state, _CallState call_state,
+    grpc_completion_queue *c_completion_queue, on_success, int flags, method,
+    host, object deadline, CallCredentials credentials,
+    object operationses_and_user_tags, object metadata,
+    object context) except *:
+  """Invokes an RPC.
+
+  Args:
+    channel_state: A _ChannelState with its "open" attribute set to True. RPCs
+      may not be invoked on a closed channel.
+    call_state: An empty _CallState to be altered (specifically assigned a
+      c_call and having its due set populated) if the RPC invocation is
+      successful.
+    c_completion_queue: A grpc_completion_queue to be used for the call's
+      operations.
+    on_success: A behavior to be called if attempting to start operations for
+      the call succeeds. If called the behavior will be called while holding the
+      channel_state condition and passed the tags associated with operations
+      that were successfully started for the call.
+    flags: Flags to be passed to gRPC Core as part of call creation.
+    method: The fully-qualified name of the RPC method being invoked.
+    host: A "host" string to be passed to gRPC Core as part of call creation.
+    deadline: A float for the deadline of the RPC, or None if the RPC is to have
+      no deadline.
+    credentials: A _CallCredentials for the RPC or None.
+    operationses_and_user_tags: A sequence of length-two sequences the first
+      element of which is a sequence of Operations and the second element of
+      which is an object to be used as a tag. A SendInitialMetadataOperation
+      must be present in the first element of this value.
+    metadata: The metadata for this call.
+    context: Context object for distributed tracing.
+  """
+  cdef grpc_slice method_slice
+  cdef grpc_slice host_slice
+  cdef grpc_slice *host_slice_ptr
+  cdef grpc_call_credentials *c_call_credentials
+  cdef grpc_call_error c_call_error
+  cdef tuple error_and_wrapper_tag
+  cdef _BatchOperationTag wrapper_tag
+  with channel_state.condition:
+    if channel_state.open:
+      method_slice = _slice_from_bytes(method)
+      if host is None:
+        host_slice_ptr = NULL
+      else:
+        host_slice = _slice_from_bytes(host)
+        host_slice_ptr = &host_slice
+      call_state.c_call = grpc_channel_create_call(
+          channel_state.c_channel, NULL, flags,
+          c_completion_queue, method_slice, host_slice_ptr,
+          _timespec_from_time(deadline), NULL)
+      # Slices were copied by core at call creation; release ours.
+      grpc_slice_unref(method_slice)
+      if host_slice_ptr:
+        grpc_slice_unref(host_slice)
+      if context is not None:
+        set_census_context_on_call(call_state, context)
+      if credentials is not None:
+        c_call_credentials = credentials.c()
+        c_call_error = grpc_call_set_credentials(
+            call_state.c_call, c_call_credentials)
+        grpc_call_credentials_release(c_call_credentials)
+        if c_call_error != GRPC_CALL_OK:
+          # Credentials failed to attach: tear the call down before raising.
+          grpc_call_unref(call_state.c_call)
+          call_state.c_call = NULL
+          _raise_call_error_no_metadata(c_call_error)
+      started_tags = set()
+      for operations, user_tag in operationses_and_user_tags:
+        c_call_error, tag = _operate(call_state.c_call, operations, user_tag)
+        if c_call_error == GRPC_CALL_OK:
+          started_tags.add(tag)
+        else:
+          # One batch failed: cancel (flushing batches already started) and
+          # drop the call before raising.
+          grpc_call_cancel(call_state.c_call, NULL)
+          grpc_call_unref(call_state.c_call)
+          call_state.c_call = NULL
+          _raise_call_error(c_call_error, metadata)
+      else:
+        # for/else: all batches started successfully.
+        call_state.due.update(started_tags)
+        on_success(started_tags)
+    else:
+      raise ValueError('Cannot invoke RPC: %s' % channel_state.closed_reason)
+
+
+# Completes one batch tag for an integrated call; when the last outstanding
+# batch finishes, the core call is released.
+cdef void _process_integrated_call_tag(
+    _ChannelState state, _BatchOperationTag tag) except *:
+  cdef _CallState call_state = state.integrated_call_states.pop(tag)
+  call_state.due.remove(tag)
+  if not call_state.due:
+    grpc_call_unref(call_state.c_call)
+    call_state.c_call = NULL
+
+
+# Handle for a call multiplexed onto the channel's shared completion queue.
+cdef class IntegratedCall:
+
+  def __cinit__(self, _ChannelState channel_state, _CallState call_state):
+    self._channel_state = channel_state
+    self._call_state = call_state
+
+  def operate(self, operations, tag):
+    return _operate_from_integrated_call(
+        self._channel_state, self._call_state, operations, tag)
+
+  def cancel(self, code, details):
+    _cancel(self._channel_state, self._call_state, code, details)
+
+
+# Creates and starts an integrated call, registering its started tags in the
+# channel's tag map so Channel.next_call_event can route completions.
+cdef IntegratedCall _integrated_call(
+    _ChannelState state, int flags, method, host, object deadline,
+    object metadata, CallCredentials credentials, operationses_and_user_tags,
+    object context):
+  call_state = _CallState()
+
+  def on_success(started_tags):
+    for started_tag in started_tags:
+      state.integrated_call_states[started_tag] = call_state
+
+  _call(
+      state, call_state, state.c_call_completion_queue, on_success, flags,
+      method, host, deadline, credentials, operationses_and_user_tags, metadata, context)
+
+  return IntegratedCall(state, call_state)
+
+
+# Completes one batch tag for a segregated call. Returns True when the call
+# has fully terminated (last tag completed), in which case the call and its
+# private completion queue are destroyed.
+cdef object _process_segregated_call_tag(
+    _ChannelState state, _CallState call_state,
+    grpc_completion_queue *c_completion_queue, _BatchOperationTag tag):
+  call_state.due.remove(tag)
+  if not call_state.due:
+    grpc_call_unref(call_state.c_call)
+    call_state.c_call = NULL
+    state.segregated_call_states.remove(call_state)
+    _destroy_c_completion_queue(c_completion_queue)
+    return True
+  else:
+    return False
+
+
+# Handle for a call that polls its own dedicated completion queue rather
+# than the channel-wide one.
+cdef class SegregatedCall:
+
+  def __cinit__(self, _ChannelState channel_state, _CallState call_state):
+    self._channel_state = channel_state
+    self._call_state = call_state
+
+  def operate(self, operations, tag):
+    return _operate_from_segregated_call(
+        self._channel_state, self._call_state, operations, tag)
+
+  def cancel(self, code, details):
+    _cancel(self._channel_state, self._call_state, code, details)
+
+  def next_event(self):
+    # Blocks (no deadline) on this call's private completion queue.
+    def on_success(tag):
+      _process_segregated_call_tag(
+        self._channel_state, self._call_state, self._c_completion_queue, tag)
+    def on_failure():
+      # Exception escaped from a signal handler: abandon all outstanding
+      # batches and tear the call and its queue down.
+      self._call_state.due.clear()
+      grpc_call_unref(self._call_state.c_call)
+      self._call_state.c_call = NULL
+      self._channel_state.segregated_call_states.remove(self._call_state)
+      _destroy_c_completion_queue(self._c_completion_queue)
+    return _next_call_event(
+        self._channel_state, self._c_completion_queue, on_success, on_failure, None)
+
+
+# Creates a segregated call with its own completion queue; the queue is
+# destroyed if call invocation fails.
+cdef SegregatedCall _segregated_call(
+    _ChannelState state, int flags, method, host, object deadline,
+    object metadata, CallCredentials credentials, operationses_and_user_tags,
+    object context):
+  cdef _CallState call_state = _CallState()
+  cdef SegregatedCall segregated_call
+  cdef grpc_completion_queue *c_completion_queue
+
+  def on_success(started_tags):
+    state.segregated_call_states.add(call_state)
+
+  with state.condition:
+    if state.open:
+      c_completion_queue = (grpc_completion_queue_create_for_next(NULL))
+    else:
+      raise ValueError('Cannot invoke RPC on closed channel!')
+
+  try:
+    _call(
+        state, call_state, c_completion_queue, on_success, flags, method, host,
+        deadline, credentials, operationses_and_user_tags, metadata,
+        context)
+  except:
+    _destroy_c_completion_queue(c_completion_queue)
+    raise
+
+  segregated_call = SegregatedCall(state, call_state)
+  segregated_call._c_completion_queue = c_completion_queue
+  return segregated_call
+
+
+# Registers a connectivity-state watch with core and blocks until it fires.
+# Raises if the channel is closed or closing.
+cdef object _watch_connectivity_state(
+    _ChannelState state, grpc_connectivity_state last_observed_state,
+    object deadline):
+  cdef _ConnectivityTag tag = _ConnectivityTag(object())
+  with state.condition:
+    if state.open:
+      # INCREF balanced by the DECREF in _interpret_event when the tag
+      # comes off the connectivity completion queue.
+      cpython.Py_INCREF(tag)
+      grpc_channel_watch_connectivity_state(
+          state.c_channel, last_observed_state, _timespec_from_time(deadline),
+          state.c_connectivity_completion_queue, <cpython.PyObject *>tag)
+      state.connectivity_due.add(tag)
+    else:
+      raise ValueError('Cannot monitor channel state: %s' % state.closed_reason)
+  # Block (outside the lock) for the watch to complete or time out.
+  completed_tag, event = _latent_event(
+      state.c_connectivity_completion_queue, None)
+  with state.condition:
+    state.connectivity_due.remove(completed_tag)
+    state.condition.notify_all()
+  return event
+
+
+# Closes the channel: cancels every outstanding call, waits for all pending
+# work to drain, then destroys the completion queues and the core channel.
+# With drain_calls=True (fork support) this thread pumps the call queue
+# itself instead of waiting on the condition variable.
+cdef _close(Channel channel, grpc_status_code code, object details,
+            drain_calls):
+  cdef _ChannelState state = channel._state
+  cdef _CallState call_state
+  encoded_details = _encode(details)
+  with state.condition:
+    if state.open:
+      state.open = False
+      state.closed_reason = details
+      for call_state in set(state.integrated_call_states.values()):
+        grpc_call_cancel_with_status(
+            call_state.c_call, code, encoded_details, NULL)
+      for call_state in state.segregated_call_states:
+        grpc_call_cancel_with_status(
+            call_state.c_call, code, encoded_details, NULL)
+      # TODO(https://github.com/grpc/grpc/issues/3064): Cancel connectivity
+      # watching.
+
+      if drain_calls:
+        while not _calls_drained(state):
+          event = channel.next_call_event()
+          if event.completion_type == CompletionType.queue_timeout:
+            continue
+          event.tag(event)
+      else:
+        # The spin thread processes events and notifies this condition as
+        # calls complete.
+        while state.integrated_call_states:
+          state.condition.wait()
+        while state.connectivity_due:
+          state.condition.wait()
+
+      _destroy_c_completion_queue(state.c_call_completion_queue)
+      _destroy_c_completion_queue(state.c_connectivity_completion_queue)
+      grpc_channel_destroy(state.c_channel)
+      state.c_channel = NULL
+      # Balances the grpc_init from Channel.__cinit__.
+      grpc_shutdown()
+      state.condition.notify_all()
+    else:
+      # Another call to close already completed in the past or is currently
+      # being executed in another thread.
+      while state.c_channel != NULL:
+        state.condition.wait()
+
+
+# True when no integrated calls, segregated calls, or connectivity watches
+# remain outstanding.
+cdef _calls_drained(_ChannelState state):
+  return not (state.integrated_call_states or state.segregated_call_states or
+              state.connectivity_due)
+
+# Python-facing channel wrapper. Owns the core channel, a call completion
+# queue, and a connectivity completion queue; all state lives in
+# self._state and is guarded by its condition variable.
+cdef class Channel:
+
+  def __cinit__(
+      self, bytes target, object arguments,
+      ChannelCredentials channel_credentials):
+    arguments = () if arguments is None else tuple(arguments)
+    fork_handlers_and_grpc_init()
+    self._state = _ChannelState()
+    self._state.c_call_completion_queue = (
+        grpc_completion_queue_create_for_next(NULL))
+    self._state.c_connectivity_completion_queue = (
+        grpc_completion_queue_create_for_next(NULL))
+    self._arguments = arguments
+    # channel_args must stay alive until grpc_channel_create returns; core
+    # copies what it needs during creation.
+    cdef _ChannelArgs channel_args = _ChannelArgs(arguments)
+    # None credentials means an insecure channel.
+    c_channel_credentials = (
+        channel_credentials.c() if channel_credentials is not None
+        else grpc_insecure_credentials_create())
+    self._state.c_channel = grpc_channel_create(
+        <char *>target, c_channel_credentials, channel_args.c_args())
+    grpc_channel_credentials_release(c_channel_credentials)
+
+  def target(self):
+    # Returns the channel's target as bytes; copies then frees the
+    # core-allocated string.
+    cdef char *c_target
+    with self._state.condition:
+      c_target = grpc_channel_get_target(self._state.c_channel)
+    target = <bytes>c_target
+    gpr_free(c_target)
+    return target
+
+  def integrated_call(
+      self, int flags, method, host, object deadline, object metadata,
+      CallCredentials credentials, operationses_and_tags,
+      object context = None):
+    return _integrated_call(
+        self._state, flags, method, host, deadline, metadata, credentials,
+        operationses_and_tags, context)
+
+  def next_call_event(self):
+    # Polls the shared call completion queue; invoked from the channel's
+    # spin thread.
+    def on_success(tag):
+      if tag is not None:
+        _process_integrated_call_tag(self._state, tag)
+    if is_fork_support_enabled():
+      # Bounded poll so fork handlers get a chance to run.
+      queue_deadline = time.time() + 1.0
+    else:
+      queue_deadline = None
+    # NOTE(gnossen): It is acceptable for on_failure to be None here because
+    # failure conditions can only ever happen on the main thread and this
+    # method is only ever invoked on the channel spin thread.
+    return _next_call_event(self._state, self._state.c_call_completion_queue,
+                            on_success, None, queue_deadline)
+
+  def segregated_call(
+      self, int flags, method, host, object deadline, object metadata,
+      CallCredentials credentials, operationses_and_tags,
+      object context = None):
+    return _segregated_call(
+        self._state, flags, method, host, deadline, metadata, credentials,
+        operationses_and_tags, context)
+
+  def check_connectivity_state(self, bint try_to_connect):
+    with self._state.condition:
+      if self._state.open:
+        return grpc_channel_check_connectivity_state(
+            self._state.c_channel, try_to_connect)
+      else:
+        raise ValueError('Cannot invoke RPC: %s' % self._state.closed_reason)
+
+  def watch_connectivity_state(
+      self, grpc_connectivity_state last_observed_state, object deadline):
+    return _watch_connectivity_state(self._state, last_observed_state, deadline)
+
+  def close(self, code, details):
+    _close(self, code, details, False)
+
+  def close_on_fork(self, code, details):
+    # Drains calls on this thread; used by fork handlers where the spin
+    # thread may not survive the fork.
+    _close(self, code, details, True)
diff --git a/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/channelz.pyx.pxi b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/channelz.pyx.pxi
new file mode 100644
index 0000000000..36c8cd121c
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/channelz.pyx.pxi
@@ -0,0 +1,71 @@
+# Copyright 2018 The gRPC Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Channelz debug-introspection accessors. Each wraps a grpc_channelz_get_*
+# core function that returns a serialized JSON C string, raising ValueError
+# when core returns NULL (invalid id / out-of-range query).
+# NOTE(review): the returned char* appears to be converted to bytes without
+# a matching gpr_free — possible per-call leak; confirm against core's
+# ownership contract for grpc_channelz_get_*.
+def channelz_get_top_channels(start_channel_id):
+  cdef char *c_returned_str = grpc_channelz_get_top_channels(
+      start_channel_id,
+  )
+  if c_returned_str == NULL:
+    raise ValueError('Failed to get top channels, please ensure your' \
+                     ' start_channel_id==%s is valid' % start_channel_id)
+  return c_returned_str
+
+def channelz_get_servers(start_server_id):
+  cdef char *c_returned_str = grpc_channelz_get_servers(start_server_id)
+  if c_returned_str == NULL:
+    raise ValueError('Failed to get servers, please ensure your' \
+                     ' start_server_id==%s is valid' % start_server_id)
+  return c_returned_str
+
+def channelz_get_server(server_id):
+  cdef char *c_returned_str = grpc_channelz_get_server(server_id)
+  if c_returned_str == NULL:
+    raise ValueError('Failed to get the server, please ensure your' \
+                     ' server_id==%s is valid' % server_id)
+  return c_returned_str
+
+def channelz_get_server_sockets(server_id, start_socket_id, max_results):
+  cdef char *c_returned_str = grpc_channelz_get_server_sockets(
+      server_id,
+      start_socket_id,
+      max_results,
+  )
+  if c_returned_str == NULL:
+    raise ValueError('Failed to get server sockets, please ensure your' \
+                     ' server_id==%s and start_socket_id==%s and' \
+                     ' max_results==%s is valid' %
+                     (server_id, start_socket_id, max_results))
+  return c_returned_str
+
+def channelz_get_channel(channel_id):
+  cdef char *c_returned_str = grpc_channelz_get_channel(channel_id)
+  if c_returned_str == NULL:
+    raise ValueError('Failed to get the channel, please ensure your' \
+                     ' channel_id==%s is valid' % (channel_id))
+  return c_returned_str
+
+def channelz_get_subchannel(subchannel_id):
+  cdef char *c_returned_str = grpc_channelz_get_subchannel(subchannel_id)
+  if c_returned_str == NULL:
+    raise ValueError('Failed to get the subchannel, please ensure your' \
+                     ' subchannel_id==%s is valid' % (subchannel_id))
+  return c_returned_str
+
+def channelz_get_socket(socket_id):
+  cdef char *c_returned_str = grpc_channelz_get_socket(socket_id)
+  if c_returned_str == NULL:
+    raise ValueError('Failed to get the socket, please ensure your' \
+                     ' socket_id==%s is valid' % (socket_id))
+  return c_returned_str
diff --git a/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/completion_queue.pxd.pxi b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/completion_queue.pxd.pxi
new file mode 100644
index 0000000000..983aa6a87b
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/completion_queue.pxd.pxi
@@ -0,0 +1,32 @@
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# How often (ms) the polling loop wakes to run PyErr_CheckSignals so
+# KeyboardInterrupt is not deferred indefinitely.
+cdef int g_interrupt_check_period_ms = 200
+
+cdef grpc_event _next(grpc_completion_queue *c_completion_queue, deadline) except *
+
+
+cdef _interpret_event(grpc_event c_event)
+
+# Argument bundle passed to the gevent threadpool for off-thread polling.
+cdef class _LatentEventArg:
+  cdef grpc_completion_queue *c_completion_queue
+  cdef object deadline
+
+cdef class CompletionQueue:
+
+  cdef grpc_completion_queue *c_completion_queue
+  cdef bint is_shutting_down
+  cdef bint is_shutdown
+
+  cdef _interpret_event(self, grpc_event c_event)
diff --git a/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/completion_queue.pyx.pxi b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/completion_queue.pyx.pxi
new file mode 100644
index 0000000000..f9f5de2d83
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/completion_queue.pyx.pxi
@@ -0,0 +1,137 @@
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Polls the completion queue for the next event, waking every
+# g_interrupt_check_period_ms to service Python signals (so Ctrl-C works
+# while blocked in core).
+cdef grpc_event _next(grpc_completion_queue *c_completion_queue, deadline) except *:
+  global g_interrupt_check_period_ms
+  cdef gpr_timespec c_increment
+  cdef gpr_timespec c_timeout
+  cdef gpr_timespec c_deadline
+  c_increment = gpr_time_from_millis(g_interrupt_check_period_ms, GPR_TIMESPAN)
+  if deadline is None:
+    c_deadline = gpr_inf_future(GPR_CLOCK_REALTIME)
+  else:
+    c_deadline = _timespec_from_time(deadline)
+
+  while True:
+    with nogil:
+      # Poll in short slices, clamped to the overall deadline.
+      c_timeout = gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), c_increment)
+      if gpr_time_cmp(c_timeout, c_deadline) > 0:
+        c_timeout = c_deadline
+
+      c_event = grpc_completion_queue_next(c_completion_queue, c_timeout, NULL)
+
+      # Exit on a real event or when the overall deadline was reached.
+      if (c_event.type != GRPC_QUEUE_TIMEOUT or
+          gpr_time_cmp(c_timeout, c_deadline) == 0):
+        break
+
+    # Handle any signals
+    cpython.PyErr_CheckSignals()
+  return c_event
+
+# Converts a raw grpc_event into a (tag, event) pair, releasing the
+# reference that was taken when the tag was enqueued.
+cdef _interpret_event(grpc_event c_event):
+  cdef _Tag tag
+  if c_event.type == GRPC_QUEUE_TIMEOUT:
+    # TODO(ericgribkoff) Do not coopt ConnectivityEvent here.
+    return None, ConnectivityEvent(GRPC_QUEUE_TIMEOUT, False, None)
+  elif c_event.type == GRPC_QUEUE_SHUTDOWN:
+    # NOTE(nathaniel): For now we coopt ConnectivityEvent here.
+    return None, ConnectivityEvent(GRPC_QUEUE_SHUTDOWN, False, None)
+  else:
+    tag = <_Tag>c_event.tag
+    # We receive event tags only after they've been inc-ref'd elsewhere in
+    # the code.
+    cpython.Py_DECREF(tag)
+    return tag, tag.event(c_event)
+
+cdef _internal_latent_event(_LatentEventArg latent_event_arg):
+  cdef grpc_event c_event = _next(latent_event_arg.c_completion_queue, latent_event_arg.deadline)
+  return _interpret_event(c_event)
+
+# Entry point for blocking on a completion queue. Under gevent the blocking
+# poll is delegated to a native thread pool so greenlets are not starved.
+cdef _latent_event(grpc_completion_queue *c_completion_queue, object deadline):
+  global g_gevent_activated
+
+  latent_event_arg = _LatentEventArg()
+  latent_event_arg.c_completion_queue = c_completion_queue
+  latent_event_arg.deadline = deadline
+
+  if g_gevent_activated:
+    # For gevent, completion_queue_next is run in a native thread pool.
+    global g_gevent_threadpool
+
+    result = g_gevent_threadpool.apply(_internal_latent_event, (latent_event_arg,))
+    return result
+  else:
+    return _internal_latent_event(latent_event_arg)
+
+# Owning wrapper around a grpc_completion_queue with signal-aware polling
+# and drain-on-destroy semantics.
+cdef class CompletionQueue:
+
+  def __cinit__(self, shutdown_cq=False):
+    cdef grpc_completion_queue_attributes c_attrs
+    fork_handlers_and_grpc_init()
+    if shutdown_cq:
+      # Non-listening queue used solely to collect server-shutdown events.
+      c_attrs.version = 1
+      c_attrs.cq_completion_type = GRPC_CQ_NEXT
+      c_attrs.cq_polling_type = GRPC_CQ_NON_LISTENING
+      c_attrs.cq_shutdown_cb = NULL
+      self.c_completion_queue = grpc_completion_queue_create(
+          grpc_completion_queue_factory_lookup(&c_attrs), &c_attrs, NULL);
+    else:
+      self.c_completion_queue = grpc_completion_queue_create_for_next(NULL)
+    self.is_shutting_down = False
+    self.is_shutdown = False
+
+  cdef _interpret_event(self, grpc_event c_event):
+    # Also records queue shutdown so __dealloc__'s drain loop terminates.
+    unused_tag, event = _interpret_event(c_event)
+    if event.completion_type == GRPC_QUEUE_SHUTDOWN:
+      self.is_shutdown = True
+    return event
+
+  def _internal_poll(self, deadline):
+    return self._interpret_event(_next(self.c_completion_queue, deadline))
+
+  # We name this 'poll' to avoid problems with CPython's expectations for
+  # 'special' methods (like next and __next__).
+  def poll(self, deadline=None):
+    global g_gevent_activated
+    if g_gevent_activated:
+      # Blocking poll runs on a native thread so gevent greenlets keep running.
+      return g_gevent_threadpool.apply(CompletionQueue._internal_poll, (self, deadline))
+    else:
+      return self._internal_poll(deadline)
+
+  def shutdown(self):
+    with nogil:
+      grpc_completion_queue_shutdown(self.c_completion_queue)
+    self.is_shutting_down = True
+
+  def clear(self):
+    if not self.is_shutting_down:
+      raise ValueError('queue must be shutting down to be cleared')
+    # NOTE(review): other callers read `.completion_type` off polled events;
+    # `.type` here looks inconsistent — confirm the event type exposes both.
+    while self.poll().type != GRPC_QUEUE_SHUTDOWN:
+      pass
+
+  def __dealloc__(self):
+    cdef gpr_timespec c_deadline
+    c_deadline = gpr_inf_future(GPR_CLOCK_REALTIME)
+    if self.c_completion_queue != NULL:
+      # Ensure shutdown
+      if not self.is_shutting_down:
+        grpc_completion_queue_shutdown(self.c_completion_queue)
+      # Pump the queue (All outstanding calls should have been cancelled)
+      while not self.is_shutdown:
+        event = grpc_completion_queue_next(
+            self.c_completion_queue, c_deadline, NULL)
+        self._interpret_event(event)
+      grpc_completion_queue_destroy(self.c_completion_queue)
+      # Balances the grpc_init from __cinit__.
+      grpc_shutdown()
diff --git a/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/credentials.pxd.pxi b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/credentials.pxd.pxi
new file mode 100644
index 0000000000..827f6f17ca
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/credentials.pxd.pxi
@@ -0,0 +1,117 @@
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Cython declarations for the credentials wrapper types implemented in
+# credentials.pyx.pxi.
+cdef class CallCredentials:
+
+  # Create (and transfer ownership of) the underlying C call credentials.
+  cdef grpc_call_credentials *c(self) except *
+
+  # TODO(https://github.com/grpc/grpc/issues/12531): remove.
+  cdef grpc_call_credentials *c_credentials
+
+
+# C plugin callback: invoked by the C core to ask a Python metadata plugin
+# for per-call metadata.
+cdef int _get_metadata(
+    void *state, grpc_auth_metadata_context context,
+    grpc_credentials_plugin_metadata_cb cb, void *user_data,
+    grpc_metadata creds_md[GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX],
+    size_t *num_creds_md, grpc_status_code *status,
+    const char **error_details) except * with gil
+
+# C plugin callback: releases the reference held on the Python plugin.
+cdef void _destroy(void *state) except * with gil
+
+
+cdef class MetadataPluginCallCredentials(CallCredentials):
+
+  cdef readonly object _metadata_plugin
+  cdef readonly bytes _name
+
+  cdef grpc_call_credentials *c(self) except *
+
+
+# Fold an iterable of CallCredentials into one composite C object.
+cdef grpc_call_credentials *_composition(call_credentialses)
+
+
+cdef class CompositeCallCredentials(CallCredentials):
+
+  cdef readonly tuple _call_credentialses
+
+  cdef grpc_call_credentials *c(self) except *
+
+
+cdef class ChannelCredentials:
+
+  # Create (and transfer ownership of) the underlying C channel credentials.
+  cdef grpc_channel_credentials *c(self) except *
+
+
+cdef class SSLSessionCacheLRU:
+
+  cdef grpc_ssl_session_cache *_cache
+
+
+cdef class SSLChannelCredentials(ChannelCredentials):
+
+  cdef readonly object _pem_root_certificates
+  cdef readonly object _private_key
+  cdef readonly object _certificate_chain
+
+  cdef grpc_channel_credentials *c(self) except *
+
+
+cdef class CompositeChannelCredentials(ChannelCredentials):
+
+  cdef readonly tuple _call_credentialses
+  cdef readonly ChannelCredentials _channel_credentials
+
+  cdef grpc_channel_credentials *c(self) except *
+
+
+cdef class XDSChannelCredentials(ChannelCredentials):
+
+  cdef readonly ChannelCredentials _fallback_credentials
+
+  cdef grpc_channel_credentials *c(self) except *
+
+
+cdef class ServerCertificateConfig:
+
+  cdef grpc_ssl_server_certificate_config *c_cert_config
+  cdef const char *c_pem_root_certs
+  cdef grpc_ssl_pem_key_cert_pair *c_ssl_pem_key_cert_pairs
+  cdef size_t c_ssl_pem_key_cert_pairs_count
+  # Python objects kept alive while the C structures above point into them.
+  cdef list references
+
+
+cdef class ServerCredentials:
+
+  cdef grpc_server_credentials *c_credentials
+  cdef grpc_ssl_pem_key_cert_pair *c_ssl_pem_key_cert_pairs
+  cdef size_t c_ssl_pem_key_cert_pairs_count
+  # Python objects kept alive while the C structures above point into them.
+  cdef list references
+  # the cert config related state is used only if this credentials is
+  # created with cert config/fetcher
+  cdef object initial_cert_config
+  cdef object cert_config_fetcher
+  # whether C-core has asked for the initial_cert_config
+  cdef bint initial_cert_config_fetched
+
+
+cdef class LocalChannelCredentials(ChannelCredentials):
+
+  cdef grpc_local_connect_type _local_connect_type
+
+
+cdef class ALTSChannelCredentials(ChannelCredentials):
+  cdef grpc_alts_credentials_options *c_options
+
+  cdef grpc_channel_credentials *c(self) except *
diff --git a/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/credentials.pyx.pxi b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/credentials.pyx.pxi
new file mode 100644
index 0000000000..23de3a0b18
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/credentials.pyx.pxi
@@ -0,0 +1,442 @@
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+def _spawn_callback_in_thread(cb_func, args):
+  # Default async-callback runner: execute the plugin callback on a fresh
+  # daemon thread so it never blocks the C core's calling thread.
+  t = ForkManagedThread(target=cb_func, args=args)
+  t.setDaemon(True)
+  t.start()
+
+# Dispatch hook for plugin callbacks; replaceable via set_async_callback_func
+# (e.g. by the gevent integration).
+async_callback_func = _spawn_callback_in_thread
+
+def set_async_callback_func(callback_func):
+  # Install a custom dispatcher for asynchronous plugin callbacks.
+  global async_callback_func
+  async_callback_func = callback_func
+
+def _spawn_callback_async(callback, args):
+  async_callback_func(callback, args)
+
+
+cdef class CallCredentials:
+  # Abstract base; subclasses create the underlying C call credentials.
+
+  cdef grpc_call_credentials *c(self) except *:
+    raise NotImplementedError()
+
+
+cdef int _get_metadata(void *state,
+                       grpc_auth_metadata_context context,
+                       grpc_credentials_plugin_metadata_cb cb,
+                       void *user_data,
+                       grpc_metadata creds_md[GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX],
+                       size_t *num_creds_md,
+                       grpc_status_code *status,
+                       const char **error_details) except * with gil:
+  # C-core plugin entry point: asks the Python plugin (carried in 'state')
+  # for metadata and reports the result asynchronously through 'cb'.
+  cdef size_t metadata_count
+  cdef grpc_metadata *c_metadata
+  def callback(metadata, grpc_status_code status, bytes error_details):
+    cdef char* c_error_details = NULL
+    if error_details is not None:
+      c_error_details = <char*> error_details
+    if status == StatusCode.ok:
+      _store_c_metadata(metadata, &c_metadata, &metadata_count)
+      with nogil:
+        cb(user_data, c_metadata, metadata_count, status, NULL)
+      _release_c_metadata(c_metadata, metadata_count)
+    else:
+      with nogil:
+        cb(user_data, NULL, 0, status, c_error_details)
+  args = context.service_url, context.method_name, callback,
+  plugin = <object>state
+  if plugin._stored_ctx is not None:
+    # Run the callback within the context captured at plugin creation time.
+    plugin._stored_ctx.copy().run(_spawn_callback_async, plugin, args)
+  else:
+    _spawn_callback_async(<object>state, args)
+  return 0  # Asynchronous return
+
+
+cdef void _destroy(void *state) except * with gil:
+  # Drop the reference taken in MetadataPluginCallCredentials.c() and
+  # balance its matching grpc_init.
+  cpython.Py_DECREF(<object>state)
+  grpc_shutdown()
+
+
+cdef class MetadataPluginCallCredentials(CallCredentials):
+  # Call credentials backed by a user-supplied Python metadata plugin.
+
+  def __cinit__(self, metadata_plugin, name):
+    self._metadata_plugin = metadata_plugin
+    self._name = name
+
+  cdef grpc_call_credentials *c(self) except *:
+    # Wrap the Python plugin in a C metadata-credentials plugin.  The
+    # Py_INCREF taken here is balanced by Py_DECREF in _destroy.
+    cdef grpc_metadata_credentials_plugin c_metadata_plugin
+    c_metadata_plugin.get_metadata = _get_metadata
+    c_metadata_plugin.destroy = _destroy
+    c_metadata_plugin.state = <void *>self._metadata_plugin
+    c_metadata_plugin.type = self._name
+    cpython.Py_INCREF(self._metadata_plugin)
+    fork_handlers_and_grpc_init()
+    # TODO(yihuazhang): Expose min_security_level via the Python API so that
+    # applications can decide what minimum security level their plugins require.
+    return grpc_metadata_credentials_create_from_plugin(c_metadata_plugin, GRPC_PRIVACY_AND_INTEGRITY, NULL)
+
+
+cdef grpc_call_credentials *_composition(call_credentialses):
+  # Fold an iterable of CallCredentials into one composite C object,
+  # releasing each intermediate composition and component along the way.
+  call_credentials_iterator = iter(call_credentialses)
+  cdef CallCredentials composition = next(call_credentials_iterator)
+  cdef grpc_call_credentials *c_composition = composition.c()
+  cdef CallCredentials additional_call_credentials
+  cdef grpc_call_credentials *c_additional_call_credentials
+  cdef grpc_call_credentials *c_next_composition
+  for additional_call_credentials in call_credentials_iterator:
+    c_additional_call_credentials = additional_call_credentials.c()
+    c_next_composition = grpc_composite_call_credentials_create(
+        c_composition, c_additional_call_credentials, NULL)
+    grpc_call_credentials_release(c_composition)
+    grpc_call_credentials_release(c_additional_call_credentials)
+    c_composition = c_next_composition
+  return c_composition
+
+
+cdef class CompositeCallCredentials(CallCredentials):
+
+  def __cinit__(self, call_credentialses):
+    self._call_credentialses = call_credentialses
+
+  cdef grpc_call_credentials *c(self) except *:
+    return _composition(self._call_credentialses)
+
+
+cdef class ChannelCredentials:
+  # Abstract base; subclasses create the underlying C channel credentials.
+
+  cdef grpc_channel_credentials *c(self) except *:
+    raise NotImplementedError()
+
+
+cdef class SSLSessionCacheLRU:
+  # Thin wrapper around the C core's LRU TLS session cache.
+
+  def __cinit__(self, capacity):
+    fork_handlers_and_grpc_init()
+    self._cache = grpc_ssl_session_cache_create_lru(capacity)
+
+  def __int__(self):
+    # Expose the raw cache pointer so it can be passed as a channel arg.
+    return <uintptr_t>self._cache
+
+  def __dealloc__(self):
+    if self._cache != NULL:
+      grpc_ssl_session_cache_destroy(self._cache)
+    # Balance the grpc_init performed in __cinit__.
+    grpc_shutdown()
+
+
+cdef class SSLChannelCredentials(ChannelCredentials):
+  # Client-side TLS credentials with optional client identity.
+
+  def __cinit__(self, pem_root_certificates, private_key, certificate_chain):
+    if pem_root_certificates is not None and not isinstance(pem_root_certificates, bytes):
+      raise TypeError('expected certificate to be bytes, got %s' % (type(pem_root_certificates)))
+    self._pem_root_certificates = pem_root_certificates
+    self._private_key = private_key
+    self._certificate_chain = certificate_chain
+
+  cdef grpc_channel_credentials *c(self) except *:
+    cdef const char *c_pem_root_certificates
+    cdef grpc_ssl_pem_key_cert_pair c_pem_key_certificate_pair
+    if self._pem_root_certificates is None:
+      # NULL root certs make the C core fall back to its default roots.
+      c_pem_root_certificates = NULL
+    else:
+      c_pem_root_certificates = self._pem_root_certificates
+    if self._private_key is None and self._certificate_chain is None:
+      # No client identity supplied: server-authentication-only TLS.
+      return grpc_ssl_credentials_create(
+          c_pem_root_certificates, NULL, NULL, NULL)
+    else:
+      if self._private_key:
+        c_pem_key_certificate_pair.private_key = self._private_key
+      else:
+        c_pem_key_certificate_pair.private_key = NULL
+      if self._certificate_chain:
+        c_pem_key_certificate_pair.certificate_chain = self._certificate_chain
+      else:
+        c_pem_key_certificate_pair.certificate_chain = NULL
+      return grpc_ssl_credentials_create(
+          c_pem_root_certificates, &c_pem_key_certificate_pair, NULL, NULL)
+
+
+cdef class CompositeChannelCredentials(ChannelCredentials):
+  # Channel credentials augmented with one or more call credentials.
+
+  def __cinit__(self, call_credentialses, channel_credentials):
+    self._call_credentialses = call_credentialses
+    self._channel_credentials = channel_credentials
+
+  cdef grpc_channel_credentials *c(self) except *:
+    # Compose the base channel credentials with the composition of all
+    # attached call credentials; the intermediate C objects are released
+    # once the composite owns them.
+    cdef grpc_channel_credentials *c_channel_credentials
+    c_channel_credentials = self._channel_credentials.c()
+    cdef grpc_call_credentials *c_call_credentials_composition = _composition(
+        self._call_credentialses)
+    # BUG FIX: the result was declared as 'composition' but assigned and
+    # returned through the undeclared name 'c_composition', leaving the
+    # declared pointer unused and the result untyped.  Declare and use one
+    # consistent C pointer variable.
+    cdef grpc_channel_credentials *c_composition
+    c_composition = grpc_composite_channel_credentials_create(
+        c_channel_credentials, c_call_credentials_composition, NULL)
+    grpc_channel_credentials_release(c_channel_credentials)
+    grpc_call_credentials_release(c_call_credentials_composition)
+    return c_composition
+
+
+cdef class XDSChannelCredentials(ChannelCredentials):
+  # xDS channel credentials wrapping a fallback credentials object.
+
+  def __cinit__(self, fallback_credentials):
+    self._fallback_credentials = fallback_credentials
+
+  cdef grpc_channel_credentials *c(self) except *:
+    # The xDS credentials take their own reference on the fallback
+    # credentials, so ours is released immediately after creation.
+    cdef grpc_channel_credentials *c_fallback_creds = self._fallback_credentials.c()
+    cdef grpc_channel_credentials *xds_creds = grpc_xds_credentials_create(c_fallback_creds)
+    grpc_channel_credentials_release(c_fallback_creds)
+    return xds_creds
+
+
+cdef class ServerCertificateConfig:
+  # Holds one snapshot of server TLS certificates; populated by the
+  # server_certificate_config_ssl factory below.
+
+  def __cinit__(self):
+    fork_handlers_and_grpc_init()
+    self.c_cert_config = NULL
+    self.c_pem_root_certs = NULL
+    self.c_ssl_pem_key_cert_pairs = NULL
+    self.references = []
+
+  def __dealloc__(self):
+    grpc_ssl_server_certificate_config_destroy(self.c_cert_config)
+    # The pair array was gpr_malloc'ed in _create_c_ssl_pem_key_cert_pairs.
+    gpr_free(self.c_ssl_pem_key_cert_pairs)
+    grpc_shutdown()
+
+
+cdef class ServerCredentials:
+  # Wrapper owning a C grpc_server_credentials object.
+
+  def __cinit__(self):
+    fork_handlers_and_grpc_init()
+    self.c_credentials = NULL
+    self.references = []
+    self.initial_cert_config = None
+    self.cert_config_fetcher = None
+    self.initial_cert_config_fetched = False
+
+  def __dealloc__(self):
+    if self.c_credentials != NULL:
+      grpc_server_credentials_release(self.c_credentials)
+    grpc_shutdown()
+
+cdef const char* _get_c_pem_root_certs(pem_root_certs):
+  # Map None to NULL; otherwise borrow the buffer of the bytes object
+  # (the caller must keep the Python object alive).
+  if pem_root_certs is None:
+    return NULL
+  else:
+    return pem_root_certs
+
+cdef grpc_ssl_pem_key_cert_pair* _create_c_ssl_pem_key_cert_pairs(pem_key_cert_pairs):
+  # return a malloc'ed grpc_ssl_pem_key_cert_pair from a _list_ of SslPemKeyCertPair
+  for pair in pem_key_cert_pairs:
+    if not isinstance(pair, SslPemKeyCertPair):
+      raise TypeError("expected pem_key_cert_pairs to be sequence of "
+                      "SslPemKeyCertPair")
+  cdef size_t c_ssl_pem_key_cert_pairs_count = len(pem_key_cert_pairs)
+  cdef grpc_ssl_pem_key_cert_pair* c_ssl_pem_key_cert_pairs = NULL
+  with nogil:
+    c_ssl_pem_key_cert_pairs = (
+        <grpc_ssl_pem_key_cert_pair *>gpr_malloc(
+            sizeof(grpc_ssl_pem_key_cert_pair) * c_ssl_pem_key_cert_pairs_count))
+  for i in range(c_ssl_pem_key_cert_pairs_count):
+    c_ssl_pem_key_cert_pairs[i] = (
+        (<SslPemKeyCertPair>pem_key_cert_pairs[i]).c_pair)
+  return c_ssl_pem_key_cert_pairs
+
+def server_credentials_ssl(pem_root_certs, pem_key_cert_pairs,
+                           bint force_client_auth):
+  # Build SSL server credentials from root certs and key/cert pairs.
+  pem_root_certs = str_to_bytes(pem_root_certs)
+  pem_key_cert_pairs = list(pem_key_cert_pairs)
+  cdef ServerCredentials credentials = ServerCredentials()
+  # Keep the Python objects alive while C-core holds pointers into them.
+  credentials.references.append(pem_root_certs)
+  credentials.references.append(pem_key_cert_pairs)
+  cdef const char * c_pem_root_certs = _get_c_pem_root_certs(pem_root_certs)
+  credentials.c_ssl_pem_key_cert_pairs_count = len(pem_key_cert_pairs)
+  credentials.c_ssl_pem_key_cert_pairs = _create_c_ssl_pem_key_cert_pairs(pem_key_cert_pairs)
+  cdef grpc_ssl_server_certificate_config *c_cert_config = NULL
+  c_cert_config = grpc_ssl_server_certificate_config_create(
+      c_pem_root_certs, credentials.c_ssl_pem_key_cert_pairs,
+      credentials.c_ssl_pem_key_cert_pairs_count)
+  cdef grpc_ssl_server_credentials_options* c_options = NULL
+  # C-core assumes ownership of c_cert_config
+  c_options = grpc_ssl_server_credentials_create_options_using_config(
+      GRPC_SSL_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_AND_VERIFY
+      if force_client_auth else
+      GRPC_SSL_DONT_REQUEST_CLIENT_CERTIFICATE,
+      c_cert_config)
+  # C-core assumes ownership of c_options
+  credentials.c_credentials = grpc_ssl_server_credentials_create_with_options(c_options)
+  return credentials
+
+def server_certificate_config_ssl(pem_root_certs, pem_key_cert_pairs):
+  # Build a reloadable certificate-config snapshot for SSL servers.
+  pem_root_certs = str_to_bytes(pem_root_certs)
+  pem_key_cert_pairs = list(pem_key_cert_pairs)
+  cdef ServerCertificateConfig cert_config = ServerCertificateConfig()
+  # Keep the Python objects alive while C-core holds pointers into them.
+  cert_config.references.append(pem_root_certs)
+  cert_config.references.append(pem_key_cert_pairs)
+  cert_config.c_pem_root_certs = _get_c_pem_root_certs(pem_root_certs)
+  cert_config.c_ssl_pem_key_cert_pairs_count = len(pem_key_cert_pairs)
+  cert_config.c_ssl_pem_key_cert_pairs = _create_c_ssl_pem_key_cert_pairs(pem_key_cert_pairs)
+  cert_config.c_cert_config = grpc_ssl_server_certificate_config_create(
+      cert_config.c_pem_root_certs, cert_config.c_ssl_pem_key_cert_pairs,
+      cert_config.c_ssl_pem_key_cert_pairs_count)
+  return cert_config
+
+def server_credentials_ssl_dynamic_cert_config(initial_cert_config,
+                                               cert_config_fetcher,
+                                               bint force_client_auth):
+  # SSL server credentials whose certificate configuration is re-fetched
+  # by the C core through _server_cert_config_fetcher_wrapper below.
+  if not isinstance(initial_cert_config, grpc.ServerCertificateConfiguration):
+    raise TypeError(
+        'initial_cert_config must be a grpc.ServerCertificateConfiguration')
+  if not callable(cert_config_fetcher):
+    raise TypeError('cert_config_fetcher must be callable')
+  cdef ServerCredentials credentials = ServerCredentials()
+  credentials.initial_cert_config = initial_cert_config
+  credentials.cert_config_fetcher = cert_config_fetcher
+  cdef grpc_ssl_server_credentials_options* c_options = NULL
+  c_options = grpc_ssl_server_credentials_create_options_using_config_fetcher(
+      GRPC_SSL_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_AND_VERIFY
+      if force_client_auth else
+      GRPC_SSL_DONT_REQUEST_CLIENT_CERTIFICATE,
+      _server_cert_config_fetcher_wrapper,
+      <void*>credentials)
+  # C-core assumes ownership of c_options
+  credentials.c_credentials = grpc_ssl_server_credentials_create_with_options(c_options)
+  return credentials
+
+cdef grpc_ssl_certificate_config_reload_status _server_cert_config_fetcher_wrapper(
+    void* user_data, grpc_ssl_server_certificate_config **config) with gil:
+  # Invoked by C-core whenever it wants (possibly fresh) certificates.
+  # This is a credentials.ServerCertificateConfig
+  cdef ServerCertificateConfig cert_config = None
+  if not user_data:
+    raise ValueError('internal error: user_data must be specified')
+  credentials = <ServerCredentials>user_data
+  if not credentials.initial_cert_config_fetched:
+    # C-core is asking for the initial cert config
+    credentials.initial_cert_config_fetched = True
+    cert_config = credentials.initial_cert_config._certificate_configuration
+  else:
+    user_cb = credentials.cert_config_fetcher
+    try:
+      cert_config_wrapper = user_cb()
+    except Exception:
+      _LOGGER.exception('Error fetching certificate config')
+      return GRPC_SSL_CERTIFICATE_CONFIG_RELOAD_FAIL
+    if cert_config_wrapper is None:
+      return GRPC_SSL_CERTIFICATE_CONFIG_RELOAD_UNCHANGED
+    elif not isinstance(
+        cert_config_wrapper, grpc.ServerCertificateConfiguration):
+      _LOGGER.error(
+          'Error fetching certificate configuration: certificate '
+          'configuration must be of type grpc.ServerCertificateConfiguration, '
+          'not %s' % type(cert_config_wrapper).__name__)
+      return GRPC_SSL_CERTIFICATE_CONFIG_RELOAD_FAIL
+    else:
+      cert_config = cert_config_wrapper._certificate_configuration
+  config[0] = <grpc_ssl_server_certificate_config*>cert_config.c_cert_config
+  # our caller will assume ownership of memory, so we have to recreate
+  # a copy of c_cert_config here
+  cert_config.c_cert_config = grpc_ssl_server_certificate_config_create(
+      cert_config.c_pem_root_certs, cert_config.c_ssl_pem_key_cert_pairs,
+      cert_config.c_ssl_pem_key_cert_pairs_count)
+  return GRPC_SSL_CERTIFICATE_CONFIG_RELOAD_NEW
+
+
+class LocalConnectionType:
+  # Python-visible aliases for the C grpc_local_connect_type enum values.
+  uds = UDS
+  local_tcp = LOCAL_TCP
+
+cdef class LocalChannelCredentials(ChannelCredentials):
+
+  def __cinit__(self, grpc_local_connect_type local_connect_type):
+    self._local_connect_type = local_connect_type
+
+  cdef grpc_channel_credentials *c(self) except *:
+    cdef grpc_local_connect_type local_connect_type
+    local_connect_type = self._local_connect_type
+    return grpc_local_credentials_create(local_connect_type)
+
+def channel_credentials_local(grpc_local_connect_type local_connect_type):
+  # Factory for local (UDS / loopback TCP) channel credentials.
+  return LocalChannelCredentials(local_connect_type)
+
+cdef class InsecureChannelCredentials(ChannelCredentials):
+
+  cdef grpc_channel_credentials *c(self) except *:
+    return grpc_insecure_credentials_create()
+
+def channel_credentials_insecure():
+  # Factory for explicitly-insecure channel credentials.
+  return InsecureChannelCredentials()
+
+def server_credentials_local(grpc_local_connect_type local_connect_type):
+  # Factory for local (UDS / loopback TCP) server credentials.
+  cdef ServerCredentials credentials = ServerCredentials()
+  credentials.c_credentials = grpc_local_server_credentials_create(local_connect_type)
+  return credentials
+
+def xds_server_credentials(ServerCredentials fallback_credentials):
+  cdef ServerCredentials credentials = ServerCredentials()
+  credentials.c_credentials = grpc_xds_server_credentials_create(fallback_credentials.c_credentials)
+  # NOTE: We do not need to call grpc_server_credentials_release on the
+  # fallback credentials here because this will be done by the __dealloc__
+  # method of its Cython wrapper.
+  return credentials
+
+def insecure_server_credentials():
+  # Factory for explicitly-insecure server credentials.
+  cdef ServerCredentials credentials = ServerCredentials()
+  credentials.c_credentials = grpc_insecure_server_credentials_create()
+  return credentials
+
+cdef class ALTSChannelCredentials(ChannelCredentials):
+  # Client-side ALTS credentials, optionally pinned to target service
+  # accounts.
+
+  def __cinit__(self, list service_accounts):
+    self.c_options = grpc_alts_credentials_client_options_create()
+    cdef str account
+    for account in service_accounts:
+      grpc_alts_credentials_client_options_add_target_service_account(self.c_options, account)
+
+  def __dealloc__(self):
+    if self.c_options != NULL:
+      grpc_alts_credentials_options_destroy(self.c_options)
+
+  cdef grpc_channel_credentials *c(self) except *:
+    return grpc_alts_credentials_create(self.c_options)
+
+
+def channel_credentials_alts(list service_accounts):
+  return ALTSChannelCredentials(service_accounts)
+
+
+def server_credentials_alts():
+  cdef ServerCredentials credentials = ServerCredentials()
+  cdef grpc_alts_credentials_options* c_options = grpc_alts_credentials_server_options_create()
+  credentials.c_credentials = grpc_alts_server_credentials_create(c_options)
+  # Options can be destroyed as deep copy was performed.
+  grpc_alts_credentials_options_destroy(c_options)
+  return credentials
+
+
+cdef class ComputeEngineChannelCredentials(ChannelCredentials):
+  # Google default (Compute Engine) channel credentials combined with the
+  # supplied call credentials.
+  cdef grpc_channel_credentials* _c_creds
+  cdef grpc_call_credentials* _call_creds
+
+  def __cinit__(self, CallCredentials call_creds):
+    self._c_creds = NULL
+    self._call_creds = call_creds.c()
+    if self._call_creds == NULL:
+      raise ValueError("Call credentials may not be NULL.")
+
+  cdef grpc_channel_credentials *c(self) except *:
+    self._c_creds = grpc_google_default_credentials_create(self._call_creds)
+    return self._c_creds
+
+
+def channel_credentials_compute_engine(call_creds):
+  return ComputeEngineChannelCredentials(call_creds)
diff --git a/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/csds.pyx.pxi b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/csds.pyx.pxi
new file mode 100644
index 0000000000..c33eb76e47
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/csds.pyx.pxi
@@ -0,0 +1,21 @@
+# Copyright 2021 The gRPC Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+def dump_xds_configs():
+  """Return this client's active xDS configuration as serialized bytes."""
+  cdef grpc_slice client_config_in_slice
+  with nogil:
+    client_config_in_slice = grpc_dump_xds_configs()
+  # _slice_bytes copies the slice contents and releases the slice.
+  cdef bytes result = _slice_bytes(client_config_in_slice)
+  return result
diff --git a/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/event.pxd.pxi b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/event.pxd.pxi
new file mode 100644
index 0000000000..686199ecf4
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/event.pxd.pxi
@@ -0,0 +1,45 @@
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Event types produced by CompletionQueue.poll(); each exposes the
+# completion kind ('completion_type'), a success flag and the user tag,
+# plus kind-specific payload fields.
+cdef class ConnectivityEvent:
+
+  cdef readonly grpc_completion_type completion_type
+  cdef readonly bint success
+  cdef readonly object tag
+
+
+cdef class RequestCallEvent:
+
+  cdef readonly grpc_completion_type completion_type
+  cdef readonly bint success
+  cdef readonly object tag
+  cdef readonly Call call
+  cdef readonly CallDetails call_details
+  cdef readonly tuple invocation_metadata
+
+
+cdef class BatchOperationEvent:
+
+  cdef readonly grpc_completion_type completion_type
+  cdef readonly bint success
+  cdef readonly object tag
+  cdef readonly object batch_operations
+
+
+cdef class ServerShutdownEvent:
+
+  cdef readonly grpc_completion_type completion_type
+  cdef readonly bint success
+  cdef readonly object tag
diff --git a/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/event.pyx.pxi b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/event.pyx.pxi
new file mode 100644
index 0000000000..af26d27318
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/event.pyx.pxi
@@ -0,0 +1,55 @@
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+cdef class ConnectivityEvent:
+  # Channel connectivity-watch completion.
+
+  def __cinit__(
+      self, grpc_completion_type completion_type, bint success, object tag):
+    self.completion_type = completion_type
+    self.success = success
+    self.tag = tag
+
+
+cdef class RequestCallEvent:
+  # Server-side "new call requested" completion, carrying the new call,
+  # its details and the client's invocation metadata.
+
+  def __cinit__(
+      self, grpc_completion_type completion_type, bint success, object tag,
+      Call call, CallDetails call_details, tuple invocation_metadata):
+    self.completion_type = completion_type
+    self.success = success
+    self.tag = tag
+    self.call = call
+    self.call_details = call_details
+    self.invocation_metadata = invocation_metadata
+
+
+cdef class BatchOperationEvent:
+  # Completion of a batch of call operations.
+
+  def __cinit__(
+      self, grpc_completion_type completion_type, bint success, object tag,
+      object batch_operations):
+    self.completion_type = completion_type
+    self.success = success
+    self.tag = tag
+    self.batch_operations = batch_operations
+
+
+cdef class ServerShutdownEvent:
+  # Completion of a server shutdown request.
+
+  def __cinit__(
+      self, grpc_completion_type completion_type, bint success, object tag):
+    self.completion_type = completion_type
+    self.success = success
+    self.tag = tag
diff --git a/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/fork_posix.pxd.pxi b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/fork_posix.pxd.pxi
new file mode 100644
index 0000000000..a925bdd2e6
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/fork_posix.pxd.pxi
@@ -0,0 +1,29 @@
+# Copyright 2018 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+cdef extern from "pthread.h" nogil:
+  int pthread_atfork(
+    void (*prepare)() nogil,
+    void (*parent)() nogil,
+    void (*child)() nogil)
+
+
+# Runs in the forking process before fork(): pauses gRPC Python threads.
+cdef void __prefork() nogil
+
+
+# Runs in the parent after fork(): resumes normal operation.
+cdef void __postfork_parent() nogil
+
+
+# Runs in the child after fork(): resets gRPC Python state.
+cdef void __postfork_child() nogil \ No newline at end of file
diff --git a/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/fork_posix.pyx.pxi b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/fork_posix.pyx.pxi
new file mode 100644
index 0000000000..53657e8b1a
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/fork_posix.pyx.pxi
@@ -0,0 +1,208 @@
+# Copyright 2018 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# How long __prefork waits for gRPC-owned threads to quiesce before giving up.
+_AWAIT_THREADS_TIMEOUT_SECONDS = 5
+
+_TRUE_VALUES = ['yes', 'Yes', 'YES', 'true', 'True', 'TRUE', '1']
+
+# This flag enables experimental support within gRPC Python for applications
+# that will fork() without exec(). When enabled, gRPC Python will attempt to
+# pause all of its internally created threads before the fork syscall proceeds.
+#
+# For this to be successful, the application must not have multiple threads of
+# its own calling into gRPC when fork is invoked. Any callbacks from gRPC
+# Python-spawned threads into user code (e.g., callbacks for asynchronous RPCs)
+# must not block and should execute quickly.
+#
+# This flag is not supported on Windows.
+# This flag is also not supported for non-native IO manager.
+_GRPC_ENABLE_FORK_SUPPORT = (
+    os.environ.get('GRPC_ENABLE_FORK_SUPPORT', '0')
+        .lower() in _TRUE_VALUES)
+
+# Set by __prefork when threads failed to quiesce; consulted by
+# __postfork_child to skip state reset in that undefined situation.
+_fork_handler_failed = False
+
+cdef void __prefork() nogil:
+  # pthread_atfork 'prepare' handler: mark a fork as in progress and wait
+  # for all gRPC-owned Python threads to quiesce.
+  with gil:
+    global _fork_handler_failed
+    _fork_handler_failed = False
+    with _fork_state.fork_in_progress_condition:
+      _fork_state.fork_in_progress = True
+    if not _fork_state.active_thread_count.await_zero_threads(
+        _AWAIT_THREADS_TIMEOUT_SECONDS):
+      _LOGGER.error(
+          'Failed to shutdown gRPC Python threads prior to fork. '
+          'Behavior after fork will be undefined.')
+      _fork_handler_failed = True
+
+
+cdef void __postfork_parent() nogil:
+  # pthread_atfork 'parent' handler: clear the in-progress flag and wake
+  # any threads parked in block_if_fork_in_progress().
+  with gil:
+    with _fork_state.fork_in_progress_condition:
+      _fork_state.fork_in_progress = False
+      _fork_state.fork_in_progress_condition.notify_all()
+
+
+cdef void __postfork_child() nogil:
+  # pthread_atfork 'child' handler: rebuild fork-support state and close
+  # inherited channels; exits the child on any failure.
+  with gil:
+    try:
+      if _fork_handler_failed:
+        return
+      # Thread could be holding the fork_in_progress_condition inside of
+      # block_if_fork_in_progress() when fork occurs. Reset the lock here.
+      _fork_state.fork_in_progress_condition = threading.Condition()
+      # A thread in return_from_user_request_generator() may hold this lock
+      # when fork occurs.
+      _fork_state.active_thread_count = _ActiveThreadCount()
+      for state_to_reset in _fork_state.postfork_states_to_reset:
+        state_to_reset.reset_postfork_child()
+      _fork_state.postfork_states_to_reset = []
+      _fork_state.fork_epoch += 1
+      for channel in _fork_state.channels:
+        channel._close_on_fork()
+      with _fork_state.fork_in_progress_condition:
+        _fork_state.fork_in_progress = False
+    except:
+      _LOGGER.error('Exiting child due to raised exception')
+      _LOGGER.error(sys.exc_info()[0])
+      os._exit(os.EX_USAGE)
+
+  # The C core must have fully shut down in the child; otherwise its state
+  # is unusable after fork.
+  if grpc_is_initialized() > 0:
+    with gil:
+      _LOGGER.error('Failed to shutdown gRPC Core after fork()')
+      os._exit(os.EX_USAGE)
+
+
+def fork_handlers_and_grpc_init():
+  # Initialize the C core and, when fork support is enabled, register the
+  # pthread_atfork handlers exactly once per process.
+  grpc_init()
+  if _GRPC_ENABLE_FORK_SUPPORT:
+    with _fork_state.fork_handler_registered_lock:
+      if not _fork_state.fork_handler_registered:
+        pthread_atfork(&__prefork, &__postfork_parent, &__postfork_child)
+        _fork_state.fork_handler_registered = True
+
+
+
+
+class ForkManagedThread(object):
+  # Thread wrapper that keeps _fork_state.active_thread_count accurate so
+  # __prefork can wait for gRPC-owned threads before fork() proceeds.
+  def __init__(self, target, args=()):
+    if _GRPC_ENABLE_FORK_SUPPORT:
+      def managed_target(*args):
+        try:
+          target(*args)
+        finally:
+          # Always decrement, even if the target raises.
+          _fork_state.active_thread_count.decrement()
+      self._thread = threading.Thread(target=_run_with_context(managed_target), args=args)
+    else:
+      self._thread = threading.Thread(target=_run_with_context(target), args=args)
+
+  def setDaemon(self, daemonic):
+    self._thread.daemon = daemonic
+
+  def start(self):
+    if _GRPC_ENABLE_FORK_SUPPORT:
+      _fork_state.active_thread_count.increment()
+    self._thread.start()
+
+  def join(self):
+    self._thread.join()
+
+
+def block_if_fork_in_progress(postfork_state_to_reset=None):
+  # Park the calling thread until an in-progress fork() completes.
+  if _GRPC_ENABLE_FORK_SUPPORT:
+    with _fork_state.fork_in_progress_condition:
+      if not _fork_state.fork_in_progress:
+        return
+      if postfork_state_to_reset is not None:
+        _fork_state.postfork_states_to_reset.append(postfork_state_to_reset)
+      # This thread must not count as "active" while it waits out the fork.
+      _fork_state.active_thread_count.decrement()
+      _fork_state.fork_in_progress_condition.wait()
+      _fork_state.active_thread_count.increment()
+
+
+def enter_user_request_generator():
+  # Time spent in user generator code must not block __prefork.
+  if _GRPC_ENABLE_FORK_SUPPORT:
+    _fork_state.active_thread_count.decrement()
+
+
+def return_from_user_request_generator():
+  if _GRPC_ENABLE_FORK_SUPPORT:
+    _fork_state.active_thread_count.increment()
+    block_if_fork_in_progress()
+
+
+def get_fork_epoch():
+  # Incremented in the child on every fork; lets callers detect forks.
+  return _fork_state.fork_epoch
+
+
+def is_fork_support_enabled():
+  return _GRPC_ENABLE_FORK_SUPPORT
+
+
+def fork_register_channel(channel):
+  # Channels are tracked so __postfork_child can close them in the child.
+  if _GRPC_ENABLE_FORK_SUPPORT:
+    _fork_state.channels.add(channel)
+
+
+def fork_unregister_channel(channel):
+  if _GRPC_ENABLE_FORK_SUPPORT:
+    _fork_state.channels.discard(channel)
+
+
+class _ActiveThreadCount(object):
+  # Condition-guarded counter of gRPC-owned threads; __prefork waits on it
+  # via await_zero_threads.
+  def __init__(self):
+    self._num_active_threads = 0
+    self._condition = threading.Condition()
+
+  def increment(self):
+    with self._condition:
+      self._num_active_threads += 1
+
+  def decrement(self):
+    with self._condition:
+      self._num_active_threads -= 1
+      if self._num_active_threads == 0:
+        self._condition.notify_all()
+
+  def await_zero_threads(self, timeout_secs):
+    # Returns True iff the count reached zero within timeout_secs.
+    end_time = time.time() + timeout_secs
+    wait_time = timeout_secs
+    with self._condition:
+      while True:
+        if self._num_active_threads > 0:
+          self._condition.wait(wait_time)
+        if self._num_active_threads == 0:
+          return True
+        # Thread count may have increased before this re-obtains the
+        # lock after a notify(). Wait again until timeout_secs has
+        # elapsed.
+        wait_time = end_time - time.time()
+        if wait_time <= 0:
+          return False
+
+
+class _ForkState(object):
+  # Process-global bookkeeping for fork support.
+  def __init__(self):
+    self.fork_in_progress_condition = threading.Condition()
+    self.fork_in_progress = False
+    self.postfork_states_to_reset = []
+    self.fork_handler_registered_lock = threading.Lock()
+    self.fork_handler_registered = False
+    self.active_thread_count = _ActiveThreadCount()
+    self.fork_epoch = 0
+    self.channels = set()
+
+
+# Singleton instance shared by all of the fork-support helpers above.
+_fork_state = _ForkState()
diff --git a/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/fork_windows.pyx.pxi b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/fork_windows.pyx.pxi
new file mode 100644
index 0000000000..67aaf4d033
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/fork_windows.pyx.pxi
@@ -0,0 +1,61 @@
+# Copyright 2018 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# No-op implementations for Windows.
+
def fork_handlers_and_grpc_init():
    # Windows build: there is no fork(), so no handlers to install --
    # just initialize the gRPC core.
    grpc_init()
+
+
class ForkManagedThread(object):
    """Windows stand-in for the POSIX ForkManagedThread.

    There is no fork() on Windows, so this simply delegates to a plain
    threading.Thread while keeping the same interface.
    """

    def __init__(self, target, args=()):
        # _run_with_context is defined elsewhere in this extension module;
        # it wraps the callable before handing it to the thread.
        wrapped_target = _run_with_context(target)
        self._thread = threading.Thread(target=wrapped_target, args=args)

    def setDaemon(self, daemonic):
        """Mirror Thread.setDaemon() via the modern .daemon attribute."""
        self._thread.daemon = daemonic

    def start(self):
        """Start the underlying thread."""
        self._thread.start()

    def join(self):
        """Wait for the underlying thread to finish."""
        self._thread.join()
+
+
def block_if_fork_in_progress(postfork_state_to_reset=None):
    """No-op: there is no fork() on Windows."""


def enter_user_request_generator():
    """No-op: there is no fork() on Windows."""


def return_from_user_request_generator():
    """No-op: there is no fork() on Windows."""


def get_fork_epoch():
    """Always 0: the fork epoch never advances on Windows."""
    return 0


def is_fork_support_enabled():
    """Fork support is never available on Windows."""
    return False


def fork_register_channel(channel):
    """No-op: channels need no fork bookkeeping on Windows."""


def fork_unregister_channel(channel):
    """No-op: channels need no fork bookkeeping on Windows."""
diff --git a/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/grpc.pxi b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/grpc.pxi
new file mode 100644
index 0000000000..36e8312d02
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/grpc.pxi
@@ -0,0 +1,729 @@
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+cimport libc.time
+
+ctypedef ssize_t intptr_t
+ctypedef size_t uintptr_t
+ctypedef signed char int8_t
+ctypedef signed short int16_t
+ctypedef signed int int32_t
+ctypedef signed long long int64_t
+ctypedef unsigned char uint8_t
+ctypedef unsigned short uint16_t
+ctypedef unsigned int uint32_t
+ctypedef unsigned long long uint64_t
+
+# C++ Utilities
+
+# NOTE(lidiz) Unfortunately, we can't use "cimport" here because Cython
+# links it with exception handling. It introduces new dependencies.
+cdef extern from "<queue>" namespace "std" nogil:
+ cdef cppclass queue[T]:
+ queue()
+ bint empty()
+ T& front()
+ T& back()
+ void pop()
+ void push(T&)
+ size_t size()
+
+
+cdef extern from "<mutex>" namespace "std" nogil:
+ cdef cppclass mutex:
+ mutex()
+ void lock()
+ void unlock()
+
+ cdef cppclass unique_lock[Mutex]:
+ unique_lock(Mutex&)
+
+cdef extern from "<condition_variable>" namespace "std" nogil:
+ cdef cppclass condition_variable:
+ condition_variable()
+ void notify_all()
+ void wait(unique_lock[mutex]&)
+
+# gRPC Core Declarations
+
+cdef extern from "grpc/support/alloc.h":
+
+ void *gpr_malloc(size_t size) nogil
+ void *gpr_zalloc(size_t size) nogil
+ void gpr_free(void *ptr) nogil
+ void *gpr_realloc(void *p, size_t size) nogil
+
+
+cdef extern from "grpc/byte_buffer_reader.h":
+
+ struct grpc_byte_buffer_reader:
+ # We don't care about the internals
+ pass
+
+
+cdef extern from "grpc/impl/codegen/grpc_types.h":
+ ctypedef struct grpc_completion_queue_functor:
+ void (*functor_run)(grpc_completion_queue_functor*, int);
+
+
+cdef extern from "grpc/grpc.h":
+
+ ctypedef struct grpc_slice:
+ # don't worry about writing out the members of grpc_slice; we never access
+ # them directly.
+ pass
+
+ grpc_slice grpc_slice_ref(grpc_slice s) nogil
+ void grpc_slice_unref(grpc_slice s) nogil
+ grpc_slice grpc_empty_slice() nogil
+ grpc_slice grpc_slice_new(void *p, size_t len, void (*destroy)(void *)) nogil
+ grpc_slice grpc_slice_new_with_len(
+ void *p, size_t len, void (*destroy)(void *, size_t)) nogil
+ grpc_slice grpc_slice_malloc(size_t length) nogil
+ grpc_slice grpc_slice_from_copied_string(const char *source) nogil
+ grpc_slice grpc_slice_from_copied_buffer(const char *source, size_t len) nogil
+ grpc_slice grpc_slice_copy(grpc_slice s) nogil
+
+ # Declare functions for function-like macros (because Cython)...
+ void *grpc_slice_start_ptr "GRPC_SLICE_START_PTR" (grpc_slice s) nogil
+ size_t grpc_slice_length "GRPC_SLICE_LENGTH" (grpc_slice s) nogil
+
+ const int GPR_MS_PER_SEC
+ const int GPR_US_PER_SEC
+ const int GPR_NS_PER_SEC
+
+ ctypedef enum gpr_clock_type:
+ GPR_CLOCK_MONOTONIC
+ GPR_CLOCK_REALTIME
+ GPR_CLOCK_PRECISE
+ GPR_TIMESPAN
+
+ ctypedef struct gpr_timespec:
+ int64_t seconds "tv_sec"
+ int32_t nanoseconds "tv_nsec"
+ gpr_clock_type clock_type
+
+ gpr_timespec gpr_time_0(gpr_clock_type type) nogil
+ gpr_timespec gpr_inf_future(gpr_clock_type type) nogil
+ gpr_timespec gpr_inf_past(gpr_clock_type type) nogil
+
+ gpr_timespec gpr_now(gpr_clock_type clock) nogil
+
+ gpr_timespec gpr_convert_clock_type(gpr_timespec t,
+ gpr_clock_type target_clock) nogil
+
+ gpr_timespec gpr_time_from_millis(int64_t ms, gpr_clock_type type) nogil
+ gpr_timespec gpr_time_from_nanos(int64_t ns, gpr_clock_type type) nogil
+ double gpr_timespec_to_micros(gpr_timespec t) nogil
+
+ gpr_timespec gpr_time_add(gpr_timespec a, gpr_timespec b) nogil
+
+ int gpr_time_cmp(gpr_timespec a, gpr_timespec b) nogil
+
+ ctypedef struct grpc_byte_buffer:
+ # We don't care about the internals.
+ pass
+
+ grpc_byte_buffer *grpc_raw_byte_buffer_create(grpc_slice *slices,
+ size_t nslices) nogil
+ size_t grpc_byte_buffer_length(grpc_byte_buffer *bb) nogil
+ void grpc_byte_buffer_destroy(grpc_byte_buffer *byte_buffer) nogil
+
+ int grpc_byte_buffer_reader_init(grpc_byte_buffer_reader *reader,
+ grpc_byte_buffer *buffer) nogil
+ int grpc_byte_buffer_reader_next(grpc_byte_buffer_reader *reader,
+ grpc_slice *slice) nogil
+ void grpc_byte_buffer_reader_destroy(grpc_byte_buffer_reader *reader) nogil
+
+ ctypedef enum grpc_status_code:
+ GRPC_STATUS_OK
+ GRPC_STATUS_CANCELLED
+ GRPC_STATUS_UNKNOWN
+ GRPC_STATUS_INVALID_ARGUMENT
+ GRPC_STATUS_DEADLINE_EXCEEDED
+ GRPC_STATUS_NOT_FOUND
+ GRPC_STATUS_ALREADY_EXISTS
+ GRPC_STATUS_PERMISSION_DENIED
+ GRPC_STATUS_UNAUTHENTICATED
+ GRPC_STATUS_RESOURCE_EXHAUSTED
+ GRPC_STATUS_FAILED_PRECONDITION
+ GRPC_STATUS_ABORTED
+ GRPC_STATUS_OUT_OF_RANGE
+ GRPC_STATUS_UNIMPLEMENTED
+ GRPC_STATUS_INTERNAL
+ GRPC_STATUS_UNAVAILABLE
+ GRPC_STATUS_DATA_LOSS
+ GRPC_STATUS__DO_NOT_USE
+
+ const char *GRPC_ARG_ENABLE_CENSUS
+ const char *GRPC_ARG_MAX_CONCURRENT_STREAMS
+ const char *GRPC_ARG_MAX_RECEIVE_MESSAGE_LENGTH
+ const char *GRPC_ARG_MAX_SEND_MESSAGE_LENGTH
+ const char *GRPC_ARG_HTTP2_INITIAL_SEQUENCE_NUMBER
+ const char *GRPC_ARG_DEFAULT_AUTHORITY
+ const char *GRPC_ARG_PRIMARY_USER_AGENT_STRING
+ const char *GRPC_ARG_SECONDARY_USER_AGENT_STRING
+ const char *GRPC_SSL_TARGET_NAME_OVERRIDE_ARG
+ const char *GRPC_SSL_SESSION_CACHE_ARG
+ const char *_GRPC_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM \
+ "GRPC_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM"
+ const char *GRPC_COMPRESSION_CHANNEL_DEFAULT_LEVEL
+ const char *GRPC_COMPRESSION_CHANNEL_ENABLED_ALGORITHMS_BITSET
+
+ const int GRPC_WRITE_BUFFER_HINT
+ const int GRPC_WRITE_NO_COMPRESS
+ const int GRPC_WRITE_USED_MASK
+
+ const int GRPC_INITIAL_METADATA_WAIT_FOR_READY
+ const int GRPC_INITIAL_METADATA_WAIT_FOR_READY_EXPLICITLY_SET
+ const int GRPC_INITIAL_METADATA_USED_MASK
+
+ const int GRPC_MAX_COMPLETION_QUEUE_PLUCKERS
+
+ ctypedef struct grpc_completion_queue:
+ # We don't care about the internals (and in fact don't know them)
+ pass
+
+ ctypedef struct grpc_channel:
+ # We don't care about the internals (and in fact don't know them)
+ pass
+
+ ctypedef struct grpc_server:
+ # We don't care about the internals (and in fact don't know them)
+ pass
+
+ ctypedef struct grpc_call:
+ # We don't care about the internals (and in fact don't know them)
+ pass
+
+ ctypedef enum grpc_arg_type:
+ GRPC_ARG_STRING
+ GRPC_ARG_INTEGER
+ GRPC_ARG_POINTER
+
+ ctypedef struct grpc_arg_pointer_vtable:
+ void *(*copy)(void *)
+ void (*destroy)(void *)
+ int (*cmp)(void *, void *)
+
+ ctypedef struct grpc_arg_value_pointer:
+ void *address "p"
+ grpc_arg_pointer_vtable *vtable
+
+ union grpc_arg_value:
+ char *string
+ int integer
+ grpc_arg_value_pointer pointer
+
+ ctypedef struct grpc_arg:
+ grpc_arg_type type
+ char *key
+ grpc_arg_value value
+
+ ctypedef struct grpc_channel_args:
+ size_t arguments_length "num_args"
+ grpc_arg *arguments "args"
+
+ ctypedef enum grpc_stream_compression_level:
+ GRPC_STREAM_COMPRESS_LEVEL_NONE
+ GRPC_STREAM_COMPRESS_LEVEL_LOW
+ GRPC_STREAM_COMPRESS_LEVEL_MED
+ GRPC_STREAM_COMPRESS_LEVEL_HIGH
+
+ ctypedef enum grpc_call_error:
+ GRPC_CALL_OK
+ GRPC_CALL_ERROR
+ GRPC_CALL_ERROR_NOT_ON_SERVER
+ GRPC_CALL_ERROR_NOT_ON_CLIENT
+ GRPC_CALL_ERROR_ALREADY_ACCEPTED
+ GRPC_CALL_ERROR_ALREADY_INVOKED
+ GRPC_CALL_ERROR_NOT_INVOKED
+ GRPC_CALL_ERROR_ALREADY_FINISHED
+ GRPC_CALL_ERROR_TOO_MANY_OPERATIONS
+ GRPC_CALL_ERROR_INVALID_FLAGS
+ GRPC_CALL_ERROR_INVALID_METADATA
+
+ ctypedef enum grpc_cq_completion_type:
+ GRPC_CQ_NEXT
+ GRPC_CQ_PLUCK
+
+ ctypedef enum grpc_cq_polling_type:
+ GRPC_CQ_DEFAULT_POLLING
+ GRPC_CQ_NON_LISTENING
+ GRPC_CQ_NON_POLLING
+
+ ctypedef struct grpc_completion_queue_attributes:
+ int version
+ grpc_cq_completion_type cq_completion_type
+ grpc_cq_polling_type cq_polling_type
+ void* cq_shutdown_cb
+
+ ctypedef enum grpc_connectivity_state:
+ GRPC_CHANNEL_IDLE
+ GRPC_CHANNEL_CONNECTING
+ GRPC_CHANNEL_READY
+ GRPC_CHANNEL_TRANSIENT_FAILURE
+ GRPC_CHANNEL_SHUTDOWN
+
+ ctypedef struct grpc_metadata:
+ grpc_slice key
+ grpc_slice value
+ # ignore the 'internal_data.obfuscated' fields.
+
+ ctypedef enum grpc_completion_type:
+ GRPC_QUEUE_SHUTDOWN
+ GRPC_QUEUE_TIMEOUT
+ GRPC_OP_COMPLETE
+
+ ctypedef struct grpc_event:
+ grpc_completion_type type
+ int success
+ void *tag
+
+ ctypedef struct grpc_metadata_array:
+ size_t count
+ size_t capacity
+ grpc_metadata *metadata
+
+ void grpc_metadata_array_init(grpc_metadata_array *array) nogil
+ void grpc_metadata_array_destroy(grpc_metadata_array *array) nogil
+
+ ctypedef struct grpc_call_details:
+ grpc_slice method
+ grpc_slice host
+ gpr_timespec deadline
+
+ void grpc_call_details_init(grpc_call_details *details) nogil
+ void grpc_call_details_destroy(grpc_call_details *details) nogil
+
+ ctypedef enum grpc_op_type:
+ GRPC_OP_SEND_INITIAL_METADATA
+ GRPC_OP_SEND_MESSAGE
+ GRPC_OP_SEND_CLOSE_FROM_CLIENT
+ GRPC_OP_SEND_STATUS_FROM_SERVER
+ GRPC_OP_RECV_INITIAL_METADATA
+ GRPC_OP_RECV_MESSAGE
+ GRPC_OP_RECV_STATUS_ON_CLIENT
+ GRPC_OP_RECV_CLOSE_ON_SERVER
+
+ ctypedef struct grpc_op_send_initial_metadata_maybe_compression_level:
+ uint8_t is_set
+ grpc_compression_level level
+
+ ctypedef struct grpc_op_data_send_initial_metadata:
+ size_t count
+ grpc_metadata *metadata
+ grpc_op_send_initial_metadata_maybe_compression_level maybe_compression_level
+
+ ctypedef struct grpc_op_data_send_status_from_server:
+ size_t trailing_metadata_count
+ grpc_metadata *trailing_metadata
+ grpc_status_code status
+ grpc_slice *status_details
+
+ ctypedef struct grpc_op_data_recv_status_on_client:
+ grpc_metadata_array *trailing_metadata
+ grpc_status_code *status
+ grpc_slice *status_details
+ char** error_string
+
+ ctypedef struct grpc_op_data_recv_close_on_server:
+ int *cancelled
+
+ ctypedef struct grpc_op_data_send_message:
+ grpc_byte_buffer *send_message
+
+ ctypedef struct grpc_op_data_receive_message:
+ grpc_byte_buffer **receive_message "recv_message"
+
+ ctypedef struct grpc_op_data_receive_initial_metadata:
+ grpc_metadata_array *receive_initial_metadata "recv_initial_metadata"
+
+ union grpc_op_data:
+ grpc_op_data_send_initial_metadata send_initial_metadata
+ grpc_op_data_send_message send_message
+ grpc_op_data_send_status_from_server send_status_from_server
+ grpc_op_data_receive_initial_metadata receive_initial_metadata "recv_initial_metadata"
+ grpc_op_data_receive_message receive_message "recv_message"
+ grpc_op_data_recv_status_on_client receive_status_on_client "recv_status_on_client"
+ grpc_op_data_recv_close_on_server receive_close_on_server "recv_close_on_server"
+
+ ctypedef struct grpc_op:
+ grpc_op_type type "op"
+ uint32_t flags
+ void * reserved
+ grpc_op_data data
+
+ void grpc_dont_init_openssl() nogil
+ void grpc_init() nogil
+ void grpc_shutdown() nogil
+ void grpc_shutdown_blocking() nogil
+ int grpc_is_initialized() nogil
+
+ ctypedef struct grpc_completion_queue_factory:
+ pass
+
+ grpc_completion_queue_factory *grpc_completion_queue_factory_lookup(
+ const grpc_completion_queue_attributes* attributes) nogil
+ grpc_completion_queue *grpc_completion_queue_create(
+ const grpc_completion_queue_factory* factory,
+ const grpc_completion_queue_attributes* attr, void* reserved) nogil
+ grpc_completion_queue *grpc_completion_queue_create_for_next(void *reserved) nogil
+
+ grpc_event grpc_completion_queue_next(grpc_completion_queue *cq,
+ gpr_timespec deadline,
+ void *reserved) nogil
+ grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cq, void *tag,
+ gpr_timespec deadline,
+ void *reserved) nogil
+ void grpc_completion_queue_shutdown(grpc_completion_queue *cq) nogil
+ void grpc_completion_queue_destroy(grpc_completion_queue *cq) nogil
+
+ grpc_completion_queue *grpc_completion_queue_create_for_callback(
+ grpc_completion_queue_functor* shutdown_callback,
+ void *reserved) nogil
+
+ grpc_call_error grpc_call_start_batch(
+ grpc_call *call, const grpc_op *ops, size_t nops, void *tag,
+ void *reserved) nogil
+ grpc_call_error grpc_call_cancel(grpc_call *call, void *reserved) nogil
+ grpc_call_error grpc_call_cancel_with_status(grpc_call *call,
+ grpc_status_code status,
+ const char *description,
+ void *reserved) nogil
+ char *grpc_call_get_peer(grpc_call *call) nogil
+ void grpc_call_unref(grpc_call *call) nogil
+
+ grpc_call *grpc_channel_create_call(
+ grpc_channel *channel, grpc_call *parent_call, uint32_t propagation_mask,
+ grpc_completion_queue *completion_queue, grpc_slice method,
+ const grpc_slice *host, gpr_timespec deadline, void *reserved) nogil
+ grpc_connectivity_state grpc_channel_check_connectivity_state(
+ grpc_channel *channel, int try_to_connect) nogil
+ void grpc_channel_watch_connectivity_state(
+ grpc_channel *channel, grpc_connectivity_state last_observed_state,
+ gpr_timespec deadline, grpc_completion_queue *cq, void *tag) nogil
+ char *grpc_channel_get_target(grpc_channel *channel) nogil
+ void grpc_channel_destroy(grpc_channel *channel) nogil
+
+ grpc_server *grpc_server_create(
+ const grpc_channel_args *args, void *reserved) nogil
+ grpc_call_error grpc_server_request_call(
+ grpc_server *server, grpc_call **call, grpc_call_details *details,
+ grpc_metadata_array *request_metadata, grpc_completion_queue
+ *cq_bound_to_call, grpc_completion_queue *cq_for_notification, void
+ *tag_new) nogil
+ void grpc_server_register_completion_queue(grpc_server *server,
+ grpc_completion_queue *cq,
+ void *reserved) nogil
+
+ ctypedef struct grpc_server_config_fetcher:
+ pass
+
+ void grpc_server_set_config_fetcher(
+ grpc_server* server, grpc_server_config_fetcher* config_fetcher) nogil
+
+ ctypedef struct grpc_server_xds_status_notifier:
+ void (*on_serving_status_update)(void* user_data, const char* uri,
+ grpc_status_code code,
+ const char* error_message)
+ void* user_data;
+
+ grpc_server_config_fetcher* grpc_server_config_fetcher_xds_create(
+ grpc_server_xds_status_notifier notifier,
+ const grpc_channel_args* args) nogil
+
+
+ void grpc_server_start(grpc_server *server) nogil
+ void grpc_server_shutdown_and_notify(
+ grpc_server *server, grpc_completion_queue *cq, void *tag) nogil
+ void grpc_server_cancel_all_calls(grpc_server *server) nogil
+ void grpc_server_destroy(grpc_server *server) nogil
+
+ char* grpc_channelz_get_top_channels(intptr_t start_channel_id)
+ char* grpc_channelz_get_servers(intptr_t start_server_id)
+ char* grpc_channelz_get_server(intptr_t server_id)
+ char* grpc_channelz_get_server_sockets(intptr_t server_id,
+ intptr_t start_socket_id,
+ intptr_t max_results)
+ char* grpc_channelz_get_channel(intptr_t channel_id)
+ char* grpc_channelz_get_subchannel(intptr_t subchannel_id)
+ char* grpc_channelz_get_socket(intptr_t socket_id)
+
+ grpc_slice grpc_dump_xds_configs() nogil
+
+
+cdef extern from "grpc/grpc_security.h":
+
+ # Declare this as an enum, this is the only way to make it a const in
+ # cython
+ enum: GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX
+
+ ctypedef enum grpc_ssl_roots_override_result:
+ GRPC_SSL_ROOTS_OVERRIDE_OK
+ GRPC_SSL_ROOTS_OVERRIDE_FAILED_PERMANENTLY
+ GRPC_SSL_ROOTS_OVERRIDE_FAILED
+
+ ctypedef enum grpc_ssl_client_certificate_request_type:
+ GRPC_SSL_DONT_REQUEST_CLIENT_CERTIFICATE,
+ GRPC_SSL_REQUEST_CLIENT_CERTIFICATE_BUT_DONT_VERIFY
+ GRPC_SSL_REQUEST_CLIENT_CERTIFICATE_AND_VERIFY
+ GRPC_SSL_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_BUT_DONT_VERIFY
+ GRPC_SSL_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_AND_VERIFY
+
+ ctypedef enum grpc_security_level:
+ GRPC_SECURITY_MIN
+ GRPC_SECURITY_NONE = GRPC_SECURITY_MIN
+ GRPC_INTEGRITY_ONLY
+ GRPC_PRIVACY_AND_INTEGRITY
+ GRPC_SECURITY_MAX = GRPC_PRIVACY_AND_INTEGRITY
+
+ ctypedef enum grpc_ssl_certificate_config_reload_status:
+ GRPC_SSL_CERTIFICATE_CONFIG_RELOAD_UNCHANGED
+ GRPC_SSL_CERTIFICATE_CONFIG_RELOAD_NEW
+ GRPC_SSL_CERTIFICATE_CONFIG_RELOAD_FAIL
+
+ ctypedef struct grpc_ssl_server_certificate_config:
+ # We don't care about the internals
+ pass
+
+ ctypedef struct grpc_ssl_server_credentials_options:
+ # We don't care about the internals
+ pass
+
+ grpc_ssl_server_certificate_config * grpc_ssl_server_certificate_config_create(
+ const char *pem_root_certs,
+ const grpc_ssl_pem_key_cert_pair *pem_key_cert_pairs,
+ size_t num_key_cert_pairs)
+
+ void grpc_ssl_server_certificate_config_destroy(grpc_ssl_server_certificate_config *config)
+
+ ctypedef grpc_ssl_certificate_config_reload_status (*grpc_ssl_server_certificate_config_callback)(
+ void *user_data,
+ grpc_ssl_server_certificate_config **config)
+
+ grpc_ssl_server_credentials_options *grpc_ssl_server_credentials_create_options_using_config(
+ grpc_ssl_client_certificate_request_type client_certificate_request,
+ grpc_ssl_server_certificate_config *certificate_config)
+
+ grpc_ssl_server_credentials_options* grpc_ssl_server_credentials_create_options_using_config_fetcher(
+ grpc_ssl_client_certificate_request_type client_certificate_request,
+ grpc_ssl_server_certificate_config_callback cb,
+ void *user_data)
+
+ grpc_server_credentials *grpc_ssl_server_credentials_create_with_options(
+ grpc_ssl_server_credentials_options *options)
+
+ ctypedef struct grpc_ssl_pem_key_cert_pair:
+ const char *private_key
+ const char *certificate_chain "cert_chain"
+
+ ctypedef struct grpc_channel_credentials:
+ # We don't care about the internals (and in fact don't know them)
+ pass
+
+ ctypedef struct grpc_call_credentials:
+ # We don't care about the internals (and in fact don't know them)
+ pass
+
+ ctypedef struct grpc_ssl_session_cache:
+ # We don't care about the internals (and in fact don't know them)
+ pass
+
+ ctypedef struct verify_peer_options:
+ # We don't care about the internals (and in fact don't know them)
+ pass
+
+ ctypedef void (*grpc_ssl_roots_override_callback)(char **pem_root_certs)
+
+ grpc_ssl_session_cache *grpc_ssl_session_cache_create_lru(size_t capacity)
+ void grpc_ssl_session_cache_destroy(grpc_ssl_session_cache* cache)
+
+ void grpc_set_ssl_roots_override_callback(
+ grpc_ssl_roots_override_callback cb) nogil
+
+ grpc_channel_credentials *grpc_google_default_credentials_create(grpc_call_credentials* call_credentials) nogil
+ grpc_channel_credentials *grpc_ssl_credentials_create(
+ const char *pem_root_certs, grpc_ssl_pem_key_cert_pair *pem_key_cert_pair,
+ verify_peer_options *verify_options, void *reserved) nogil
+ grpc_channel_credentials *grpc_composite_channel_credentials_create(
+ grpc_channel_credentials *creds1, grpc_call_credentials *creds2,
+ void *reserved) nogil
+ void grpc_channel_credentials_release(grpc_channel_credentials *creds) nogil
+
+ grpc_channel_credentials *grpc_xds_credentials_create(
+ grpc_channel_credentials *fallback_creds) nogil
+
+ grpc_channel_credentials *grpc_insecure_credentials_create() nogil
+
+ grpc_server_credentials *grpc_xds_server_credentials_create(
+ grpc_server_credentials *fallback_creds) nogil
+
+ grpc_server_credentials *grpc_insecure_server_credentials_create() nogil
+
+ grpc_call_credentials *grpc_composite_call_credentials_create(
+ grpc_call_credentials *creds1, grpc_call_credentials *creds2,
+ void *reserved) nogil
+ grpc_call_credentials *grpc_google_compute_engine_credentials_create(
+ void *reserved) nogil
+ grpc_call_credentials *grpc_service_account_jwt_access_credentials_create(
+ const char *json_key,
+ gpr_timespec token_lifetime, void *reserved) nogil
+ grpc_call_credentials *grpc_google_refresh_token_credentials_create(
+ const char *json_refresh_token, void *reserved) nogil
+ grpc_call_credentials *grpc_google_iam_credentials_create(
+ const char *authorization_token, const char *authority_selector,
+ void *reserved) nogil
+ void grpc_call_credentials_release(grpc_call_credentials *creds) nogil
+
+ grpc_channel *grpc_channel_create(
+ const char *target, grpc_channel_credentials *creds,
+ const grpc_channel_args *args) nogil
+
+ ctypedef struct grpc_server_credentials:
+ # We don't care about the internals (and in fact don't know them)
+ pass
+
+ void grpc_server_credentials_release(grpc_server_credentials *creds) nogil
+
+ int grpc_server_add_http2_port(grpc_server *server, const char *addr,
+ grpc_server_credentials *creds) nogil
+
+ grpc_call_error grpc_call_set_credentials(grpc_call *call,
+ grpc_call_credentials *creds) nogil
+
+ ctypedef struct grpc_auth_context:
+ # We don't care about the internals (and in fact don't know them)
+ pass
+
+ ctypedef struct grpc_auth_metadata_context:
+ const char *service_url
+ const char *method_name
+ const grpc_auth_context *channel_auth_context
+
+ ctypedef void (*grpc_credentials_plugin_metadata_cb)(
+ void *user_data, const grpc_metadata *creds_md, size_t num_creds_md,
+ grpc_status_code status, const char *error_details) nogil
+
+ ctypedef struct grpc_metadata_credentials_plugin:
+ int (*get_metadata)(
+ void *state, grpc_auth_metadata_context context,
+ grpc_credentials_plugin_metadata_cb cb, void *user_data,
+ grpc_metadata creds_md[GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX],
+ size_t *num_creds_md, grpc_status_code *status,
+ const char **error_details) except *
+ void (*destroy)(void *state) except *
+ void *state
+ const char *type
+
+ grpc_call_credentials *grpc_metadata_credentials_create_from_plugin(
+ grpc_metadata_credentials_plugin plugin, grpc_security_level min_security_level, void *reserved) nogil
+
+ ctypedef struct grpc_auth_property_iterator:
+ pass
+
+ ctypedef struct grpc_auth_property:
+ char *name
+ char *value
+ size_t value_length
+
+ grpc_auth_property *grpc_auth_property_iterator_next(
+ grpc_auth_property_iterator *it)
+
+ grpc_auth_property_iterator grpc_auth_context_property_iterator(
+ const grpc_auth_context *ctx)
+
+ grpc_auth_property_iterator grpc_auth_context_peer_identity(
+ const grpc_auth_context *ctx)
+
+ char *grpc_auth_context_peer_identity_property_name(
+ const grpc_auth_context *ctx)
+
+ grpc_auth_property_iterator grpc_auth_context_find_properties_by_name(
+ const grpc_auth_context *ctx, const char *name)
+
+ grpc_auth_context_peer_is_authenticated(
+ const grpc_auth_context *ctx)
+
+ grpc_auth_context *grpc_call_auth_context(grpc_call *call)
+
+ void grpc_auth_context_release(grpc_auth_context *context)
+
+ grpc_channel_credentials *grpc_local_credentials_create(
+ grpc_local_connect_type type)
+ grpc_server_credentials *grpc_local_server_credentials_create(
+ grpc_local_connect_type type)
+
+ ctypedef struct grpc_alts_credentials_options:
+ # We don't care about the internals (and in fact don't know them)
+ pass
+
+ grpc_channel_credentials *grpc_alts_credentials_create(
+ const grpc_alts_credentials_options *options)
+ grpc_server_credentials *grpc_alts_server_credentials_create(
+ const grpc_alts_credentials_options *options)
+
+ grpc_alts_credentials_options* grpc_alts_credentials_client_options_create()
+ grpc_alts_credentials_options* grpc_alts_credentials_server_options_create()
+ void grpc_alts_credentials_options_destroy(grpc_alts_credentials_options *options)
+ void grpc_alts_credentials_client_options_add_target_service_account(grpc_alts_credentials_options *options, const char *service_account)
+
+
+
+cdef extern from "grpc/compression.h":
+
+ ctypedef enum grpc_compression_algorithm:
+ GRPC_COMPRESS_NONE
+ GRPC_COMPRESS_DEFLATE
+ GRPC_COMPRESS_GZIP
+ GRPC_COMPRESS_STREAM_GZIP
+ GRPC_COMPRESS_ALGORITHMS_COUNT
+
+ ctypedef enum grpc_compression_level:
+ GRPC_COMPRESS_LEVEL_NONE
+ GRPC_COMPRESS_LEVEL_LOW
+ GRPC_COMPRESS_LEVEL_MED
+ GRPC_COMPRESS_LEVEL_HIGH
+ GRPC_COMPRESS_LEVEL_COUNT
+
+ ctypedef struct grpc_compression_options:
+ uint32_t enabled_algorithms_bitset
+
+ int grpc_compression_algorithm_parse(
+ grpc_slice value, grpc_compression_algorithm *algorithm) nogil
+ int grpc_compression_algorithm_name(grpc_compression_algorithm algorithm,
+ const char **name) nogil
+ grpc_compression_algorithm grpc_compression_algorithm_for_level(
+ grpc_compression_level level, uint32_t accepted_encodings) nogil
+ void grpc_compression_options_init(grpc_compression_options *opts) nogil
+ void grpc_compression_options_enable_algorithm(
+ grpc_compression_options *opts,
+ grpc_compression_algorithm algorithm) nogil
+ void grpc_compression_options_disable_algorithm(
+ grpc_compression_options *opts,
+ grpc_compression_algorithm algorithm) nogil
+ int grpc_compression_options_is_algorithm_enabled(
+ const grpc_compression_options *opts,
+ grpc_compression_algorithm algorithm) nogil
+
+cdef extern from "grpc/impl/codegen/compression_types.h":
+
+ const char *_GRPC_COMPRESSION_REQUEST_ALGORITHM_MD_KEY \
+ "GRPC_COMPRESSION_REQUEST_ALGORITHM_MD_KEY"
+
+
+cdef extern from "grpc/grpc_security_constants.h":
+ ctypedef enum grpc_local_connect_type:
+ UDS
+ LOCAL_TCP
diff --git a/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/grpc_gevent.pxd.pxi b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/grpc_gevent.pxd.pxi
new file mode 100644
index 0000000000..baa9fb54a3
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/grpc_gevent.pxd.pxi
@@ -0,0 +1,21 @@
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# distutils: language=c++
+
# Shared gevent state, defined in grpc_gevent.pyx.pxi; both default to
# "gevent not active" until init_grpc_gevent() runs.
g_gevent_threadpool = None
g_gevent_activated = False

# Channel refcounting drives the greenlet-dispatch loop's lifetime.
cpdef void gevent_increment_channel_count()

cpdef void gevent_decrement_channel_count()
diff --git a/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/grpc_gevent.pyx.pxi b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/grpc_gevent.pyx.pxi
new file mode 100644
index 0000000000..41d27df594
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/grpc_gevent.pyx.pxi
@@ -0,0 +1,137 @@
+# Copyright 2018 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# distutils: language=c++
+
+from libc cimport string
+from cython.operator cimport dereference
+
+from cpython cimport Py_INCREF, Py_DECREF
+
+import atexit
+import errno
+import sys
+
# Lazily-initialized gevent handles; populated by init_grpc_gevent().
gevent_hub = None
g_gevent_pool = None
g_gevent_threadpool = None
g_gevent_activated = False


# C++-side queue of manually INCREF'd (cb, *args) tuples awaiting dispatch,
# plus the mutex/condvar guarding it (usable from nogil sections).
cdef queue[void*] g_greenlets_to_run
cdef condition_variable g_greenlets_cv
cdef mutex g_greenlets_mu
# Set by the atexit hook to terminate await_next_greenlet().
cdef bint g_shutdown_greenlets_to_run_queue = False
# Live-channel count; the dispatch loop runs only while this is > 0.
cdef int g_channel_count = 0
+
+
cdef _submit_to_greenlet_queue(object cb, tuple args):
    # Enqueue (cb, *args) for the greenlet-dispatch loop. The tuple is
    # manually INCREF'd because only a raw pointer enters the C++ queue;
    # await_next_greenlet() performs the matching DECREF on retrieval.
    cdef tuple to_call = (cb,) + args
    cdef unique_lock[mutex]* lk
    Py_INCREF(to_call)
    with nogil:
        lk = new unique_lock[mutex](g_greenlets_mu)
        g_greenlets_to_run.push(<void*>(to_call))
        del lk
        g_greenlets_cv.notify_all()
+
+
cpdef void gevent_increment_channel_count():
    # Count a newly-created channel; the first channel kicks off the
    # greenlet-dispatch loop (outside the lock, since it calls into Python).
    global g_channel_count
    cdef int old_channel_count
    with nogil:
        lk = new unique_lock[mutex](g_greenlets_mu)
        old_channel_count = g_channel_count
        g_channel_count += 1
        del lk
    if old_channel_count == 0:
        run_spawn_greenlets()
+
+
cpdef void gevent_decrement_channel_count():
    # Drop a channel; when the count reaches zero, wake
    # await_next_greenlet() so the dispatch loop can exit.
    global g_channel_count
    with nogil:
        lk = new unique_lock[mutex](g_greenlets_mu)
        g_channel_count -= 1
        if g_channel_count == 0:
            g_greenlets_cv.notify_all()
        del lk
+
+
cdef object await_next_greenlet():
    # Block (on a native thread, off the gevent hub) until a callable is
    # queued or shutdown / zero-channels is signaled. Returns the
    # (cb, *args) tuple to run, or None to tell the loop to stop.
    cdef unique_lock[mutex]* lk
    with nogil:
        # Cython doesn't allow us to do proper stack allocations, so we can't take
        # advantage of RAII.
        lk = new unique_lock[mutex](g_greenlets_mu)
        while not g_shutdown_greenlets_to_run_queue and g_channel_count != 0:
            if not g_greenlets_to_run.empty():
                break
            g_greenlets_cv.wait(dereference(lk))
    if g_channel_count == 0:
        del lk
        return None
    if g_shutdown_greenlets_to_run_queue:
        del lk
        return None
    cdef object to_call = <object>g_greenlets_to_run.front()
    # Balances the Py_INCREF in _submit_to_greenlet_queue(); the to_call
    # local still holds its own reference.
    Py_DECREF(to_call)
    g_greenlets_to_run.pop()
    del lk
    return to_call
+
def spawn_greenlets():
    """Dispatch loop: pull queued callables and run them until told to stop.

    Runs as a greenlet; each blocking wait for the next item is delegated
    to the native threadpool so the gevent hub is never blocked.
    """
    while True:
        item = g_gevent_threadpool.apply(await_next_greenlet, ())
        if item is None:
            # Shutdown requested or last channel closed.
            return
        callback = item[0]
        callback(*item[1:])
+
def run_spawn_greenlets():
    # Launch the dispatch loop as a greenlet in the module's gevent group.
    g_gevent_pool.spawn(spawn_greenlets)
+
def shutdown_await_next_greenlet():
    # atexit hook: raise the shutdown flag and wake any native thread
    # blocked in await_next_greenlet() so the process can exit cleanly.
    global g_shutdown_greenlets_to_run_queue
    cdef unique_lock[mutex]* lk
    with nogil:
        lk = new unique_lock[mutex](g_greenlets_mu)
        g_shutdown_greenlets_to_run_queue = True
        del lk
        g_greenlets_cv.notify_all()
+
def init_grpc_gevent():
    """Activate gevent integration: import gevent lazily, capture its hub's
    threadpool, and route async callbacks through the greenlet queue."""
    # Lazily import gevent
    global gevent_hub
    global g_gevent_threadpool
    global g_gevent_activated
    global g_interrupt_check_period_ms
    global g_gevent_pool

    import gevent
    import gevent.pool

    gevent_hub = gevent.hub
    # Native threadpool used to park blocking waits off the hub.
    g_gevent_threadpool = gevent_hub.get_hub().threadpool

    g_gevent_activated = True
    g_interrupt_check_period_ms = 2000

    # Group that owns the spawn_greenlets dispatch loop.
    g_gevent_pool = gevent.pool.Group()

    # From here on, core async callbacks are funneled into the greenlet queue.
    set_async_callback_func(_submit_to_greenlet_queue)

    # TODO: Document how this all works.
    atexit.register(shutdown_await_next_greenlet)
diff --git a/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/grpc_string.pyx.pxi b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/grpc_string.pyx.pxi
new file mode 100644
index 0000000000..5c1e0679a9
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/grpc_string.pyx.pxi
@@ -0,0 +1,51 @@
+# Copyright 2016 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# This function will ascii encode unicode string inputs if necessary.
+# In Python3, unicode strings are the default str type.
+cdef bytes str_to_bytes(object s):
+  # Pass bytes (and None) through unchanged; ASCII-encode unicode strings.
+  # Raises TypeError for any other type.
+  if s is None or isinstance(s, bytes):
+    return s
+  elif isinstance(s, unicode):
+    return s.encode('ascii')
+  else:
+    raise TypeError('Expected bytes, str, or unicode, not {}'.format(type(s)))
+
+
+# TODO(https://github.com/grpc/grpc/issues/13782): It would be nice for us if
+# the type of metadata that we accept were exactly the same as the type of
+# metadata that we deliver to our users (so "str" for this function's
+# parameter rather than "object"), but would it be nice for our users? Right
+# now we haven't yet heard from enough users to know one way or another.
+cdef bytes _encode(object string_or_none):
+  # Normalize an optional string-like value to bytes: None becomes b'',
+  # bytes pass through, unicode is UTF-8 encoded. Raises TypeError otherwise.
+  if string_or_none is None:
+    return b''
+  elif isinstance(string_or_none, (bytes,)):
+    return <bytes>string_or_none
+  elif isinstance(string_or_none, (unicode,)):
+    return string_or_none.encode('utf8')
+  else:
+    raise TypeError('Expected str, not {}'.format(type(string_or_none)))
+
+
+cdef str _decode(bytes bytestring):
+  # Decode bytes to str as UTF-8, logging and falling back to latin1 (which
+  # never fails) on invalid input. A value already of type str (Python 2
+  # native str is bytes) is returned unchanged.
+  if isinstance(bytestring, (str,)):
+    return <str>bytestring
+  else:
+    try:
+      return bytestring.decode('utf8')
+    except UnicodeDecodeError:
+      _LOGGER.exception('Invalid encoding on %s', bytestring)
+      return bytestring.decode('latin1')
diff --git a/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/metadata.pxd.pxi b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/metadata.pxd.pxi
new file mode 100644
index 0000000000..fc72ac1576
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/metadata.pxd.pxi
@@ -0,0 +1,26 @@
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Convert Python metadata to a newly allocated C grpc_metadata array.
+cdef void _store_c_metadata(
+    metadata, grpc_metadata **c_metadata, size_t *c_count) except *
+
+
+# Free slices and the array allocated by _store_c_metadata.
+# NOTE(review): declared with `int count` while _store_c_metadata writes a
+# size_t count — confirm no truncation risk for very large metadata.
+cdef void _release_c_metadata(grpc_metadata *c_metadata, int count) except *
+
+
+# Build a single (key, value) metadatum tuple from C slices.
+cdef tuple _metadatum(grpc_slice key_slice, grpc_slice value_slice)
+
+
+# Build the full metadata tuple from a C grpc_metadata_array.
+cdef tuple _metadata(grpc_metadata_array *c_metadata_array)
diff --git a/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/metadata.pyx.pxi b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/metadata.pyx.pxi
new file mode 100644
index 0000000000..b2dd1e3380
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/metadata.pyx.pxi
@@ -0,0 +1,73 @@
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import collections
+
+
+# Python-visible mirror of the core initial-metadata flag bits.
+class InitialMetadataFlags:
+  used_mask = GRPC_INITIAL_METADATA_USED_MASK
+  wait_for_ready = GRPC_INITIAL_METADATA_WAIT_FOR_READY
+  wait_for_ready_explicitly_set = GRPC_INITIAL_METADATA_WAIT_FOR_READY_EXPLICITLY_SET
+
+
+# Key/value pair delivered to users for each metadata entry.
+_Metadatum = collections.namedtuple('_Metadatum', ('key', 'value',))
+
+
+cdef void _store_c_metadata(
+    metadata, grpc_metadata **c_metadata, size_t *c_count) except *:
+  # Translate an iterable of (key, value) pairs into a gpr_malloc'd
+  # grpc_metadata array; outputs are NULL/0 for None or empty metadata.
+  # Keys ending in '-bin' carry binary values and must already be bytes;
+  # all other values are encoded via _encode.
+  if metadata is None:
+    c_count[0] = 0
+    c_metadata[0] = NULL
+  else:
+    metadatum_count = len(metadata)
+    if metadatum_count == 0:
+      c_count[0] = 0
+      c_metadata[0] = NULL
+    else:
+      c_count[0] = metadatum_count
+      c_metadata[0] = <grpc_metadata *>gpr_malloc(
+          metadatum_count * sizeof(grpc_metadata))
+      for index, (key, value) in enumerate(metadata):
+        encoded_key = _encode(key)
+        # Binary ('-bin') values are passed through untouched.
+        encoded_value = value if encoded_key[-4:] == b'-bin' else _encode(value)
+        if not isinstance(encoded_value, bytes):
+          raise TypeError('Binary metadata key="%s" expected bytes, got %s' % (
+              key,
+              type(encoded_value)
+          ))
+        # Each slice copies its bytes; released by _release_c_metadata.
+        c_metadata[0][index].key = _slice_from_bytes(encoded_key)
+        c_metadata[0][index].value = _slice_from_bytes(encoded_value)
+
+
+cdef void _release_c_metadata(grpc_metadata *c_metadata, int count) except *:
+  # Unref every key/value slice then free the array allocated by
+  # _store_c_metadata. Safe no-op when count is zero (or negative).
+  if 0 < count:
+    for index in range(count):
+      grpc_slice_unref(c_metadata[index].key)
+      grpc_slice_unref(c_metadata[index].value)
+    gpr_free(c_metadata)
+
+
+cdef tuple _metadatum(grpc_slice key_slice, grpc_slice value_slice):
+  # Convert one C metadata entry into a _Metadatum: the key is decoded to
+  # str; values for '-bin' keys stay as raw bytes, others are decoded.
+  cdef bytes key = _slice_bytes(key_slice)
+  cdef bytes value = _slice_bytes(value_slice)
+  return <tuple>_Metadatum(
+      _decode(key), value if key[-4:] == b'-bin' else _decode(value))
+
+
+cdef tuple _metadata(grpc_metadata_array *c_metadata_array):
+  # Materialize an entire C metadata array as a tuple of _Metadatum pairs.
+  return tuple(
+      _metadatum(
+          c_metadata_array.metadata[index].key,
+          c_metadata_array.metadata[index].value)
+      for index in range(c_metadata_array.count))
diff --git a/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/operation.pxd.pxi b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/operation.pxd.pxi
new file mode 100644
index 0000000000..c9df32dadf
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/operation.pxd.pxi
@@ -0,0 +1,111 @@
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Declarations for the Operation hierarchy: each subclass wraps one grpc_op,
+# with c() populating the C struct before a batch and un_c() releasing /
+# harvesting results afterwards.
+cdef class Operation:
+
+  cdef void c(self) except *
+  cdef void un_c(self) except *
+
+  # TODO(https://github.com/grpc/grpc/issues/7950): Eliminate this!
+  cdef grpc_op c_op
+
+
+cdef class SendInitialMetadataOperation(Operation):
+
+  cdef readonly object _initial_metadata;
+  cdef readonly int _flags
+  # C copy of the metadata, allocated in c() and freed in un_c().
+  cdef grpc_metadata *_c_initial_metadata
+  cdef size_t _c_initial_metadata_count
+
+  cdef void c(self) except *
+  cdef void un_c(self) except *
+
+
+cdef class SendMessageOperation(Operation):
+
+  cdef readonly bytes _message
+  cdef readonly int _flags
+  # Byte buffer created in c() and destroyed in un_c().
+  cdef grpc_byte_buffer *_c_message_byte_buffer
+
+  cdef void c(self) except *
+  cdef void un_c(self) except *
+
+
+cdef class SendCloseFromClientOperation(Operation):
+
+  cdef readonly int _flags
+
+  cdef void c(self) except *
+  cdef void un_c(self) except *
+
+
+cdef class SendStatusFromServerOperation(Operation):
+
+  cdef readonly object _trailing_metadata
+  cdef readonly object _code
+  cdef readonly object _details
+  cdef readonly int _flags
+  # C copies owned between c() and un_c().
+  cdef grpc_metadata *_c_trailing_metadata
+  cdef size_t _c_trailing_metadata_count
+  cdef grpc_slice _c_details
+
+  cdef void c(self) except *
+  cdef void un_c(self) except *
+
+
+cdef class ReceiveInitialMetadataOperation(Operation):
+
+  cdef readonly int _flags
+  # Python-level result, populated by un_c().
+  cdef tuple _initial_metadata
+  cdef grpc_metadata_array _c_initial_metadata
+
+  cdef void c(self) except *
+  cdef void un_c(self) except *
+
+
+cdef class ReceiveMessageOperation(Operation):
+
+  cdef readonly int _flags
+  cdef grpc_byte_buffer *_c_message_byte_buffer
+  # Assembled message bytes, or None; populated by un_c().
+  cdef bytes _message
+
+  cdef void c(self) except *
+  cdef void un_c(self) except *
+
+
+cdef class ReceiveStatusOnClientOperation(Operation):
+
+  cdef readonly int _flags
+  # Raw C outputs written by the core during the batch.
+  cdef grpc_metadata_array _c_trailing_metadata
+  cdef grpc_status_code _c_code
+  cdef grpc_slice _c_details
+  cdef const char* _c_error_string
+  # Python-level results, populated by un_c().
+  cdef tuple _trailing_metadata
+  cdef object _code
+  cdef str _details
+  cdef str _error_string
+
+  cdef void c(self) except *
+  cdef void un_c(self) except *
+
+
+cdef class ReceiveCloseOnServerOperation(Operation):
+
+  cdef readonly int _flags
+  # Python bool derived from the C int flag in un_c().
+  cdef object _cancelled
+  cdef int _c_cancelled
+
+  cdef void c(self) except *
+  cdef void un_c(self) except *
diff --git a/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/operation.pyx.pxi b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/operation.pyx.pxi
new file mode 100644
index 0000000000..3f3fd75407
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/operation.pyx.pxi
@@ -0,0 +1,250 @@
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+cdef class Operation:
+  # Abstract base for batch operations; subclasses implement c() to fill
+  # self.c_op before the batch and un_c() to clean up / harvest results.
+
+  cdef void c(self) except *:
+    raise NotImplementedError()
+
+  cdef void un_c(self) except *:
+    raise NotImplementedError()
+
+
+cdef class SendInitialMetadataOperation(Operation):
+  # Sends the call's initial metadata with the given flags.
+
+  def __cinit__(self, initial_metadata, flags):
+    self._initial_metadata = initial_metadata
+    self._flags = flags
+
+  def type(self):
+    return GRPC_OP_SEND_INITIAL_METADATA
+
+  cdef void c(self) except *:
+    # Fill c_op; the C metadata copy is owned until un_c() releases it.
+    self.c_op.type = GRPC_OP_SEND_INITIAL_METADATA
+    self.c_op.flags = self._flags
+    _store_c_metadata(
+        self._initial_metadata, &self._c_initial_metadata,
+        &self._c_initial_metadata_count)
+    self.c_op.data.send_initial_metadata.metadata = self._c_initial_metadata
+    self.c_op.data.send_initial_metadata.count = self._c_initial_metadata_count
+    # No per-call compression level is set on this path.
+    self.c_op.data.send_initial_metadata.maybe_compression_level.is_set = 0
+
+  cdef void un_c(self) except *:
+    _release_c_metadata(
+        self._c_initial_metadata, self._c_initial_metadata_count)
+
+
+cdef class SendMessageOperation(Operation):
+  # Sends one serialized message; None is normalized to the empty message.
+
+  def __cinit__(self, bytes message, int flags):
+    if message is None:
+      self._message = b''
+    else:
+      self._message = message
+    self._flags = flags
+
+  def type(self):
+    return GRPC_OP_SEND_MESSAGE
+
+  cdef void c(self) except *:
+    self.c_op.type = GRPC_OP_SEND_MESSAGE
+    self.c_op.flags = self._flags
+    # The byte buffer takes its own reference on the slice, so the local
+    # slice can be unref'd immediately after creation.
+    cdef grpc_slice message_slice = grpc_slice_from_copied_buffer(
+        self._message, len(self._message))
+    self._c_message_byte_buffer = grpc_raw_byte_buffer_create(
+        &message_slice, 1)
+    grpc_slice_unref(message_slice)
+    self.c_op.data.send_message.send_message = self._c_message_byte_buffer
+
+  cdef void un_c(self) except *:
+    grpc_byte_buffer_destroy(self._c_message_byte_buffer)
+
+
+cdef class SendCloseFromClientOperation(Operation):
+  # Half-closes the client side of the call; carries no data, so un_c()
+  # has nothing to release.
+
+  def __cinit__(self, int flags):
+    self._flags = flags
+
+  def type(self):
+    return GRPC_OP_SEND_CLOSE_FROM_CLIENT
+
+  cdef void c(self) except *:
+    self.c_op.type = GRPC_OP_SEND_CLOSE_FROM_CLIENT
+    self.c_op.flags = self._flags
+
+  cdef void un_c(self) except *:
+    pass
+
+
+cdef class SendStatusFromServerOperation(Operation):
+  # Sends the server's status, details, and trailing metadata.
+
+  def __cinit__(self, trailing_metadata, code, object details, int flags):
+    self._trailing_metadata = trailing_metadata
+    self._code = code
+    self._details = details
+    self._flags = flags
+
+  def type(self):
+    return GRPC_OP_SEND_STATUS_FROM_SERVER
+
+  cdef void c(self) except *:
+    self.c_op.type = GRPC_OP_SEND_STATUS_FROM_SERVER
+    self.c_op.flags = self._flags
+    # Metadata and details slices are owned until un_c() releases them.
+    _store_c_metadata(
+        self._trailing_metadata, &self._c_trailing_metadata,
+        &self._c_trailing_metadata_count)
+    self.c_op.data.send_status_from_server.trailing_metadata = (
+        self._c_trailing_metadata)
+    self.c_op.data.send_status_from_server.trailing_metadata_count = (
+        self._c_trailing_metadata_count)
+    self.c_op.data.send_status_from_server.status = self._code
+    self._c_details = _slice_from_bytes(_encode(self._details))
+    self.c_op.data.send_status_from_server.status_details = &self._c_details
+
+  cdef void un_c(self) except *:
+    grpc_slice_unref(self._c_details)
+    _release_c_metadata(
+        self._c_trailing_metadata, self._c_trailing_metadata_count)
+
+
+cdef class ReceiveInitialMetadataOperation(Operation):
+  # Receives the peer's initial metadata; the Python-level result is
+  # available via initial_metadata() only after un_c() has run.
+
+  def __cinit__(self, flags):
+    self._flags = flags
+
+  def type(self):
+    return GRPC_OP_RECV_INITIAL_METADATA
+
+  cdef void c(self) except *:
+    self.c_op.type = GRPC_OP_RECV_INITIAL_METADATA
+    self.c_op.flags = self._flags
+    grpc_metadata_array_init(&self._c_initial_metadata)
+    self.c_op.data.receive_initial_metadata.receive_initial_metadata = (
+        &self._c_initial_metadata)
+
+  cdef void un_c(self) except *:
+    # Convert to Python before destroying the C array.
+    self._initial_metadata = _metadata(&self._c_initial_metadata)
+    grpc_metadata_array_destroy(&self._c_initial_metadata)
+
+  def initial_metadata(self):
+    return self._initial_metadata
+
+
+cdef class ReceiveMessageOperation(Operation):
+  # Receives one message; message() yields the reassembled bytes, or None
+  # when no message arrived or the buffer could not be read.
+
+  def __cinit__(self, flags):
+    self._flags = flags
+
+  def type(self):
+    return GRPC_OP_RECV_MESSAGE
+
+  cdef void c(self) except *:
+    self.c_op.type = GRPC_OP_RECV_MESSAGE
+    self.c_op.flags = self._flags
+    self.c_op.data.receive_message.receive_message = (
+        &self._c_message_byte_buffer)
+
+  cdef void un_c(self) except *:
+    cdef grpc_byte_buffer_reader message_reader
+    cdef bint message_reader_status
+    cdef grpc_slice message_slice
+    cdef size_t message_slice_length
+    cdef void *message_slice_pointer
+    if self._c_message_byte_buffer != NULL:
+      message_reader_status = grpc_byte_buffer_reader_init(
+          &message_reader, self._c_message_byte_buffer)
+      if message_reader_status:
+        # Accumulate all slices of the (possibly fragmented) byte buffer.
+        message = bytearray()
+        while grpc_byte_buffer_reader_next(&message_reader, &message_slice):
+          message_slice_pointer = grpc_slice_start_ptr(message_slice)
+          message_slice_length = grpc_slice_length(message_slice)
+          message += (<char *>message_slice_pointer)[:message_slice_length]
+          grpc_slice_unref(message_slice)
+        grpc_byte_buffer_reader_destroy(&message_reader)
+        self._message = bytes(message)
+      else:
+        # Reader initialization failed; report no message.
+        self._message = None
+      grpc_byte_buffer_destroy(self._c_message_byte_buffer)
+    else:
+      self._message = None
+
+  def message(self):
+    return self._message
+
+
+cdef class ReceiveStatusOnClientOperation(Operation):
+  # Receives the final status on the client: trailing metadata, status
+  # code, details string, and an optional debug error string. The Python
+  # accessors are valid only after un_c() has run.
+
+  def __cinit__(self, flags):
+    self._flags = flags
+
+  def type(self):
+    return GRPC_OP_RECV_STATUS_ON_CLIENT
+
+  cdef void c(self) except *:
+    # Point every output field of c_op at this object's C storage.
+    self.c_op.type = GRPC_OP_RECV_STATUS_ON_CLIENT
+    self.c_op.flags = self._flags
+    grpc_metadata_array_init(&self._c_trailing_metadata)
+    self.c_op.data.receive_status_on_client.trailing_metadata = (
+        &self._c_trailing_metadata)
+    self.c_op.data.receive_status_on_client.status = (
+        &self._c_code)
+    self.c_op.data.receive_status_on_client.status_details = (
+        &self._c_details)
+    self.c_op.data.receive_status_on_client.error_string = (
+        &self._c_error_string)
+
+  cdef void un_c(self) except *:
+    # Convert each C output to its Python form and release C resources.
+    self._trailing_metadata = _metadata(&self._c_trailing_metadata)
+    grpc_metadata_array_destroy(&self._c_trailing_metadata)
+    self._code = self._c_code
+    self._details = _decode(_slice_bytes(self._c_details))
+    grpc_slice_unref(self._c_details)
+    if self._c_error_string != NULL:
+      self._error_string = _decode(self._c_error_string)
+      # The core allocated the error string; we own freeing it.
+      gpr_free(<void*>self._c_error_string)
+    else:
+      self._error_string = ""
+
+  def trailing_metadata(self):
+    return self._trailing_metadata
+
+  def code(self):
+    return self._code
+
+  def details(self):
+    return self._details
+
+  def error_string(self):
+    return self._error_string
+
+
+cdef class ReceiveCloseOnServerOperation(Operation):
+  # Receives notification of call closure on the server; cancelled()
+  # reports (as a Python bool) whether the call was cancelled.
+
+  def __cinit__(self, flags):
+    self._flags = flags
+
+  def type(self):
+    return GRPC_OP_RECV_CLOSE_ON_SERVER
+
+  cdef void c(self) except *:
+    self.c_op.type = GRPC_OP_RECV_CLOSE_ON_SERVER
+    self.c_op.flags = self._flags
+    self.c_op.data.receive_close_on_server.cancelled = &self._c_cancelled
+
+  cdef void un_c(self) except *:
+    self._cancelled = bool(self._c_cancelled)
+
+  def cancelled(self):
+    return self._cancelled
diff --git a/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/propagation_bits.pxd.pxi b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/propagation_bits.pxd.pxi
new file mode 100644
index 0000000000..cd6e94c816
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/propagation_bits.pxd.pxi
@@ -0,0 +1,20 @@
+# Copyright 2018 The gRPC Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Expose the C propagation-bit macros under underscore-prefixed Cython names
+# (re-exported by PropagationConstants in the .pyx counterpart).
+cdef extern from "grpc/impl/codegen/propagation_bits.h":
+  cdef int _GRPC_PROPAGATE_DEADLINE "GRPC_PROPAGATE_DEADLINE"
+  cdef int _GRPC_PROPAGATE_CENSUS_STATS_CONTEXT "GRPC_PROPAGATE_CENSUS_STATS_CONTEXT"
+  cdef int _GRPC_PROPAGATE_CENSUS_TRACING_CONTEXT "GRPC_PROPAGATE_CENSUS_TRACING_CONTEXT"
+  cdef int _GRPC_PROPAGATE_CANCELLATION "GRPC_PROPAGATE_CANCELLATION"
+  cdef int _GRPC_PROPAGATE_DEFAULTS "GRPC_PROPAGATE_DEFAULTS"
diff --git a/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/propagation_bits.pyx.pxi b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/propagation_bits.pyx.pxi
new file mode 100644
index 0000000000..2dcc76a2db
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/propagation_bits.pyx.pxi
@@ -0,0 +1,20 @@
+# Copyright 2018 The gRPC Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Python-visible namespace for the call-context propagation bits.
+class PropagationConstants:
+  GRPC_PROPAGATE_DEADLINE = _GRPC_PROPAGATE_DEADLINE
+  GRPC_PROPAGATE_CENSUS_STATS_CONTEXT = _GRPC_PROPAGATE_CENSUS_STATS_CONTEXT
+  GRPC_PROPAGATE_CENSUS_TRACING_CONTEXT = _GRPC_PROPAGATE_CENSUS_TRACING_CONTEXT
+  GRPC_PROPAGATE_CANCELLATION = _GRPC_PROPAGATE_CANCELLATION
+  GRPC_PROPAGATE_DEFAULTS = _GRPC_PROPAGATE_DEFAULTS
diff --git a/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/records.pxd.pxi b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/records.pxd.pxi
new file mode 100644
index 0000000000..35e1bdb0ae
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/records.pxd.pxi
@@ -0,0 +1,34 @@
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Slice helpers: bytes<->grpc_slice conversion (implemented in records.pyx).
+cdef bytes _slice_bytes(grpc_slice slice)
+cdef grpc_slice _copy_slice(grpc_slice slice) nogil
+cdef grpc_slice _slice_from_bytes(bytes value) nogil
+
+
+cdef class CallDetails:
+
+  # C-level details (method, host, deadline) for an incoming call.
+  cdef grpc_call_details c_details
+
+
+cdef class SslPemKeyCertPair:
+
+  # c_pair points into the Python bytes kept alive by the two attributes.
+  cdef grpc_ssl_pem_key_cert_pair c_pair
+  cdef readonly object private_key, certificate_chain
+
+
+cdef class CompressionOptions:
+
+  cdef grpc_compression_options c_options
diff --git a/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/records.pyx.pxi b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/records.pyx.pxi
new file mode 100644
index 0000000000..05db7e3df9
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/records.pyx.pxi
@@ -0,0 +1,197 @@
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+cdef bytes _slice_bytes(grpc_slice slice):
+  # Copy a grpc_slice's contents into a new Python bytes object.
+  cdef void *start = grpc_slice_start_ptr(slice)
+  cdef size_t length = grpc_slice_length(slice)
+  return (<const char *>start)[:length]
+
+cdef grpc_slice _copy_slice(grpc_slice slice) nogil:
+  # Return an independent copy of the given slice (caller owns the result).
+  cdef void *start = grpc_slice_start_ptr(slice)
+  cdef size_t length = grpc_slice_length(slice)
+  return grpc_slice_from_copied_buffer(<const char *>start, length)
+
+cdef grpc_slice _slice_from_bytes(bytes value) nogil:
+  # Copy Python bytes into a new grpc_slice; briefly reacquires the GIL to
+  # read the bytes object's pointer and length.
+  cdef const char *value_ptr
+  cdef size_t length
+  with gil:
+    value_ptr = <const char *>value
+    length = len(value)
+  return grpc_slice_from_copied_buffer(value_ptr, length)
+
+
+# Python-visible namespaces mapping friendly names to the core's C enums
+# and channel-argument key strings.
+class ConnectivityState:
+  idle = GRPC_CHANNEL_IDLE
+  connecting = GRPC_CHANNEL_CONNECTING
+  ready = GRPC_CHANNEL_READY
+  transient_failure = GRPC_CHANNEL_TRANSIENT_FAILURE
+  shutdown = GRPC_CHANNEL_SHUTDOWN
+
+
+class ChannelArgKey:
+  enable_census = GRPC_ARG_ENABLE_CENSUS
+  max_concurrent_streams = GRPC_ARG_MAX_CONCURRENT_STREAMS
+  max_receive_message_length = GRPC_ARG_MAX_RECEIVE_MESSAGE_LENGTH
+  max_send_message_length = GRPC_ARG_MAX_SEND_MESSAGE_LENGTH
+  http2_initial_sequence_number = GRPC_ARG_HTTP2_INITIAL_SEQUENCE_NUMBER
+  default_authority = GRPC_ARG_DEFAULT_AUTHORITY
+  primary_user_agent_string = GRPC_ARG_PRIMARY_USER_AGENT_STRING
+  secondary_user_agent_string = GRPC_ARG_SECONDARY_USER_AGENT_STRING
+  ssl_session_cache = GRPC_SSL_SESSION_CACHE_ARG
+  ssl_target_name_override = GRPC_SSL_TARGET_NAME_OVERRIDE_ARG
+
+
+class WriteFlag:
+  buffer_hint = GRPC_WRITE_BUFFER_HINT
+  no_compress = GRPC_WRITE_NO_COMPRESS
+
+
+class StatusCode:
+  ok = GRPC_STATUS_OK
+  cancelled = GRPC_STATUS_CANCELLED
+  unknown = GRPC_STATUS_UNKNOWN
+  invalid_argument = GRPC_STATUS_INVALID_ARGUMENT
+  deadline_exceeded = GRPC_STATUS_DEADLINE_EXCEEDED
+  not_found = GRPC_STATUS_NOT_FOUND
+  already_exists = GRPC_STATUS_ALREADY_EXISTS
+  permission_denied = GRPC_STATUS_PERMISSION_DENIED
+  unauthenticated = GRPC_STATUS_UNAUTHENTICATED
+  resource_exhausted = GRPC_STATUS_RESOURCE_EXHAUSTED
+  failed_precondition = GRPC_STATUS_FAILED_PRECONDITION
+  aborted = GRPC_STATUS_ABORTED
+  out_of_range = GRPC_STATUS_OUT_OF_RANGE
+  unimplemented = GRPC_STATUS_UNIMPLEMENTED
+  internal = GRPC_STATUS_INTERNAL
+  unavailable = GRPC_STATUS_UNAVAILABLE
+  data_loss = GRPC_STATUS_DATA_LOSS
+
+
+class CallError:
+  ok = GRPC_CALL_OK
+  error = GRPC_CALL_ERROR
+  not_on_server = GRPC_CALL_ERROR_NOT_ON_SERVER
+  not_on_client = GRPC_CALL_ERROR_NOT_ON_CLIENT
+  already_accepted = GRPC_CALL_ERROR_ALREADY_ACCEPTED
+  already_invoked = GRPC_CALL_ERROR_ALREADY_INVOKED
+  not_invoked = GRPC_CALL_ERROR_NOT_INVOKED
+  already_finished = GRPC_CALL_ERROR_ALREADY_FINISHED
+  too_many_operations = GRPC_CALL_ERROR_TOO_MANY_OPERATIONS
+  invalid_flags = GRPC_CALL_ERROR_INVALID_FLAGS
+  invalid_metadata = GRPC_CALL_ERROR_INVALID_METADATA
+
+
+class CompletionType:
+  queue_shutdown = GRPC_QUEUE_SHUTDOWN
+  queue_timeout = GRPC_QUEUE_TIMEOUT
+  operation_complete = GRPC_OP_COMPLETE
+
+
+class OperationType:
+  send_initial_metadata = GRPC_OP_SEND_INITIAL_METADATA
+  send_message = GRPC_OP_SEND_MESSAGE
+  send_close_from_client = GRPC_OP_SEND_CLOSE_FROM_CLIENT
+  send_status_from_server = GRPC_OP_SEND_STATUS_FROM_SERVER
+  receive_initial_metadata = GRPC_OP_RECV_INITIAL_METADATA
+  receive_message = GRPC_OP_RECV_MESSAGE
+  receive_status_on_client = GRPC_OP_RECV_STATUS_ON_CLIENT
+  receive_close_on_server = GRPC_OP_RECV_CLOSE_ON_SERVER
+
+# Module-level re-exports of the compression channel-arg / metadata keys.
+GRPC_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM= (
+    _GRPC_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM)
+
+GRPC_COMPRESSION_REQUEST_ALGORITHM_MD_KEY = (
+    _GRPC_COMPRESSION_REQUEST_ALGORITHM_MD_KEY)
+
+class CompressionAlgorithm:
+  none = GRPC_COMPRESS_NONE
+  deflate = GRPC_COMPRESS_DEFLATE
+  gzip = GRPC_COMPRESS_GZIP
+
+
+class CompressionLevel:
+  none = GRPC_COMPRESS_LEVEL_NONE
+  low = GRPC_COMPRESS_LEVEL_LOW
+  medium = GRPC_COMPRESS_LEVEL_MED
+  high = GRPC_COMPRESS_LEVEL_HIGH
+
+
+cdef class CallDetails:
+  # Wraps grpc_call_details for an incoming server call; initializes the
+  # library (and fork handlers) on construction and shuts down on dealloc.
+
+  def __cinit__(self):
+    fork_handlers_and_grpc_init()
+    with nogil:
+      grpc_call_details_init(&self.c_details)
+
+  def __dealloc__(self):
+    with nogil:
+      grpc_call_details_destroy(&self.c_details)
+    grpc_shutdown()
+
+  @property
+  def method(self):
+    # Fully-qualified method name as bytes.
+    return _slice_bytes(self.c_details.method)
+
+  @property
+  def host(self):
+    return _slice_bytes(self.c_details.host)
+
+  @property
+  def deadline(self):
+    # Deadline converted from gpr timespec to a Python time value.
+    return _time_from_timespec(self.c_details.deadline)
+
+
+cdef class SslPemKeyCertPair:
+  # PEM private key + certificate chain pair for SSL credentials. The
+  # c_pair fields point into the Python bytes objects, which are kept
+  # alive for the lifetime of this wrapper by the two attributes.
+
+  def __cinit__(self, bytes private_key, bytes certificate_chain):
+    self.private_key = private_key
+    self.certificate_chain = certificate_chain
+    self.c_pair.private_key = self.private_key
+    self.c_pair.certificate_chain = self.certificate_chain
+
+
+cdef class CompressionOptions:
+  # Thin wrapper over grpc_compression_options: enable/disable algorithms
+  # and export the enabled-algorithms bitset as a channel argument.
+
+  def __cinit__(self):
+    with nogil:
+      grpc_compression_options_init(&self.c_options)
+
+  def enable_algorithm(self, grpc_compression_algorithm algorithm):
+    with nogil:
+      grpc_compression_options_enable_algorithm(&self.c_options, algorithm)
+
+  def disable_algorithm(self, grpc_compression_algorithm algorithm):
+    with nogil:
+      grpc_compression_options_disable_algorithm(&self.c_options, algorithm)
+
+  def is_algorithm_enabled(self, grpc_compression_algorithm algorithm):
+    # Returns the core's int result (nonzero when enabled).
+    cdef int result
+    with nogil:
+      result = grpc_compression_options_is_algorithm_enabled(
+          &self.c_options, algorithm)
+    return result
+
+  def to_channel_arg(self):
+    # (key, value) pair suitable for inclusion in channel arguments.
+    return (
+        GRPC_COMPRESSION_CHANNEL_ENABLED_ALGORITHMS_BITSET,
+        self.c_options.enabled_algorithms_bitset,
+    )
+
+
+def compression_algorithm_name(grpc_compression_algorithm algorithm):
+  # Look up the core's human-readable name for a compression algorithm.
+  cdef const char* name
+  with nogil:
+    grpc_compression_algorithm_name(algorithm, &name)
+  # Let Cython do the right thing with string casting
+  return name
diff --git a/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/security.pxd.pxi b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/security.pxd.pxi
new file mode 100644
index 0000000000..e6e79536bb
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/security.pxd.pxi
@@ -0,0 +1,17 @@
+# Copyright 2016 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Callback installed into the core to supply override SSL root certificates.
+cdef grpc_ssl_roots_override_result ssl_roots_override_callback(
+    char **pem_root_certs) nogil
diff --git a/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/security.pyx.pxi b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/security.pyx.pxi
new file mode 100644
index 0000000000..9cc3fd5a21
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/security.pyx.pxi
@@ -0,0 +1,85 @@
+# Copyright 2016 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from libc.string cimport memcpy
+
+cdef grpc_ssl_roots_override_callback_result_helper_docstring = None  # (no-op placeholder removed)
+
+
+def peer_identities(Call call):
+  # Return the peer-identity property values for a call as a list of bytes,
+  # or None when the call has no auth context or no identity properties.
+  cdef grpc_auth_context* auth_context
+  cdef grpc_auth_property_iterator properties
+  cdef const grpc_auth_property* property
+
+  auth_context = grpc_call_auth_context(call.c_call)
+  if auth_context == NULL:
+    return None
+  properties = grpc_auth_context_peer_identity(auth_context)
+  identities = []
+  while True:
+    property = grpc_auth_property_iterator_next(&properties)
+    if property == NULL:
+      break
+    if property.value != NULL:
+      identities.append(<bytes>(property.value))
+  # grpc_call_auth_context returned an owned reference; release it.
+  grpc_auth_context_release(auth_context)
+  return identities if identities else None
+
+def peer_identity_key(Call call):
+  # Return the property name used for the peer's identity (as bytes), or
+  # None when the call has no auth context or no identity property name.
+  cdef grpc_auth_context* auth_context
+  cdef const char* c_key
+  auth_context = grpc_call_auth_context(call.c_call)
+  if auth_context == NULL:
+    return None
+  c_key = grpc_auth_context_peer_identity_property_name(auth_context)
+  if c_key == NULL:
+    key = None
+  else:
+    # NOTE(review): this second C call is redundant — c_key already holds
+    # the same pointer and could be cast directly.
+    key = <bytes> grpc_auth_context_peer_identity_property_name(auth_context)
+  grpc_auth_context_release(auth_context)
+  return key
+
+def auth_context(Call call):
+  # Return all auth-context properties for a call as a dict mapping
+  # property name (bytes) to a list of values (bytes). Empty dict when
+  # the call has no auth context.
+  cdef grpc_auth_context* auth_context
+  cdef grpc_auth_property_iterator properties
+  cdef const grpc_auth_property* property
+
+  auth_context = grpc_call_auth_context(call.c_call)
+  if auth_context == NULL:
+    return {}
+  properties = grpc_auth_context_property_iterator(auth_context)
+  py_auth_context = {}
+  while True:
+    property = grpc_auth_property_iterator_next(&properties)
+    if property == NULL:
+      break
+    # Multiple properties may share a name; collect values into a list.
+    if property.name != NULL and property.value != NULL:
+      key = <bytes> property.name
+      if key in py_auth_context:
+        py_auth_context[key].append(<bytes>(property.value))
+      else:
+        py_auth_context[key] = [<bytes> property.value]
+  grpc_auth_context_release(auth_context)
+  return py_auth_context
+
diff --git a/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/server.pxd.pxi b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/server.pxd.pxi
new file mode 100644
index 0000000000..b89ed99d97
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/server.pxd.pxi
@@ -0,0 +1,29 @@
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+cdef class Server:
+
+ cdef grpc_server *c_server
+
+ cdef bint is_started # start has been called
+ cdef bint is_shutting_down # shutdown has been called
+ cdef bint is_shutdown # notification of complete shutdown received
+ # used at dealloc when user forgets to shutdown
+ cdef CompletionQueue backup_shutdown_queue
+ # TODO(https://github.com/grpc/grpc/issues/15662): Elide this.
+ cdef list references
+ cdef list registered_completion_queues
+
+ cdef _c_shutdown(self, CompletionQueue queue, tag)
+ cdef notify_shutdown_complete(self)
diff --git a/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/server.pyx.pxi b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/server.pyx.pxi
new file mode 100644
index 0000000000..29dabec61d
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/server.pyx.pxi
@@ -0,0 +1,165 @@
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+cdef class Server:
+
+ def __cinit__(self, object arguments, bint xds):
+ fork_handlers_and_grpc_init()
+ self.references = []
+ self.registered_completion_queues = []
+ self.is_started = False
+ self.is_shutting_down = False
+ self.is_shutdown = False
+ self.c_server = NULL
+ cdef _ChannelArgs channel_args = _ChannelArgs(arguments)
+ self.c_server = grpc_server_create(channel_args.c_args(), NULL)
+ cdef grpc_server_xds_status_notifier notifier
+ notifier.on_serving_status_update = NULL
+ notifier.user_data = NULL
+ if xds:
+ grpc_server_set_config_fetcher(self.c_server,
+ grpc_server_config_fetcher_xds_create(notifier, channel_args.c_args()))
+ self.references.append(arguments)
+
+ def request_call(
+ self, CompletionQueue call_queue not None,
+ CompletionQueue server_queue not None, tag):
+ if not self.is_started or self.is_shutting_down:
+ raise ValueError("server must be started and not shutting down")
+ if server_queue not in self.registered_completion_queues:
+ raise ValueError("server_queue must be a registered completion queue")
+ cdef _RequestCallTag request_call_tag = _RequestCallTag(tag)
+ request_call_tag.prepare()
+ cpython.Py_INCREF(request_call_tag)
+ return grpc_server_request_call(
+ self.c_server, &request_call_tag.call.c_call,
+ &request_call_tag.call_details.c_details,
+ &request_call_tag.c_invocation_metadata,
+ call_queue.c_completion_queue, server_queue.c_completion_queue,
+ <cpython.PyObject *>request_call_tag)
+
+ def register_completion_queue(
+ self, CompletionQueue queue not None):
+ if self.is_started:
+ raise ValueError("cannot register completion queues after start")
+ with nogil:
+ grpc_server_register_completion_queue(
+ self.c_server, queue.c_completion_queue, NULL)
+ self.registered_completion_queues.append(queue)
+
+ def start(self, backup_queue=True):
+ """Start the Cython gRPC Server.
+
+ Args:
+      backup_queue: a bool indicating whether to spawn a backup completion
+      queue, used when no CQ is bound to the server and the shutdown of the
+      server would otherwise be un-observable.
+ """
+ if self.is_started:
+ raise ValueError("the server has already started")
+ if backup_queue:
+ self.backup_shutdown_queue = CompletionQueue(shutdown_cq=True)
+ self.register_completion_queue(self.backup_shutdown_queue)
+ self.is_started = True
+ with nogil:
+ grpc_server_start(self.c_server)
+ if backup_queue:
+ # Ensure the core has gotten a chance to do the start-up work
+ self.backup_shutdown_queue.poll(deadline=time.time())
+
+ def add_http2_port(self, bytes address,
+ ServerCredentials server_credentials=None):
+ address = str_to_bytes(address)
+ self.references.append(address)
+ cdef int result
+ cdef char *address_c_string = address
+ if server_credentials is not None:
+ self.references.append(server_credentials)
+ with nogil:
+ result = grpc_server_add_http2_port(
+ self.c_server, address_c_string, server_credentials.c_credentials)
+ else:
+ with nogil:
+ creds = grpc_insecure_server_credentials_create()
+ result = grpc_server_add_http2_port(self.c_server,
+ address_c_string, creds)
+ grpc_server_credentials_release(creds)
+ return result
+
+ cdef _c_shutdown(self, CompletionQueue queue, tag):
+ self.is_shutting_down = True
+ cdef _ServerShutdownTag server_shutdown_tag = _ServerShutdownTag(tag, self)
+ cpython.Py_INCREF(server_shutdown_tag)
+ with nogil:
+ grpc_server_shutdown_and_notify(
+ self.c_server, queue.c_completion_queue,
+ <cpython.PyObject *>server_shutdown_tag)
+
+ def shutdown(self, CompletionQueue queue not None, tag):
+ if queue.is_shutting_down:
+ raise ValueError("queue must be live")
+ elif not self.is_started:
+ raise ValueError("the server hasn't started yet")
+ elif self.is_shutting_down:
+ return
+ elif queue not in self.registered_completion_queues:
+ raise ValueError("expected registered completion queue")
+ else:
+ self._c_shutdown(queue, tag)
+
+ cdef notify_shutdown_complete(self):
+ # called only after our server shutdown tag has emerged from a completion
+ # queue.
+ self.is_shutdown = True
+
+ def cancel_all_calls(self):
+ if not self.is_shutting_down:
+ raise UsageError("the server must be shutting down to cancel all calls")
+ elif self.is_shutdown:
+ return
+ else:
+ with nogil:
+ grpc_server_cancel_all_calls(self.c_server)
+
+ # TODO(https://github.com/grpc/grpc/issues/17515) Determine what, if any,
+ # portion of this is safe to call from __dealloc__, and potentially remove
+ # backup_shutdown_queue.
+ def destroy(self):
+ if self.c_server != NULL:
+ if not self.is_started:
+ pass
+ elif self.is_shutdown:
+ pass
+ elif not self.is_shutting_down:
+ if self.backup_shutdown_queue is None:
+ raise InternalError('Server shutdown failed: no completion queue.')
+ else:
+ # the user didn't call shutdown - use our backup queue
+ self._c_shutdown(self.backup_shutdown_queue, None)
+ # and now we wait
+ while not self.is_shutdown:
+ self.backup_shutdown_queue.poll()
+ else:
+ # We're in the process of shutting down, but have not shutdown; can't do
+ # much but repeatedly release the GIL and wait
+ while not self.is_shutdown:
+ time.sleep(0)
+ with nogil:
+ grpc_server_destroy(self.c_server)
+ self.c_server = NULL
+
+ def __dealloc__(self):
+ if self.c_server == NULL:
+ grpc_shutdown()
diff --git a/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/tag.pxd.pxi b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/tag.pxd.pxi
new file mode 100644
index 0000000000..d8ba1ea9bd
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/tag.pxd.pxi
@@ -0,0 +1,58 @@
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+cdef class _Tag:
+
+ cdef object event(self, grpc_event c_event)
+
+
+cdef class _ConnectivityTag(_Tag):
+
+ cdef readonly object _user_tag
+
+ cdef ConnectivityEvent event(self, grpc_event c_event)
+
+
+cdef class _RequestCallTag(_Tag):
+
+ cdef readonly object _user_tag
+ cdef Call call
+ cdef CallDetails call_details
+ cdef grpc_metadata_array c_invocation_metadata
+
+ cdef void prepare(self) except *
+ cdef RequestCallEvent event(self, grpc_event c_event)
+
+
+cdef class _BatchOperationTag(_Tag):
+
+ cdef object _user_tag
+ cdef readonly object _operations
+ cdef readonly object _retained_call
+ cdef grpc_op *c_ops
+ cdef size_t c_nops
+
+ cdef void prepare(self) except *
+ cdef BatchOperationEvent event(self, grpc_event c_event)
+
+
+cdef class _ServerShutdownTag(_Tag):
+
+ cdef readonly object _user_tag
+ # This allows CompletionQueue to notify the Python Server object that the
+  # underlying GRPC core server has shut down
+ cdef readonly Server _shutting_down_server
+
+ cdef ServerShutdownEvent event(self, grpc_event c_event)
diff --git a/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/tag.pyx.pxi b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/tag.pyx.pxi
new file mode 100644
index 0000000000..e80dc88767
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/tag.pyx.pxi
@@ -0,0 +1,88 @@
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+cdef class _Tag:
+
+ cdef object event(self, grpc_event c_event):
+ raise NotImplementedError()
+
+
+cdef class _ConnectivityTag(_Tag):
+
+ def __cinit__(self, user_tag):
+ self._user_tag = user_tag
+
+ cdef ConnectivityEvent event(self, grpc_event c_event):
+ return ConnectivityEvent(c_event.type, c_event.success, self._user_tag)
+
+
+cdef class _RequestCallTag(_Tag):
+
+ def __cinit__(self, user_tag):
+ self._user_tag = user_tag
+ self.call = None
+ self.call_details = None
+
+ cdef void prepare(self) except *:
+ self.call = Call()
+ self.call_details = CallDetails()
+ grpc_metadata_array_init(&self.c_invocation_metadata)
+
+ cdef RequestCallEvent event(self, grpc_event c_event):
+ cdef tuple invocation_metadata = _metadata(&self.c_invocation_metadata)
+ grpc_metadata_array_destroy(&self.c_invocation_metadata)
+ return RequestCallEvent(
+ c_event.type, c_event.success, self._user_tag, self.call,
+ self.call_details, invocation_metadata)
+
+
+cdef class _BatchOperationTag:
+
+ def __cinit__(self, user_tag, operations, call):
+ self._user_tag = user_tag
+ self._operations = operations
+ self._retained_call = call
+
+ cdef void prepare(self) except *:
+ cdef Operation operation
+ self.c_nops = 0 if self._operations is None else len(self._operations)
+ if 0 < self.c_nops:
+ self.c_ops = <grpc_op *>gpr_malloc(sizeof(grpc_op) * self.c_nops)
+ for index, operation in enumerate(self._operations):
+ operation.c()
+ self.c_ops[index] = operation.c_op
+
+ cdef BatchOperationEvent event(self, grpc_event c_event):
+ cdef Operation operation
+ if 0 < self.c_nops:
+ for operation in self._operations:
+ operation.un_c()
+ gpr_free(self.c_ops)
+ return BatchOperationEvent(
+ c_event.type, c_event.success, self._user_tag, self._operations)
+ else:
+ return BatchOperationEvent(
+ c_event.type, c_event.success, self._user_tag, ())
+
+
+cdef class _ServerShutdownTag(_Tag):
+
+ def __cinit__(self, user_tag, shutting_down_server):
+ self._user_tag = user_tag
+ self._shutting_down_server = shutting_down_server
+
+ cdef ServerShutdownEvent event(self, grpc_event c_event):
+ self._shutting_down_server.notify_shutdown_complete()
+ return ServerShutdownEvent(c_event.type, c_event.success, self._user_tag)
diff --git a/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/thread.pyx.pxi b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/thread.pyx.pxi
new file mode 100644
index 0000000000..be4cb8b9a8
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/thread.pyx.pxi
@@ -0,0 +1,59 @@
+# Copyright 2020 The gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+def _contextvars_supported():
+ """Determines if the contextvars module is supported.
+
+  We use a 'try it and see if it works' approach here rather than predicting
+ based on interpreter version in order to support older interpreters that
+ may have a backported module based on, e.g. `threading.local`.
+
+ Returns:
+ A bool indicating whether `contextvars` are supported in the current
+ environment.
+ """
+ try:
+ import contextvars
+ return True
+ except ImportError:
+ return False
+
+
+def _run_with_context(target):
+ """Runs a callable with contextvars propagated.
+
+ If contextvars are supported, the calling thread's context will be copied
+ and propagated. If they are not supported, this function is equivalent
+ to the identity function.
+
+ Args:
+ target: A callable object to wrap.
+ Returns:
+ A callable object with the same signature as `target` but with
+ contextvars propagated.
+ """
+
+
+if _contextvars_supported():
+ import contextvars
+ def _run_with_context(target):
+ ctx = contextvars.copy_context()
+ def _run(*args):
+ ctx.run(target, *args)
+ return _run
+else:
+ def _run_with_context(target):
+ def _run(*args):
+ target(*args)
+ return _run
diff --git a/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/time.pxd.pxi b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/time.pxd.pxi
new file mode 100644
index 0000000000..c46e8a98b0
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/time.pxd.pxi
@@ -0,0 +1,19 @@
+# Copyright 2018 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+cdef gpr_timespec _timespec_from_time(object time) except *
+
+
+cdef double _time_from_timespec(gpr_timespec timespec) except *
diff --git a/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/time.pyx.pxi b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/time.pyx.pxi
new file mode 100644
index 0000000000..6d181bb1d6
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/time.pyx.pxi
@@ -0,0 +1,29 @@
+# Copyright 2018 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+cdef gpr_timespec _timespec_from_time(object time) except *:
+ if time is None:
+ return gpr_inf_future(GPR_CLOCK_REALTIME)
+ else:
+ return gpr_time_from_nanos(
+ <int64_t>(<double>time * GPR_NS_PER_SEC),
+ GPR_CLOCK_REALTIME,
+ )
+
+
+cdef double _time_from_timespec(gpr_timespec timespec) except *:
+ cdef gpr_timespec real_timespec = gpr_convert_clock_type(
+ timespec, GPR_CLOCK_REALTIME)
+ return gpr_timespec_to_micros(real_timespec) / GPR_US_PER_SEC
diff --git a/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/vtable.pxd.pxi b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/vtable.pxd.pxi
new file mode 100644
index 0000000000..c96e5cb669
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/vtable.pxd.pxi
@@ -0,0 +1,23 @@
+# Copyright 2019 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+cdef void* _copy_pointer(void* pointer)
+
+cdef void _destroy_pointer(void* pointer)
+
+cdef int _compare_pointer(void* first_pointer, void* second_pointer)
+
+
+cdef grpc_arg_pointer_vtable default_vtable
diff --git a/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/vtable.pyx.pxi b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/vtable.pyx.pxi
new file mode 100644
index 0000000000..da4b81bd97
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/_cython/_cygrpc/vtable.pyx.pxi
@@ -0,0 +1,36 @@
+# Copyright 2019 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# TODO(https://github.com/grpc/grpc/issues/15662): Reform this.
+cdef void* _copy_pointer(void* pointer):
+ return pointer
+
+
+# TODO(https://github.com/grpc/grpc/issues/15662): Reform this.
+cdef void _destroy_pointer(void* pointer):
+ pass
+
+
+cdef int _compare_pointer(void* first_pointer, void* second_pointer):
+ if first_pointer < second_pointer:
+ return -1
+ elif first_pointer > second_pointer:
+ return 1
+ else:
+ return 0
+
+cdef grpc_arg_pointer_vtable default_vtable
+default_vtable.copy = &_copy_pointer
+default_vtable.destroy = &_destroy_pointer
+default_vtable.cmp = &_compare_pointer
diff --git a/contrib/python/grpcio/py2/grpc/_cython/cygrpc.pxd b/contrib/python/grpcio/py2/grpc/_cython/cygrpc.pxd
new file mode 100644
index 0000000000..ed04119143
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/_cython/cygrpc.pxd
@@ -0,0 +1,50 @@
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# distutils: language=c++
+
+cimport cpython
+
+include "_cygrpc/grpc.pxi"
+
+include "_cygrpc/arguments.pxd.pxi"
+include "_cygrpc/call.pxd.pxi"
+include "_cygrpc/channel.pxd.pxi"
+include "_cygrpc/credentials.pxd.pxi"
+include "_cygrpc/completion_queue.pxd.pxi"
+include "_cygrpc/event.pxd.pxi"
+include "_cygrpc/metadata.pxd.pxi"
+include "_cygrpc/operation.pxd.pxi"
+include "_cygrpc/propagation_bits.pxd.pxi"
+include "_cygrpc/records.pxd.pxi"
+include "_cygrpc/security.pxd.pxi"
+include "_cygrpc/server.pxd.pxi"
+include "_cygrpc/tag.pxd.pxi"
+include "_cygrpc/time.pxd.pxi"
+include "_cygrpc/vtable.pxd.pxi"
+include "_cygrpc/_hooks.pxd.pxi"
+
+
+include "_cygrpc/grpc_gevent.pxd.pxi"
+
+IF UNAME_SYSNAME != "Windows":
+ include "_cygrpc/fork_posix.pxd.pxi"
+
+# Following pxi files are part of the Aio module
+include "_cygrpc/aio/completion_queue.pxd.pxi"
+include "_cygrpc/aio/rpc_status.pxd.pxi"
+include "_cygrpc/aio/grpc_aio.pxd.pxi"
+include "_cygrpc/aio/callback_common.pxd.pxi"
+include "_cygrpc/aio/call.pxd.pxi"
+include "_cygrpc/aio/channel.pxd.pxi"
+include "_cygrpc/aio/server.pxd.pxi"
diff --git a/contrib/python/grpcio/py2/grpc/_cython/cygrpc.pyx b/contrib/python/grpcio/py2/grpc/_cython/cygrpc.pyx
new file mode 100644
index 0000000000..c7925676c3
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/_cython/cygrpc.pyx
@@ -0,0 +1,94 @@
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# distutils: language=c++
+
+cimport cpython
+
+import logging
+import os
+import sys
+import threading
+import time
+
+import grpc
+
+try:
+ import asyncio
+except ImportError:
+ # TODO(https://github.com/grpc/grpc/issues/19728) Improve how Aio Cython is
+ # distributed without breaking none compatible Python versions. For now, if
+ # Asyncio package is not available we just skip it.
+ pass
+
+# The only copy of Python logger for the Cython extension
+_LOGGER = logging.getLogger(__name__)
+
+# TODO(atash): figure out why the coverage tool gets confused about the Cython
+# coverage plugin when the following files don't have a '.pxi' suffix.
+include "_cygrpc/grpc_string.pyx.pxi"
+include "_cygrpc/arguments.pyx.pxi"
+include "_cygrpc/call.pyx.pxi"
+include "_cygrpc/channel.pyx.pxi"
+include "_cygrpc/channelz.pyx.pxi"
+include "_cygrpc/csds.pyx.pxi"
+include "_cygrpc/credentials.pyx.pxi"
+include "_cygrpc/completion_queue.pyx.pxi"
+include "_cygrpc/event.pyx.pxi"
+include "_cygrpc/metadata.pyx.pxi"
+include "_cygrpc/operation.pyx.pxi"
+include "_cygrpc/propagation_bits.pyx.pxi"
+include "_cygrpc/records.pyx.pxi"
+include "_cygrpc/security.pyx.pxi"
+include "_cygrpc/server.pyx.pxi"
+include "_cygrpc/tag.pyx.pxi"
+include "_cygrpc/time.pyx.pxi"
+include "_cygrpc/vtable.pyx.pxi"
+include "_cygrpc/_hooks.pyx.pxi"
+
+include "_cygrpc/grpc_gevent.pyx.pxi"
+
+include "_cygrpc/thread.pyx.pxi"
+
+IF UNAME_SYSNAME == "Windows":
+ include "_cygrpc/fork_windows.pyx.pxi"
+ELSE:
+ include "_cygrpc/fork_posix.pyx.pxi"
+
+# Following pxi files are part of the Aio module
+include "_cygrpc/aio/common.pyx.pxi"
+include "_cygrpc/aio/rpc_status.pyx.pxi"
+include "_cygrpc/aio/completion_queue.pyx.pxi"
+include "_cygrpc/aio/callback_common.pyx.pxi"
+include "_cygrpc/aio/grpc_aio.pyx.pxi"
+include "_cygrpc/aio/call.pyx.pxi"
+include "_cygrpc/aio/channel.pyx.pxi"
+include "_cygrpc/aio/server.pyx.pxi"
+
+
+#
+# initialize gRPC
+#
+cdef extern from "Python.h":
+
+ int PyEval_InitThreads()
+
+cdef _initialize():
+ # We have Python callbacks called by c-core threads, this ensures the GIL
+ # is initialized.
+ PyEval_InitThreads()
+ import ssl
+ grpc_dont_init_openssl()
+ # Load Arcadia certs in ComputePemRootCerts and do not override here.
+
+_initialize()
diff --git a/contrib/python/grpcio/py2/grpc/_grpcio_metadata.py b/contrib/python/grpcio/py2/grpc/_grpcio_metadata.py
new file mode 100644
index 0000000000..ac2ab89f4c
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/_grpcio_metadata.py
@@ -0,0 +1 @@
+__version__ = """1.50.0""" \ No newline at end of file
diff --git a/contrib/python/grpcio/py2/grpc/_interceptor.py b/contrib/python/grpcio/py2/grpc/_interceptor.py
new file mode 100644
index 0000000000..ee63cb3145
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/_interceptor.py
@@ -0,0 +1,562 @@
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Implementation of gRPC Python interceptors."""
+
+import collections
+import sys
+
+import grpc
+
+
+class _ServicePipeline(object):
+
+ def __init__(self, interceptors):
+ self.interceptors = tuple(interceptors)
+
+ def _continuation(self, thunk, index):
+ return lambda context: self._intercept_at(thunk, index, context)
+
+ def _intercept_at(self, thunk, index, context):
+ if index < len(self.interceptors):
+ interceptor = self.interceptors[index]
+ thunk = self._continuation(thunk, index + 1)
+ return interceptor.intercept_service(thunk, context)
+ else:
+ return thunk(context)
+
+ def execute(self, thunk, context):
+ return self._intercept_at(thunk, 0, context)
+
+
+def service_pipeline(interceptors):
+ return _ServicePipeline(interceptors) if interceptors else None
+
+
+class _ClientCallDetails(
+ collections.namedtuple('_ClientCallDetails',
+ ('method', 'timeout', 'metadata', 'credentials',
+ 'wait_for_ready', 'compression')),
+ grpc.ClientCallDetails):
+ pass
+
+
+def _unwrap_client_call_details(call_details, default_details):
+ try:
+ method = call_details.method
+ except AttributeError:
+ method = default_details.method
+
+ try:
+ timeout = call_details.timeout
+ except AttributeError:
+ timeout = default_details.timeout
+
+ try:
+ metadata = call_details.metadata
+ except AttributeError:
+ metadata = default_details.metadata
+
+ try:
+ credentials = call_details.credentials
+ except AttributeError:
+ credentials = default_details.credentials
+
+ try:
+ wait_for_ready = call_details.wait_for_ready
+ except AttributeError:
+ wait_for_ready = default_details.wait_for_ready
+
+ try:
+ compression = call_details.compression
+ except AttributeError:
+ compression = default_details.compression
+
+ return method, timeout, metadata, credentials, wait_for_ready, compression
+
+
+class _FailureOutcome(grpc.RpcError, grpc.Future, grpc.Call): # pylint: disable=too-many-ancestors
+
+ def __init__(self, exception, traceback):
+ super(_FailureOutcome, self).__init__()
+ self._exception = exception
+ self._traceback = traceback
+
+ def initial_metadata(self):
+ return None
+
+ def trailing_metadata(self):
+ return None
+
+ def code(self):
+ return grpc.StatusCode.INTERNAL
+
+ def details(self):
+ return 'Exception raised while intercepting the RPC'
+
+ def cancel(self):
+ return False
+
+ def cancelled(self):
+ return False
+
+ def is_active(self):
+ return False
+
+ def time_remaining(self):
+ return None
+
+ def running(self):
+ return False
+
+ def done(self):
+ return True
+
+ def result(self, ignored_timeout=None):
+ raise self._exception
+
+ def exception(self, ignored_timeout=None):
+ return self._exception
+
+ def traceback(self, ignored_timeout=None):
+ return self._traceback
+
+ def add_callback(self, unused_callback):
+ return False
+
+ def add_done_callback(self, fn):
+ fn(self)
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ raise self._exception
+
+ def next(self):
+ return self.__next__()
+
+
+class _UnaryOutcome(grpc.Call, grpc.Future):
+
+ def __init__(self, response, call):
+ self._response = response
+ self._call = call
+
+ def initial_metadata(self):
+ return self._call.initial_metadata()
+
+ def trailing_metadata(self):
+ return self._call.trailing_metadata()
+
+ def code(self):
+ return self._call.code()
+
+ def details(self):
+ return self._call.details()
+
+ def is_active(self):
+ return self._call.is_active()
+
+ def time_remaining(self):
+ return self._call.time_remaining()
+
+ def cancel(self):
+ return self._call.cancel()
+
+ def add_callback(self, callback):
+ return self._call.add_callback(callback)
+
+ def cancelled(self):
+ return False
+
+ def running(self):
+ return False
+
+ def done(self):
+ return True
+
+ def result(self, ignored_timeout=None):
+ return self._response
+
+ def exception(self, ignored_timeout=None):
+ return None
+
+ def traceback(self, ignored_timeout=None):
+ return None
+
+ def add_done_callback(self, fn):
+ fn(self)
+
+
+class _UnaryUnaryMultiCallable(grpc.UnaryUnaryMultiCallable):
+
+ def __init__(self, thunk, method, interceptor):
+ self._thunk = thunk
+ self._method = method
+ self._interceptor = interceptor
+
+ def __call__(self,
+ request,
+ timeout=None,
+ metadata=None,
+ credentials=None,
+ wait_for_ready=None,
+ compression=None):
+ response, ignored_call = self._with_call(request,
+ timeout=timeout,
+ metadata=metadata,
+ credentials=credentials,
+ wait_for_ready=wait_for_ready,
+ compression=compression)
+ return response
+
+ def _with_call(self,
+ request,
+ timeout=None,
+ metadata=None,
+ credentials=None,
+ wait_for_ready=None,
+ compression=None):
+ client_call_details = _ClientCallDetails(self._method, timeout,
+ metadata, credentials,
+ wait_for_ready, compression)
+
+ def continuation(new_details, request):
+ (new_method, new_timeout, new_metadata, new_credentials,
+ new_wait_for_ready,
+ new_compression) = (_unwrap_client_call_details(
+ new_details, client_call_details))
+ try:
+ response, call = self._thunk(new_method).with_call(
+ request,
+ timeout=new_timeout,
+ metadata=new_metadata,
+ credentials=new_credentials,
+ wait_for_ready=new_wait_for_ready,
+ compression=new_compression)
+ return _UnaryOutcome(response, call)
+ except grpc.RpcError as rpc_error:
+ return rpc_error
+ except Exception as exception: # pylint:disable=broad-except
+ return _FailureOutcome(exception, sys.exc_info()[2])
+
+ call = self._interceptor.intercept_unary_unary(continuation,
+ client_call_details,
+ request)
+ return call.result(), call
+
+ def with_call(self,
+ request,
+ timeout=None,
+ metadata=None,
+ credentials=None,
+ wait_for_ready=None,
+ compression=None):
+ return self._with_call(request,
+ timeout=timeout,
+ metadata=metadata,
+ credentials=credentials,
+ wait_for_ready=wait_for_ready,
+ compression=compression)
+
    def future(self,
               request,
               timeout=None,
               metadata=None,
               credentials=None,
               wait_for_ready=None,
               compression=None):
        """Starts the intercepted RPC asynchronously and returns a future.

        Exceptions raised by the interceptor itself are not propagated;
        they are wrapped in a _FailureOutcome so the caller always gets a
        future-like object.
        """
        client_call_details = _ClientCallDetails(self._method, timeout,
                                                 metadata, credentials,
                                                 wait_for_ready, compression)

        # Continuation handed to the interceptor: invokes the underlying
        # multi-callable with the (possibly interceptor-modified) details.
        def continuation(new_details, request):
            (new_method, new_timeout, new_metadata, new_credentials,
             new_wait_for_ready,
             new_compression) = (_unwrap_client_call_details(
                 new_details, client_call_details))
            return self._thunk(new_method).future(
                request,
                timeout=new_timeout,
                metadata=new_metadata,
                credentials=new_credentials,
                wait_for_ready=new_wait_for_ready,
                compression=new_compression)

        try:
            return self._interceptor.intercept_unary_unary(
                continuation, client_call_details, request)
        except Exception as exception:  # pylint:disable=broad-except
            return _FailureOutcome(exception, sys.exc_info()[2])
+
+
class _UnaryStreamMultiCallable(grpc.UnaryStreamMultiCallable):
    """Unary-stream multi-callable that routes calls through an interceptor."""

    def __init__(self, thunk, method, interceptor):
        # thunk: factory mapping a method name to the underlying callable,
        # allowing the interceptor to redirect the call to another method.
        self._thunk = thunk
        self._method = method
        self._interceptor = interceptor

    def __call__(self,
                 request,
                 timeout=None,
                 metadata=None,
                 credentials=None,
                 wait_for_ready=None,
                 compression=None):
        client_call_details = _ClientCallDetails(self._method, timeout,
                                                 metadata, credentials,
                                                 wait_for_ready, compression)

        # Continuation handed to the interceptor: invokes the underlying
        # callable with the (possibly interceptor-modified) call details.
        def continuation(new_details, request):
            (new_method, new_timeout, new_metadata, new_credentials,
             new_wait_for_ready,
             new_compression) = (_unwrap_client_call_details(
                 new_details, client_call_details))
            return self._thunk(new_method)(request,
                                           timeout=new_timeout,
                                           metadata=new_metadata,
                                           credentials=new_credentials,
                                           wait_for_ready=new_wait_for_ready,
                                           compression=new_compression)

        try:
            return self._interceptor.intercept_unary_stream(
                continuation, client_call_details, request)
        except Exception as exception:  # pylint:disable=broad-except
            # Surface interceptor failures as a failed call/iterator object
            # rather than raising into the caller.
            return _FailureOutcome(exception, sys.exc_info()[2])
+
+
class _StreamUnaryMultiCallable(grpc.StreamUnaryMultiCallable):
    """Stream-unary multi-callable that routes calls through an interceptor."""

    def __init__(self, thunk, method, interceptor):
        # thunk: factory mapping a method name to the underlying callable.
        self._thunk = thunk
        self._method = method
        self._interceptor = interceptor

    def __call__(self,
                 request_iterator,
                 timeout=None,
                 metadata=None,
                 credentials=None,
                 wait_for_ready=None,
                 compression=None):
        # Blocking invocation: returns only the response, discarding the call.
        response, ignored_call = self._with_call(request_iterator,
                                                 timeout=timeout,
                                                 metadata=metadata,
                                                 credentials=credentials,
                                                 wait_for_ready=wait_for_ready,
                                                 compression=compression)
        return response

    def _with_call(self,
                   request_iterator,
                   timeout=None,
                   metadata=None,
                   credentials=None,
                   wait_for_ready=None,
                   compression=None):
        """Runs the intercepted RPC; returns (response, call)."""
        client_call_details = _ClientCallDetails(self._method, timeout,
                                                 metadata, credentials,
                                                 wait_for_ready, compression)

        # Continuation handed to the interceptor. Both RPC errors and
        # arbitrary exceptions are returned (not raised) as outcome objects
        # so result() below re-raises them uniformly.
        def continuation(new_details, request_iterator):
            (new_method, new_timeout, new_metadata, new_credentials,
             new_wait_for_ready,
             new_compression) = (_unwrap_client_call_details(
                 new_details, client_call_details))
            try:
                response, call = self._thunk(new_method).with_call(
                    request_iterator,
                    timeout=new_timeout,
                    metadata=new_metadata,
                    credentials=new_credentials,
                    wait_for_ready=new_wait_for_ready,
                    compression=new_compression)
                return _UnaryOutcome(response, call)
            except grpc.RpcError as rpc_error:
                return rpc_error
            except Exception as exception:  # pylint:disable=broad-except
                return _FailureOutcome(exception, sys.exc_info()[2])

        call = self._interceptor.intercept_stream_unary(continuation,
                                                        client_call_details,
                                                        request_iterator)
        return call.result(), call

    def with_call(self,
                  request_iterator,
                  timeout=None,
                  metadata=None,
                  credentials=None,
                  wait_for_ready=None,
                  compression=None):
        """Invokes the intercepted RPC and returns (response, call)."""
        return self._with_call(request_iterator,
                               timeout=timeout,
                               metadata=metadata,
                               credentials=credentials,
                               wait_for_ready=wait_for_ready,
                               compression=compression)

    def future(self,
               request_iterator,
               timeout=None,
               metadata=None,
               credentials=None,
               wait_for_ready=None,
               compression=None):
        """Starts the intercepted RPC asynchronously and returns a future."""
        client_call_details = _ClientCallDetails(self._method, timeout,
                                                 metadata, credentials,
                                                 wait_for_ready, compression)

        def continuation(new_details, request_iterator):
            (new_method, new_timeout, new_metadata, new_credentials,
             new_wait_for_ready,
             new_compression) = (_unwrap_client_call_details(
                 new_details, client_call_details))
            return self._thunk(new_method).future(
                request_iterator,
                timeout=new_timeout,
                metadata=new_metadata,
                credentials=new_credentials,
                wait_for_ready=new_wait_for_ready,
                compression=new_compression)

        try:
            return self._interceptor.intercept_stream_unary(
                continuation, client_call_details, request_iterator)
        except Exception as exception:  # pylint:disable=broad-except
            return _FailureOutcome(exception, sys.exc_info()[2])
+
+
class _StreamStreamMultiCallable(grpc.StreamStreamMultiCallable):
    """Stream-stream multi-callable that routes calls through an interceptor."""

    def __init__(self, thunk, method, interceptor):
        # thunk: factory mapping a method name to the underlying callable.
        self._thunk = thunk
        self._method = method
        self._interceptor = interceptor

    def __call__(self,
                 request_iterator,
                 timeout=None,
                 metadata=None,
                 credentials=None,
                 wait_for_ready=None,
                 compression=None):
        client_call_details = _ClientCallDetails(self._method, timeout,
                                                 metadata, credentials,
                                                 wait_for_ready, compression)

        # Continuation handed to the interceptor: invokes the underlying
        # callable with the (possibly interceptor-modified) call details.
        def continuation(new_details, request_iterator):
            (new_method, new_timeout, new_metadata, new_credentials,
             new_wait_for_ready,
             new_compression) = (_unwrap_client_call_details(
                 new_details, client_call_details))
            return self._thunk(new_method)(request_iterator,
                                           timeout=new_timeout,
                                           metadata=new_metadata,
                                           credentials=new_credentials,
                                           wait_for_ready=new_wait_for_ready,
                                           compression=new_compression)

        try:
            return self._interceptor.intercept_stream_stream(
                continuation, client_call_details, request_iterator)
        except Exception as exception:  # pylint:disable=broad-except
            # Surface interceptor failures as a failed call/iterator object.
            return _FailureOutcome(exception, sys.exc_info()[2])
+
+
class _Channel(grpc.Channel):
    """A grpc.Channel that wraps its multi-callables with one interceptor.

    Only the callable kind matching the interceptor's interface is wrapped;
    every other RPC kind is created directly on the underlying channel.
    """

    def __init__(self, channel, interceptor):
        self._channel = channel
        self._interceptor = interceptor

    def subscribe(self, callback, try_to_connect=False):
        self._channel.subscribe(callback, try_to_connect=try_to_connect)

    def unsubscribe(self, callback):
        self._channel.unsubscribe(callback)

    def unary_unary(self,
                    method,
                    request_serializer=None,
                    response_deserializer=None):
        # thunk lets the interceptor redirect to a different method name.
        thunk = lambda m: self._channel.unary_unary(m, request_serializer,
                                                    response_deserializer)
        if isinstance(self._interceptor, grpc.UnaryUnaryClientInterceptor):
            return _UnaryUnaryMultiCallable(thunk, method, self._interceptor)
        else:
            return thunk(method)

    def unary_stream(self,
                     method,
                     request_serializer=None,
                     response_deserializer=None):
        thunk = lambda m: self._channel.unary_stream(m, request_serializer,
                                                     response_deserializer)
        if isinstance(self._interceptor, grpc.UnaryStreamClientInterceptor):
            return _UnaryStreamMultiCallable(thunk, method, self._interceptor)
        else:
            return thunk(method)

    def stream_unary(self,
                     method,
                     request_serializer=None,
                     response_deserializer=None):
        thunk = lambda m: self._channel.stream_unary(m, request_serializer,
                                                     response_deserializer)
        if isinstance(self._interceptor, grpc.StreamUnaryClientInterceptor):
            return _StreamUnaryMultiCallable(thunk, method, self._interceptor)
        else:
            return thunk(method)

    def stream_stream(self,
                      method,
                      request_serializer=None,
                      response_deserializer=None):
        thunk = lambda m: self._channel.stream_stream(m, request_serializer,
                                                      response_deserializer)
        if isinstance(self._interceptor, grpc.StreamStreamClientInterceptor):
            return _StreamStreamMultiCallable(thunk, method, self._interceptor)
        else:
            return thunk(method)

    def _close(self):
        self._channel.close()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self._close()
        # Never suppress exceptions raised inside the `with` block.
        return False

    def close(self):
        # Consistency fix: delegate to _close() so channel shutdown has a
        # single implementation (previously duplicated self._channel.close()).
        self._close()
+
+
def intercept_channel(channel, *interceptors):
    """Wraps `channel` so RPCs pass through the given client interceptors.

    Interceptors are applied so that the first one listed is outermost
    (runs first on each RPC), which is why wrapping happens in reverse.

    Args:
      channel: A grpc.Channel.
      *interceptors: Client interceptors implementing at least one of the
        four grpc.*ClientInterceptor interfaces.

    Returns:
      A grpc.Channel routing RPCs through the interceptors.

    Raises:
      TypeError: If an argument implements none of the interceptor
        interfaces.
    """
    for interceptor in reversed(list(interceptors)):
        if not isinstance(interceptor,
                          (grpc.UnaryUnaryClientInterceptor,
                           grpc.UnaryStreamClientInterceptor,
                           grpc.StreamUnaryClientInterceptor,
                           grpc.StreamStreamClientInterceptor)):
            # Bug fix: the original message ended with a dangling ' or '.
            raise TypeError('interceptor must be '
                            'grpc.UnaryUnaryClientInterceptor or '
                            'grpc.UnaryStreamClientInterceptor or '
                            'grpc.StreamUnaryClientInterceptor or '
                            'grpc.StreamStreamClientInterceptor')
        channel = _Channel(channel, interceptor)
    return channel
diff --git a/contrib/python/grpcio/py2/grpc/_plugin_wrapping.py b/contrib/python/grpcio/py2/grpc/_plugin_wrapping.py
new file mode 100644
index 0000000000..ad74b256a3
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/_plugin_wrapping.py
@@ -0,0 +1,113 @@
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import collections
+import logging
+import threading
+
+import grpc
+from grpc import _common
+from grpc._cython import cygrpc
+
+_LOGGER = logging.getLogger(__name__)
+
+
class _AuthMetadataContext(
        collections.namedtuple('AuthMetadataContext', (
            'service_url',
            'method_name',
        )), grpc.AuthMetadataContext):
    """Immutable (service_url, method_name) pair handed to auth plugins."""
    pass
+
+
+class _CallbackState(object):
+
+ def __init__(self):
+ self.lock = threading.Lock()
+ self.called = False
+ self.exception = None
+
+
class _AuthMetadataPluginCallback(grpc.AuthMetadataPluginCallback):
    """Callback handed to an AuthMetadataPlugin; forwards results to Core.

    Enforces that the plugin invokes the callback at most once and that
    the plugin did not previously raise.
    """

    def __init__(self, state, callback):
        self._state = state
        self._callback = callback

    def __call__(self, metadata, error):
        with self._state.lock:
            if self._state.exception is None:
                if self._state.called:
                    raise RuntimeError(
                        'AuthMetadataPluginCallback invoked more than once!')
                else:
                    self._state.called = True
            else:
                raise RuntimeError(
                    'AuthMetadataPluginCallback raised exception "{}"!'.format(
                        self._state.exception))
        if error is None:
            # Success: hand the metadata to Core with an OK status.
            self._callback(metadata, cygrpc.StatusCode.ok, None)
        else:
            # Failure: report INTERNAL with the stringified error.
            self._callback(None, cygrpc.StatusCode.internal,
                           _common.encode(str(error)))
+
+
class _Plugin(object):
    """Adapts a user AuthMetadataPlugin for invocation from Core threads."""

    def __init__(self, metadata_plugin):
        self._metadata_plugin = metadata_plugin
        self._stored_ctx = None

        try:
            import contextvars  # pylint: disable=wrong-import-position

            # The plugin may be invoked on a thread created by Core, which will not
            # have the context propagated. This context is stored and installed in
            # the thread invoking the plugin.
            self._stored_ctx = contextvars.copy_context()
        except ImportError:
            # Support versions predating contextvars.
            pass

    def __call__(self, service_url, method_name, callback):
        # NOTE(review): _stored_ctx is captured above but not installed in
        # this code path — confirm whether __call__ should run inside
        # self._stored_ctx as the comment in __init__ implies.
        context = _AuthMetadataContext(_common.decode(service_url),
                                       _common.decode(method_name))
        callback_state = _CallbackState()
        try:
            self._metadata_plugin(
                context, _AuthMetadataPluginCallback(callback_state, callback))
        except Exception as exception:  # pylint: disable=broad-except
            _LOGGER.exception(
                'AuthMetadataPluginCallback "%s" raised exception!',
                self._metadata_plugin)
            with callback_state.lock:
                callback_state.exception = exception
                # If the plugin already delivered a result before raising,
                # do not report the failure a second time.
                if callback_state.called:
                    return
            callback(None, cygrpc.StatusCode.internal,
                     _common.encode(str(exception)))
+
+
def metadata_plugin_call_credentials(metadata_plugin, name):
    """Wraps an AuthMetadataPlugin into grpc.CallCredentials.

    When `name` is omitted, falls back to the plugin's __name__ and then
    to its class name.
    """
    if name is None:
        effective_name = getattr(metadata_plugin, '__name__',
                                 metadata_plugin.__class__.__name__)
    else:
        effective_name = name
    return grpc.CallCredentials(
        cygrpc.MetadataPluginCallCredentials(_Plugin(metadata_plugin),
                                             _common.encode(effective_name)))
diff --git a/contrib/python/grpcio/py2/grpc/_runtime_protos.py b/contrib/python/grpcio/py2/grpc/_runtime_protos.py
new file mode 100644
index 0000000000..2a3e1d459a
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/_runtime_protos.py
@@ -0,0 +1,155 @@
+# Copyright 2020 The gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+_REQUIRED_SYMBOLS = ("_protos", "_services", "_protos_and_services")
+_MINIMUM_VERSION = (3, 5, 0)
+
+_UNINSTALLED_TEMPLATE = "Install the grpcio-tools package (1.32.0+) to use the {} function."
+_VERSION_ERROR_TEMPLATE = "The {} function is only on available on Python 3.X interpreters."
+
+
def _has_runtime_proto_symbols(mod):
    """Reports whether `mod` exposes every runtime-proto entry point."""
    for symbol in _REQUIRED_SYMBOLS:
        if not hasattr(mod, symbol):
            return False
    return True
+
+
+def _is_grpc_tools_importable():
+ try:
+ import grpc_tools # pylint: disable=unused-import
+ return True
+ except ImportError as e:
+ # NOTE: It's possible that we're encountering a transitive ImportError, so
+ # we check for that and re-raise if so.
+ if "grpc_tools" not in e.args[0]:
+ raise
+ return False
+
+
def _call_with_lazy_import(fn_name, protobuf_path):
    """Calls one of the three functions, lazily importing grpc_tools.

    Args:
      fn_name: The name of the function to import from grpc_tools.protoc.
      protobuf_path: The path to import.

    Returns:
      The appropriate module object.

    Raises:
      NotImplementedError: On unsupported interpreters or when grpc_tools
        is absent or too old.
    """
    # Guard clauses replace the original nested if/else.
    if sys.version_info < _MINIMUM_VERSION:
        raise NotImplementedError(_VERSION_ERROR_TEMPLATE.format(fn_name))
    if not _is_grpc_tools_importable():
        raise NotImplementedError(_UNINSTALLED_TEMPLATE.format(fn_name))
    import grpc_tools.protoc
    if not _has_runtime_proto_symbols(grpc_tools.protoc):
        raise NotImplementedError(_UNINSTALLED_TEMPLATE.format(fn_name))
    return getattr(grpc_tools.protoc, '_' + fn_name)(protobuf_path)
+
+
def protos(protobuf_path):  # pylint: disable=unused-argument
    """Returns a module generated by the indicated .proto file.

    THIS IS AN EXPERIMENTAL API.

    Use this function to retrieve classes corresponding to message
    definitions in the .proto file.

    To inspect the contents of the returned module, use the dir function.
    For example:

    ```
    protos = grpc.protos("foo.proto")
    print(dir(protos))
    ```

    The returned module object corresponds to the _pb2.py file generated
    by protoc. The path is expected to be relative to an entry on sys.path
    and all transitive dependencies of the file should also be resolveable
    from an entry on sys.path.

    To completely disable the machinery behind this function, set the
    GRPC_PYTHON_DISABLE_DYNAMIC_STUBS environment variable to "true".

    Args:
      protobuf_path: The path to the .proto file on the filesystem. This path
        must be resolveable from an entry on sys.path and so must all of its
        transitive dependencies.

    Returns:
      A module object corresponding to the message code for the indicated
      .proto file. Equivalent to a generated _pb2.py file.
    """
    # NOTE(review): the unused-argument pragma is stale — protobuf_path is
    # forwarded to _call_with_lazy_import below.
    return _call_with_lazy_import("protos", protobuf_path)
+
+
def services(protobuf_path):  # pylint: disable=unused-argument
    """Returns a module generated by the indicated .proto file.

    THIS IS AN EXPERIMENTAL API.

    Use this function to retrieve classes and functions corresponding to
    service definitions in the .proto file, including both stub and servicer
    definitions.

    To inspect the contents of the returned module, use the dir function.
    For example:

    ```
    services = grpc.services("foo.proto")
    print(dir(services))
    ```

    The returned module object corresponds to the _pb2_grpc.py file generated
    by protoc. The path is expected to be relative to an entry on sys.path
    and all transitive dependencies of the file should also be resolveable
    from an entry on sys.path.

    To completely disable the machinery behind this function, set the
    GRPC_PYTHON_DISABLE_DYNAMIC_STUBS environment variable to "true".

    Args:
      protobuf_path: The path to the .proto file on the filesystem. This path
        must be resolveable from an entry on sys.path and so must all of its
        transitive dependencies.

    Returns:
      A module object corresponding to the stub/service code for the indicated
      .proto file. Equivalent to a generated _pb2_grpc.py file.
    """
    # NOTE(review): the unused-argument pragma is stale — protobuf_path is
    # forwarded to _call_with_lazy_import below.
    return _call_with_lazy_import("services", protobuf_path)
+
+
def protos_and_services(protobuf_path):  # pylint: disable=unused-argument
    """Returns a 2-tuple of modules corresponding to protos and services.

    THIS IS AN EXPERIMENTAL API.

    The return value of this function is equivalent to a call to protos and a
    call to services.

    To completely disable the machinery behind this function, set the
    GRPC_PYTHON_DISABLE_DYNAMIC_STUBS environment variable to "true".

    Args:
      protobuf_path: The path to the .proto file on the filesystem. This path
        must be resolveable from an entry on sys.path and so must all of its
        transitive dependencies.

    Returns:
      A 2-tuple of module objects corresponding to (protos(path), services(path)).
    """
    # NOTE(review): the unused-argument pragma is stale — protobuf_path is
    # forwarded to _call_with_lazy_import below.
    return _call_with_lazy_import("protos_and_services", protobuf_path)
diff --git a/contrib/python/grpcio/py2/grpc/_server.py b/contrib/python/grpcio/py2/grpc/_server.py
new file mode 100644
index 0000000000..58ab14feb3
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/_server.py
@@ -0,0 +1,1003 @@
+# Copyright 2016 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Service-side implementation of gRPC Python."""
+
+import collections
+from concurrent import futures
+import enum
+import logging
+import threading
+import time
+
+import grpc
+from grpc import _common
+from grpc import _compression
+from grpc import _interceptor
+from grpc._cython import cygrpc
+import six
+
+_LOGGER = logging.getLogger(__name__)
+
# Completion-queue tags for server lifecycle events.
_SHUTDOWN_TAG = 'shutdown'
_REQUEST_CALL_TAG = 'request_call'

# Tokens tracked in _RPCState.due identifying outstanding batch operations.
_RECEIVE_CLOSE_ON_SERVER_TOKEN = 'receive_close_on_server'
_SEND_INITIAL_METADATA_TOKEN = 'send_initial_metadata'
_RECEIVE_MESSAGE_TOKEN = 'receive_message'
_SEND_MESSAGE_TOKEN = 'send_message'
_SEND_INITIAL_METADATA_AND_SEND_MESSAGE_TOKEN = (
    'send_initial_metadata * send_message')
_SEND_STATUS_FROM_SERVER_TOKEN = 'send_status_from_server'
_SEND_INITIAL_METADATA_AND_SEND_STATUS_FROM_SERVER_TOKEN = (
    'send_initial_metadata * send_status_from_server')

# Client-side stream states as seen by the server.
_OPEN = 'open'
_CLOSED = 'closed'
_CANCELLED = 'cancelled'

_EMPTY_FLAGS = 0

# Polling period for detecting a garbage-collected server.
_DEALLOCATED_SERVER_CHECK_PERIOD_S = 1.0
_INF_TIMEOUT = 1e9
+
+
+def _serialized_request(request_event):
+ return request_event.batch_operations[0].message()
+
+
def _application_code(code):
    # Maps a grpc.StatusCode to its Cython-level status code, defaulting to
    # `unknown` for codes absent from the translation table.
    cygrpc_code = _common.STATUS_CODE_TO_CYGRPC_STATUS_CODE.get(code)
    return cygrpc.StatusCode.unknown if cygrpc_code is None else cygrpc_code
+
+
def _completion_code(state):
    # Status for a normally-completing RPC: OK unless the handler set a code.
    if state.code is None:
        return cygrpc.StatusCode.ok
    else:
        return _application_code(state.code)
+
+
def _abortion_code(state, code):
    # Status for an aborting RPC: the supplied internal code unless the
    # handler explicitly set one.
    if state.code is None:
        return code
    else:
        return _application_code(state.code)
+
+
+def _details(state):
+ return b'' if state.details is None else state.details
+
+
class _HandlerCallDetails(
        collections.namedtuple('_HandlerCallDetails', (
            'method',
            'invocation_metadata',
        )), grpc.HandlerCallDetails):
    """Immutable (method, invocation_metadata) pair given to handler lookup."""
    pass
+
+
class _RPCState(object):
    """Mutable per-RPC server state, guarded by `condition`."""

    def __init__(self):
        self.condition = threading.Condition()
        self.due = set()  # tokens of batch operations still outstanding
        self.request = None  # most recently received, not-yet-consumed request
        self.client = _OPEN  # _OPEN / _CLOSED / _CANCELLED
        self.initial_metadata_allowed = True  # False once metadata was sent
        self.compression_algorithm = None
        self.disable_next_compression = False  # one-shot per-message flag
        self.trailing_metadata = None
        self.code = None  # application-set status code, if any
        self.details = None  # application-set status details (bytes)
        self.statused = False  # True once final status batch was started
        self.rpc_errors = []  # RpcErrors raised into this RPC's handler
        self.callbacks = []  # termination callbacks; None once RPC finished
        self.aborted = False  # True after ServicerContext.abort()
+
+
def _raise_rpc_error(state):
    # Raises a fresh RpcError into the handler, recording it so the
    # server-side error paths can recognize it as self-inflicted.
    rpc_error = grpc.RpcError()
    state.rpc_errors.append(rpc_error)
    raise rpc_error
+
+
def _possibly_finish_call(state, token):
    # Retires `token`; when the RPC is no longer active and no operations
    # remain outstanding, hands back the termination callbacks to run.
    # Caller must hold state.condition. _is_rpc_state_active is defined
    # later in this module.
    state.due.remove(token)
    if not _is_rpc_state_active(state) and not state.due:
        callbacks = state.callbacks
        state.callbacks = None
        return state, callbacks
    else:
        return None, ()
+
+
def _send_status_from_server(state, token):
    """Returns the completion callback for a send-status batch."""

    def send_status_from_server(unused_send_status_from_server_event):
        with state.condition:
            return _possibly_finish_call(state, token)

    return send_status_from_server
+
+
def _get_initial_metadata(state, metadata):
    # Prepends the compression-algorithm metadata entry (when one was set
    # via set_compression) to the user-supplied initial metadata.
    with state.condition:
        if state.compression_algorithm:
            compression_metadata = (
                _compression.compression_algorithm_to_metadata(
                    state.compression_algorithm),)
            if metadata is None:
                return compression_metadata
            else:
                return compression_metadata + tuple(metadata)
        else:
            return metadata
+
+
def _get_initial_metadata_operation(state, metadata):
    # Builds the Core batch operation carrying the effective initial metadata.
    operation = cygrpc.SendInitialMetadataOperation(
        _get_initial_metadata(state, metadata), _EMPTY_FLAGS)
    return operation
+
+
def _abort(state, call, code, details):
    # Starts the final status batch for a failing RPC. Application-set code
    # and details take precedence over the internal ones supplied here.
    # Expects the caller to hold state.condition.
    if state.client is not _CANCELLED:
        effective_code = _abortion_code(state, code)
        effective_details = details if state.details is None else state.details
        if state.initial_metadata_allowed:
            # Initial metadata was never sent: send it in the same batch.
            operations = (
                _get_initial_metadata_operation(state, None),
                cygrpc.SendStatusFromServerOperation(state.trailing_metadata,
                                                     effective_code,
                                                     effective_details,
                                                     _EMPTY_FLAGS),
            )
            token = _SEND_INITIAL_METADATA_AND_SEND_STATUS_FROM_SERVER_TOKEN
        else:
            operations = (cygrpc.SendStatusFromServerOperation(
                state.trailing_metadata, effective_code, effective_details,
                _EMPTY_FLAGS),)
            token = _SEND_STATUS_FROM_SERVER_TOKEN
        call.start_server_batch(operations,
                                _send_status_from_server(state, token))
        state.statused = True
        state.due.add(token)
+
+
def _receive_close_on_server(state):
    """Returns the completion callback for the receive-close batch."""

    def receive_close_on_server(receive_close_on_server_event):
        with state.condition:
            # cancelled() distinguishes client cancellation from a normal
            # half-close of the request stream.
            if receive_close_on_server_event.batch_operations[0].cancelled():
                state.client = _CANCELLED
            elif state.client is _OPEN:
                state.client = _CLOSED
            state.condition.notify_all()
            return _possibly_finish_call(state, _RECEIVE_CLOSE_ON_SERVER_TOKEN)

    return receive_close_on_server
+
+
def _receive_message(state, call, request_deserializer):
    """Returns the completion callback for a receive-message batch."""

    def receive_message(receive_message_event):
        serialized_request = _serialized_request(receive_message_event)
        if serialized_request is None:
            # No message: the client half-closed its request stream.
            with state.condition:
                if state.client is _OPEN:
                    state.client = _CLOSED
                state.condition.notify_all()
                return _possibly_finish_call(state, _RECEIVE_MESSAGE_TOKEN)
        else:
            request = _common.deserialize(serialized_request,
                                          request_deserializer)
            with state.condition:
                if request is None:
                    # Deserialization failed: fail the RPC with INTERNAL.
                    _abort(state, call, cygrpc.StatusCode.internal,
                           b'Exception deserializing request!')
                else:
                    state.request = request
                state.condition.notify_all()
                return _possibly_finish_call(state, _RECEIVE_MESSAGE_TOKEN)

    return receive_message
+
+
def _send_initial_metadata(state):
    """Returns the completion callback for a send-initial-metadata batch."""

    def send_initial_metadata(unused_send_initial_metadata_event):
        with state.condition:
            return _possibly_finish_call(state, _SEND_INITIAL_METADATA_TOKEN)

    return send_initial_metadata
+
+
def _send_message(state, token):
    """Returns the completion callback for a send-message batch."""

    def send_message(unused_send_message_event):
        with state.condition:
            # Wake writers blocked in _send_response waiting for this token.
            state.condition.notify_all()
            return _possibly_finish_call(state, token)

    return send_message
+
+
class _Context(grpc.ServicerContext):
    """ServicerContext implementation backed by an _RPCState."""

    def __init__(self, rpc_event, state, request_deserializer):
        self._rpc_event = rpc_event
        self._state = state
        self._request_deserializer = request_deserializer

    def is_active(self):
        # _is_rpc_state_active is defined later in this module.
        with self._state.condition:
            return _is_rpc_state_active(self._state)

    def time_remaining(self):
        # Seconds until the deadline, clamped at zero.
        return max(self._rpc_event.call_details.deadline - time.time(), 0)

    def cancel(self):
        self._rpc_event.call.cancel()

    def add_callback(self, callback):
        # Returns False when the RPC already terminated (callbacks is None).
        with self._state.condition:
            if self._state.callbacks is None:
                return False
            else:
                self._state.callbacks.append(callback)
                return True

    def disable_next_message_compression(self):
        with self._state.condition:
            self._state.disable_next_compression = True

    def invocation_metadata(self):
        return self._rpc_event.invocation_metadata

    def peer(self):
        return _common.decode(self._rpc_event.call.peer())

    def peer_identities(self):
        return cygrpc.peer_identities(self._rpc_event.call)

    def peer_identity_key(self):
        id_key = cygrpc.peer_identity_key(self._rpc_event.call)
        return id_key if id_key is None else _common.decode(id_key)

    def auth_context(self):
        return {
            _common.decode(key): value for key, value in six.iteritems(
                cygrpc.auth_context(self._rpc_event.call))
        }

    def set_compression(self, compression):
        with self._state.condition:
            self._state.compression_algorithm = compression

    def send_initial_metadata(self, initial_metadata):
        with self._state.condition:
            if self._state.client is _CANCELLED:
                _raise_rpc_error(self._state)
            else:
                if self._state.initial_metadata_allowed:
                    operation = _get_initial_metadata_operation(
                        self._state, initial_metadata)
                    self._rpc_event.call.start_server_batch(
                        (operation,), _send_initial_metadata(self._state))
                    self._state.initial_metadata_allowed = False
                    self._state.due.add(_SEND_INITIAL_METADATA_TOKEN)
                else:
                    # Metadata may be sent at most once, before any message.
                    raise ValueError('Initial metadata no longer allowed!')

    def set_trailing_metadata(self, trailing_metadata):
        with self._state.condition:
            self._state.trailing_metadata = trailing_metadata

    def trailing_metadata(self):
        return self._state.trailing_metadata

    def abort(self, code, details):
        # treat OK like other invalid arguments: fail the RPC
        if code == grpc.StatusCode.OK:
            _LOGGER.error(
                'abort() called with StatusCode.OK; returning UNKNOWN')
            code = grpc.StatusCode.UNKNOWN
            details = ''
        with self._state.condition:
            self._state.code = code
            self._state.details = _common.encode(details)
            self._state.aborted = True
        # Deliberately raises a bare Exception to unwind the handler;
        # _call_behavior checks state.aborted to recognize this.
        raise Exception()

    def abort_with_status(self, status):
        # NOTE(review): trailing_metadata is written here without holding
        # state.condition, unlike set_trailing_metadata — confirm intended.
        self._state.trailing_metadata = status.trailing_metadata
        self.abort(status.code, status.details)

    def set_code(self, code):
        with self._state.condition:
            self._state.code = code

    def code(self):
        return self._state.code

    def set_details(self, details):
        with self._state.condition:
            self._state.details = _common.encode(details)

    def details(self):
        return self._state.details

    def _finalize_state(self):
        # No per-context finalization needed for the synchronous server.
        pass
+
+
+class _RequestIterator(object):
+
+ def __init__(self, state, call, request_deserializer):
+ self._state = state
+ self._call = call
+ self._request_deserializer = request_deserializer
+
+ def _raise_or_start_receive_message(self):
+ if self._state.client is _CANCELLED:
+ _raise_rpc_error(self._state)
+ elif not _is_rpc_state_active(self._state):
+ raise StopIteration()
+ else:
+ self._call.start_server_batch(
+ (cygrpc.ReceiveMessageOperation(_EMPTY_FLAGS),),
+ _receive_message(self._state, self._call,
+ self._request_deserializer))
+ self._state.due.add(_RECEIVE_MESSAGE_TOKEN)
+
+ def _look_for_request(self):
+ if self._state.client is _CANCELLED:
+ _raise_rpc_error(self._state)
+ elif (self._state.request is None and
+ _RECEIVE_MESSAGE_TOKEN not in self._state.due):
+ raise StopIteration()
+ else:
+ request = self._state.request
+ self._state.request = None
+ return request
+
+ raise AssertionError() # should never run
+
+ def _next(self):
+ with self._state.condition:
+ self._raise_or_start_receive_message()
+ while True:
+ self._state.condition.wait()
+ request = self._look_for_request()
+ if request is not None:
+ return request
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ return self._next()
+
+ def next(self):
+ return self._next()
+
+
def _unary_request(rpc_event, state, request_deserializer):
    """Returns a thunk that blocks until the single request message arrives.

    The thunk returns None when the RPC is already over, was cancelled, or
    the client closed without sending exactly one message (which aborts
    the RPC with UNIMPLEMENTED).
    """

    def unary_request():
        with state.condition:
            if not _is_rpc_state_active(state):
                return None
            else:
                rpc_event.call.start_server_batch(
                    (cygrpc.ReceiveMessageOperation(_EMPTY_FLAGS),),
                    _receive_message(state, rpc_event.call,
                                     request_deserializer))
                state.due.add(_RECEIVE_MESSAGE_TOKEN)
                while True:
                    state.condition.wait()
                    if state.request is None:
                        if state.client is _CLOSED:
                            details = '"{}" requires exactly one request message.'.format(
                                rpc_event.call_details.method)
                            _abort(state, rpc_event.call,
                                   cygrpc.StatusCode.unimplemented,
                                   _common.encode(details))
                            return None
                        elif state.client is _CANCELLED:
                            return None
                    else:
                        request = state.request
                        state.request = None
                        return request

    return unary_request
+
+
def _call_behavior(rpc_event,
                   state,
                   behavior,
                   argument,
                   request_deserializer,
                   send_response_callback=None):
    """Invokes the application handler; returns (result, proceed) where
    proceed is False when the handler raised and the RPC was aborted."""
    # Imported locally to avoid a circular import with the grpc package.
    from grpc import _create_servicer_context
    with _create_servicer_context(rpc_event, state,
                                  request_deserializer) as context:
        try:
            response_or_iterator = None
            if send_response_callback is not None:
                # Experimental non-blocking handlers receive a writer callback.
                response_or_iterator = behavior(argument, context,
                                                send_response_callback)
            else:
                response_or_iterator = behavior(argument, context)
            return response_or_iterator, True
        except Exception as exception:  # pylint: disable=broad-except
            with state.condition:
                if state.aborted:
                    # context.abort() raised a bare Exception on purpose.
                    _abort(state, rpc_event.call, cygrpc.StatusCode.unknown,
                           b'RPC Aborted')
                elif exception not in state.rpc_errors:
                    # Skip RpcErrors this RPC raised itself (already handled).
                    details = 'Exception calling application: {}'.format(
                        exception)
                    _LOGGER.exception(details)
                    _abort(state, rpc_event.call, cygrpc.StatusCode.unknown,
                           _common.encode(details))
            return None, False
+
+
def _take_response_from_response_iterator(rpc_event, state, response_iterator):
    """Advances the handler's response iterator; returns (response, proceed).

    (None, True) signals normal exhaustion; (None, False) signals a failure
    that already aborted the RPC.
    """
    try:
        return next(response_iterator), True
    except StopIteration:
        return None, True
    except Exception as exception:  # pylint: disable=broad-except
        with state.condition:
            if state.aborted:
                _abort(state, rpc_event.call, cygrpc.StatusCode.unknown,
                       b'RPC Aborted')
            elif exception not in state.rpc_errors:
                details = 'Exception iterating responses: {}'.format(exception)
                _LOGGER.exception(details)
                _abort(state, rpc_event.call, cygrpc.StatusCode.unknown,
                       _common.encode(details))
        return None, False
+
+
def _serialize_response(rpc_event, state, response, response_serializer):
    # Serializes the handler's response; aborts the RPC with INTERNAL and
    # returns None when serialization fails.
    serialized_response = _common.serialize(response, response_serializer)
    if serialized_response is None:
        with state.condition:
            _abort(state, rpc_event.call, cygrpc.StatusCode.internal,
                   b'Failed to serialize response!')
        return None
    else:
        return serialized_response
+
+
def _get_send_message_op_flags_from_state(state):
    # Honors disable_next_message_compression() for the next write only.
    if state.disable_next_compression:
        return cygrpc.WriteFlag.no_compress
    else:
        return _EMPTY_FLAGS
+
+
+def _reset_per_message_state(state):
+ with state.condition:
+ state.disable_next_compression = False
+
+
def _send_response(rpc_event, state, serialized_response):
    """Sends one serialized response and blocks until the write completes.

    Returns True iff the RPC is still active afterwards.
    """
    with state.condition:
        if not _is_rpc_state_active(state):
            return False
        else:
            if state.initial_metadata_allowed:
                # First write: piggy-back initial metadata onto the message.
                operations = (
                    _get_initial_metadata_operation(state, None),
                    cygrpc.SendMessageOperation(
                        serialized_response,
                        _get_send_message_op_flags_from_state(state)),
                )
                state.initial_metadata_allowed = False
                token = _SEND_INITIAL_METADATA_AND_SEND_MESSAGE_TOKEN
            else:
                operations = (cygrpc.SendMessageOperation(
                    serialized_response,
                    _get_send_message_op_flags_from_state(state)),)
                token = _SEND_MESSAGE_TOKEN
            rpc_event.call.start_server_batch(operations,
                                              _send_message(state, token))
            state.due.add(token)
            _reset_per_message_state(state)
            # Wait until the send-message callback retires the token.
            while True:
                state.condition.wait()
                if token not in state.due:
                    return _is_rpc_state_active(state)
+
+
def _status(rpc_event, state, serialized_response):
    """Starts the final status batch, optionally with a last response message
    and, when not yet sent, the initial metadata."""
    with state.condition:
        if state.client is not _CANCELLED:
            code = _completion_code(state)
            details = _details(state)
            operations = [
                cygrpc.SendStatusFromServerOperation(state.trailing_metadata,
                                                     code, details,
                                                     _EMPTY_FLAGS),
            ]
            if state.initial_metadata_allowed:
                operations.append(_get_initial_metadata_operation(state, None))
            if serialized_response is not None:
                operations.append(
                    cygrpc.SendMessageOperation(
                        serialized_response,
                        _get_send_message_op_flags_from_state(state)))
            rpc_event.call.start_server_batch(
                operations,
                _send_status_from_server(state, _SEND_STATUS_FROM_SERVER_TOKEN))
            state.statused = True
            _reset_per_message_state(state)
            state.due.add(_SEND_STATUS_FROM_SERVER_TOKEN)
+
+
+def _unary_response_in_pool(rpc_event, state, behavior, argument_thunk,
+                            request_deserializer, response_serializer):
+    """Thread-pool task servicing an RPC with a single response message.
+
+    Installs the per-call context for the worker thread, obtains the request
+    (argument_thunk returns None if the RPC already failed/terminated),
+    invokes the handler, serializes the result and sends it with the status.
+    """
+    cygrpc.install_context_from_request_call_event(rpc_event)
+    try:
+        argument = argument_thunk()
+        if argument is not None:
+            response, proceed = _call_behavior(rpc_event, state, behavior,
+                                               argument, request_deserializer)
+            if proceed:
+                serialized_response = _serialize_response(
+                    rpc_event, state, response, response_serializer)
+                if serialized_response is not None:
+                    _status(rpc_event, state, serialized_response)
+    finally:
+        # Always detach the context, even if the handler raised.
+        cygrpc.uninstall_context()
+
+
+def _stream_response_in_pool(rpc_event, state, behavior, argument_thunk,
+                             request_deserializer, response_serializer):
+    """Thread-pool task servicing an RPC with a stream of responses.
+
+    Supports both the experimental non-blocking handler style (the handler is
+    given a send-callback) and the standard style where the handler returns a
+    response iterator that is drained here.
+    """
+    cygrpc.install_context_from_request_call_event(rpc_event)
+
+    def send_response(response):
+        # None signals end-of-stream: emit trailing status with no message.
+        if response is None:
+            _status(rpc_event, state, None)
+        else:
+            serialized_response = _serialize_response(rpc_event, state,
+                                                      response,
+                                                      response_serializer)
+            if serialized_response is not None:
+                _send_response(rpc_event, state, serialized_response)
+
+    try:
+        argument = argument_thunk()
+        if argument is not None:
+            if hasattr(behavior, 'experimental_non_blocking'
+                      ) and behavior.experimental_non_blocking:
+                # Non-blocking handler drives responses via the callback.
+                _call_behavior(rpc_event,
+                               state,
+                               behavior,
+                               argument,
+                               request_deserializer,
+                               send_response_callback=send_response)
+            else:
+                response_iterator, proceed = _call_behavior(
+                    rpc_event, state, behavior, argument, request_deserializer)
+                if proceed:
+                    _send_message_callback_to_blocking_iterator_adapter(
+                        rpc_event, state, send_response, response_iterator)
+    finally:
+        cygrpc.uninstall_context()
+
+
+def _is_rpc_state_active(state):
+    """Return True while the RPC is neither client-cancelled nor statused."""
+    return state.client is not _CANCELLED and not state.statused
+
+
+def _send_message_callback_to_blocking_iterator_adapter(rpc_event, state,
+                                                        send_response_callback,
+                                                        response_iterator):
+    """Drain a handler's response iterator through the send callback.
+
+    Stops when the iterator is exhausted/fails (proceed is False) or when the
+    RPC becomes inactive (cancelled or statused) between messages.
+    """
+    while True:
+        response, proceed = _take_response_from_response_iterator(
+            rpc_event, state, response_iterator)
+        if proceed:
+            send_response_callback(response)
+            if not _is_rpc_state_active(state):
+                break
+        else:
+            break
+
+
+def _select_thread_pool_for_behavior(behavior, default_thread_pool):
+    """Pick the thread pool for a handler.
+
+    Handlers may carry an experimental per-method pool; otherwise the
+    server-wide default pool is used.
+    """
+    if hasattr(behavior, 'experimental_thread_pool') and isinstance(
+            behavior.experimental_thread_pool, futures.ThreadPoolExecutor):
+        return behavior.experimental_thread_pool
+    else:
+        return default_thread_pool
+
+
+def _handle_unary_unary(rpc_event, state, method_handler, default_thread_pool):
+    """Dispatch a unary-request/unary-response RPC to a worker thread."""
+    unary_request = _unary_request(rpc_event, state,
+                                   method_handler.request_deserializer)
+    thread_pool = _select_thread_pool_for_behavior(method_handler.unary_unary,
+                                                   default_thread_pool)
+    return thread_pool.submit(_unary_response_in_pool, rpc_event, state,
+                              method_handler.unary_unary, unary_request,
+                              method_handler.request_deserializer,
+                              method_handler.response_serializer)
+
+
+def _handle_unary_stream(rpc_event, state, method_handler, default_thread_pool):
+    """Dispatch a unary-request/stream-response RPC to a worker thread."""
+    unary_request = _unary_request(rpc_event, state,
+                                   method_handler.request_deserializer)
+    thread_pool = _select_thread_pool_for_behavior(method_handler.unary_stream,
+                                                   default_thread_pool)
+    return thread_pool.submit(_stream_response_in_pool, rpc_event, state,
+                              method_handler.unary_stream, unary_request,
+                              method_handler.request_deserializer,
+                              method_handler.response_serializer)
+
+
+def _handle_stream_unary(rpc_event, state, method_handler, default_thread_pool):
+    """Dispatch a stream-request/unary-response RPC to a worker thread."""
+    request_iterator = _RequestIterator(state, rpc_event.call,
+                                        method_handler.request_deserializer)
+    thread_pool = _select_thread_pool_for_behavior(method_handler.stream_unary,
+                                                   default_thread_pool)
+    # The argument thunk wraps the iterator so the worker receives it lazily.
+    return thread_pool.submit(_unary_response_in_pool, rpc_event, state,
+                              method_handler.stream_unary,
+                              lambda: request_iterator,
+                              method_handler.request_deserializer,
+                              method_handler.response_serializer)
+
+
+def _handle_stream_stream(rpc_event, state, method_handler,
+                          default_thread_pool):
+    """Dispatch a stream-request/stream-response RPC to a worker thread."""
+    request_iterator = _RequestIterator(state, rpc_event.call,
+                                        method_handler.request_deserializer)
+    thread_pool = _select_thread_pool_for_behavior(method_handler.stream_stream,
+                                                   default_thread_pool)
+    return thread_pool.submit(_stream_response_in_pool, rpc_event, state,
+                              method_handler.stream_stream,
+                              lambda: request_iterator,
+                              method_handler.request_deserializer,
+                              method_handler.response_serializer)
+
+
+def _find_method_handler(rpc_event, generic_handlers, interceptor_pipeline):
+    """Resolve the method handler for an incoming call.
+
+    Queries the registered generic handlers in order; when an interceptor
+    pipeline is configured, the lookup is executed through it so interceptors
+    can observe or replace the continuation.
+    """
+
+    def query_handlers(handler_call_details):
+        # First handler to claim the method wins.
+        for generic_handler in generic_handlers:
+            method_handler = generic_handler.service(handler_call_details)
+            if method_handler is not None:
+                return method_handler
+        return None
+
+    handler_call_details = _HandlerCallDetails(
+        _common.decode(rpc_event.call_details.method),
+        rpc_event.invocation_metadata)
+
+    if interceptor_pipeline is not None:
+        return interceptor_pipeline.execute(query_handlers,
+                                            handler_call_details)
+    else:
+        return query_handlers(handler_call_details)
+
+
+def _reject_rpc(rpc_event, status, details):
+    """Immediately fail an RPC with the given status and details.
+
+    Used when no handler is found, a handler lookup raised, or the
+    concurrency limit is exceeded. Returns the fresh _RPCState so the
+    serving loop can track the in-flight batch.
+    """
+    rpc_state = _RPCState()
+    operations = (
+        _get_initial_metadata_operation(rpc_state, None),
+        cygrpc.ReceiveCloseOnServerOperation(_EMPTY_FLAGS),
+        cygrpc.SendStatusFromServerOperation(None, status, details,
+                                             _EMPTY_FLAGS),
+    )
+    # The batch callback is an event tag returning (state, no callbacks).
+    rpc_event.call.start_server_batch(operations, lambda ignored_event: (
+        rpc_state,
+        (),
+    ))
+    return rpc_state
+
+
+def _handle_with_method_handler(rpc_event, method_handler, thread_pool):
+    """Start servicing an accepted RPC with its resolved method handler.
+
+    Registers a receive-close operation (so cancellation is observed), then
+    dispatches to the appropriate unary/stream handler based on the method's
+    cardinality. Returns (rpc_state, future-of-the-pool-task).
+    """
+    state = _RPCState()
+    with state.condition:
+        rpc_event.call.start_server_batch(
+            (cygrpc.ReceiveCloseOnServerOperation(_EMPTY_FLAGS),),
+            _receive_close_on_server(state))
+        state.due.add(_RECEIVE_CLOSE_ON_SERVER_TOKEN)
+        if method_handler.request_streaming:
+            if method_handler.response_streaming:
+                return state, _handle_stream_stream(rpc_event, state,
+                                                    method_handler, thread_pool)
+            else:
+                return state, _handle_stream_unary(rpc_event, state,
+                                                   method_handler, thread_pool)
+        else:
+            if method_handler.response_streaming:
+                return state, _handle_unary_stream(rpc_event, state,
+                                                   method_handler, thread_pool)
+            else:
+                return state, _handle_unary_unary(rpc_event, state,
+                                                  method_handler, thread_pool)
+
+
+def _handle_call(rpc_event, generic_handlers, interceptor_pipeline, thread_pool,
+                 concurrency_exceeded):
+    """Handle a new-call completion-queue event.
+
+    Returns an (rpc_state, rpc_future) pair; either element may be None:
+    (None, None) for unusable events, (state, None) for rejected RPCs and
+    (state, future) for RPCs handed off to the thread pool.
+    """
+    if not rpc_event.success:
+        return None, None
+    if rpc_event.call_details.method is not None:
+        try:
+            method_handler = _find_method_handler(rpc_event, generic_handlers,
+                                                  interceptor_pipeline)
+        except Exception as exception:  # pylint: disable=broad-except
+            # Handler lookup (including interceptors) raised: log locally but
+            # send only a generic message to the client.
+            details = 'Exception servicing handler: {}'.format(exception)
+            _LOGGER.exception(details)
+            return _reject_rpc(rpc_event, cygrpc.StatusCode.unknown,
+                               b'Error in service handler!'), None
+        if method_handler is None:
+            return _reject_rpc(rpc_event, cygrpc.StatusCode.unimplemented,
+                               b'Method not found!'), None
+        elif concurrency_exceeded:
+            return _reject_rpc(rpc_event, cygrpc.StatusCode.resource_exhausted,
+                               b'Concurrent RPC limit exceeded!'), None
+        else:
+            return _handle_with_method_handler(rpc_event, method_handler,
+                                               thread_pool)
+    else:
+        return None, None
+
+
+@enum.unique
+class _ServerStage(enum.Enum):
+    """Lifecycle stage of the server: STOPPED -> STARTED -> GRACE -> STOPPED."""
+    STOPPED = 'stopped'
+    STARTED = 'started'
+    GRACE = 'grace'
+
+
+class _ServerState(object):
+    """Mutable state shared between the server facade and its serving thread.
+
+    All fields are protected by `lock` except `server_deallocated`, which is
+    deliberately written without the lock from __del__.
+    """
+
+    # pylint: disable=too-many-arguments
+    def __init__(self, completion_queue, server, generic_handlers,
+                 interceptor_pipeline, thread_pool, maximum_concurrent_rpcs):
+        self.lock = threading.RLock()
+        self.completion_queue = completion_queue
+        self.server = server
+        self.generic_handlers = list(generic_handlers)
+        self.interceptor_pipeline = interceptor_pipeline
+        self.thread_pool = thread_pool
+        self.stage = _ServerStage.STOPPED
+        # termination_event fires once at final shutdown; additional events
+        # are appended by stop() so each caller gets its own signal.
+        self.termination_event = threading.Event()
+        self.shutdown_events = [self.termination_event]
+        self.maximum_concurrent_rpcs = maximum_concurrent_rpcs
+        # Count of in-flight RPCs, used to enforce maximum_concurrent_rpcs.
+        self.active_rpc_count = 0
+
+        # TODO(https://github.com/grpc/grpc/issues/6597): eliminate these fields.
+        self.rpc_states = set()
+        self.due = set()
+
+        # A "volatile" flag to interrupt the daemon serving thread
+        self.server_deallocated = False
+
+
+def _add_generic_handlers(state, generic_handlers):
+    """Append generic RPC handlers to the server's handler list (locked)."""
+    with state.lock:
+        state.generic_handlers.extend(generic_handlers)
+
+
+def _add_insecure_port(state, address):
+    """Bind an insecure HTTP/2 port; returns the bound port number."""
+    with state.lock:
+        return state.server.add_http2_port(address)
+
+
+def _add_secure_port(state, address, server_credentials):
+    """Bind a TLS HTTP/2 port; returns the bound port number."""
+    with state.lock:
+        return state.server.add_http2_port(address,
+                                           server_credentials._credentials)
+
+
+def _request_call(state):
+    """Ask the core for the next incoming call and track the pending tag."""
+    state.server.request_call(state.completion_queue, state.completion_queue,
+                              _REQUEST_CALL_TAG)
+    state.due.add(_REQUEST_CALL_TAG)
+
+
+# TODO(https://github.com/grpc/grpc/issues/6597): delete this function.
+def _stop_serving(state):
+    """Finalize shutdown once no RPCs or batches remain.
+
+    Returns True (and signals all shutdown events) when the server was
+    destroyed, False if work is still outstanding. Caller holds state.lock.
+    """
+    if not state.rpc_states and not state.due:
+        state.server.destroy()
+        for shutdown_event in state.shutdown_events:
+            shutdown_event.set()
+        state.stage = _ServerStage.STOPPED
+        return True
+    else:
+        return False
+
+
+def _on_call_completed(state):
+    """Done-callback for RPC futures: release one concurrency slot."""
+    with state.lock:
+        state.active_rpc_count -= 1
+
+
+def _process_event_and_continue(state, event):
+    """Process one completion-queue event in the serving loop.
+
+    Handles shutdown notifications, new incoming calls and per-RPC batch
+    completions (whose tags are callables). Returns False once the server
+    has fully stopped serving.
+    """
+    should_continue = True
+    if event.tag is _SHUTDOWN_TAG:
+        with state.lock:
+            state.due.remove(_SHUTDOWN_TAG)
+            if _stop_serving(state):
+                should_continue = False
+    elif event.tag is _REQUEST_CALL_TAG:
+        with state.lock:
+            state.due.remove(_REQUEST_CALL_TAG)
+            concurrency_exceeded = (
+                state.maximum_concurrent_rpcs is not None and
+                state.active_rpc_count >= state.maximum_concurrent_rpcs)
+            rpc_state, rpc_future = _handle_call(event, state.generic_handlers,
+                                                 state.interceptor_pipeline,
+                                                 state.thread_pool,
+                                                 concurrency_exceeded)
+            if rpc_state is not None:
+                state.rpc_states.add(rpc_state)
+            if rpc_future is not None:
+                state.active_rpc_count += 1
+                rpc_future.add_done_callback(
+                    lambda unused_future: _on_call_completed(state))
+            if state.stage is _ServerStage.STARTED:
+                # Keep the pipeline primed with another request_call.
+                _request_call(state)
+            elif _stop_serving(state):
+                should_continue = False
+    else:
+        # Per-RPC batch completion: the tag itself is a callable that returns
+        # the affected rpc_state and any thread-unblocking callbacks.
+        rpc_state, callbacks = event.tag(event)
+        for callback in callbacks:
+            try:
+                callback()
+            except Exception:  # pylint: disable=broad-except
+                _LOGGER.exception('Exception calling callback!')
+        if rpc_state is not None:
+            with state.lock:
+                state.rpc_states.remove(rpc_state)
+                if _stop_serving(state):
+                    should_continue = False
+    return should_continue
+
+
+def _serve(state):
+    """Daemon serving loop: poll the completion queue until shutdown.
+
+    Polls with a periodic timeout so server deallocation (set in __del__,
+    which cannot take locks) is noticed promptly.
+    """
+    while True:
+        timeout = time.time() + _DEALLOCATED_SERVER_CHECK_PERIOD_S
+        event = state.completion_queue.poll(timeout)
+        if state.server_deallocated:
+            _begin_shutdown_once(state)
+        if event.completion_type != cygrpc.CompletionType.queue_timeout:
+            if not _process_event_and_continue(state, event):
+                return
+        # We want to force the deletion of the previous event
+        # ~before~ we poll again; if the event has a reference
+        # to a shutdown Call object, this can induce spinlock.
+        event = None
+
+
+def _begin_shutdown_once(state):
+    """Initiate shutdown exactly once, moving STARTED -> GRACE."""
+    with state.lock:
+        if state.stage is _ServerStage.STARTED:
+            state.server.shutdown(state.completion_queue, _SHUTDOWN_TAG)
+            state.stage = _ServerStage.GRACE
+            state.due.add(_SHUTDOWN_TAG)
+
+
+def _stop(state, grace):
+    """Stop the server, returning an Event set at full termination.
+
+    grace None cancels all in-flight calls immediately and blocks until
+    termination; a numeric grace cancels remaining calls from a helper
+    thread after the grace period and returns without blocking.
+    """
+    with state.lock:
+        if state.stage is _ServerStage.STOPPED:
+            # Already stopped: hand back an event that is already set.
+            shutdown_event = threading.Event()
+            shutdown_event.set()
+            return shutdown_event
+        else:
+            _begin_shutdown_once(state)
+            shutdown_event = threading.Event()
+            state.shutdown_events.append(shutdown_event)
+            if grace is None:
+                state.server.cancel_all_calls()
+            else:
+
+                def cancel_all_calls_after_grace():
+                    # If termination happens first, wait() returns early and
+                    # cancel_all_calls is a harmless no-op on a dead server.
+                    shutdown_event.wait(timeout=grace)
+                    with state.lock:
+                        state.server.cancel_all_calls()
+
+                thread = threading.Thread(target=cancel_all_calls_after_grace)
+                thread.start()
+                return shutdown_event
+    shutdown_event.wait()
+    return shutdown_event
+
+
+def _start(state):
+    """Start the core server and spawn the daemon serving thread.
+
+    Raises ValueError if the server was already started.
+    """
+    with state.lock:
+        if state.stage is not _ServerStage.STOPPED:
+            raise ValueError('Cannot start already-started server!')
+        state.server.start()
+        state.stage = _ServerStage.STARTED
+        _request_call(state)
+
+        # Daemon thread so a forgotten server does not block interpreter exit.
+        thread = threading.Thread(target=_serve, args=(state,))
+        thread.daemon = True
+        thread.start()
+
+
+def _validate_generic_rpc_handlers(generic_rpc_handlers):
+    """Duck-type check that each handler exposes a `service` method.
+
+    Raises AttributeError for any handler that does not conform to
+    grpc.GenericRpcHandler.
+    """
+    for generic_rpc_handler in generic_rpc_handlers:
+        service_attribute = getattr(generic_rpc_handler, 'service', None)
+        if service_attribute is None:
+            raise AttributeError(
+                '"{}" must conform to grpc.GenericRpcHandler type but does '
+                'not have "service" method!'.format(generic_rpc_handler))
+
+
+def _augment_options(base_options, compression):
+    """Append the channel option derived from the compression setting."""
+    compression_option = _compression.create_channel_option(compression)
+    return tuple(base_options) + compression_option
+
+
+class _Server(grpc.Server):
+    """Concrete grpc.Server backed by a cygrpc server and completion queue."""
+
+    # pylint: disable=too-many-arguments
+    def __init__(self, thread_pool, generic_handlers, interceptors, options,
+                 maximum_concurrent_rpcs, compression, xds):
+        completion_queue = cygrpc.CompletionQueue()
+        server = cygrpc.Server(_augment_options(options, compression), xds)
+        server.register_completion_queue(completion_queue)
+        self._state = _ServerState(completion_queue, server, generic_handlers,
+                                   _interceptor.service_pipeline(interceptors),
+                                   thread_pool, maximum_concurrent_rpcs)
+
+    def add_generic_rpc_handlers(self, generic_rpc_handlers):
+        # Validate before mutating shared state so a bad handler leaves the
+        # server unchanged.
+        _validate_generic_rpc_handlers(generic_rpc_handlers)
+        _add_generic_handlers(self._state, generic_rpc_handlers)
+
+    def add_insecure_port(self, address):
+        return _common.validate_port_binding_result(
+            address, _add_insecure_port(self._state, _common.encode(address)))
+
+    def add_secure_port(self, address, server_credentials):
+        return _common.validate_port_binding_result(
+            address,
+            _add_secure_port(self._state, _common.encode(address),
+                             server_credentials))
+
+    def start(self):
+        _start(self._state)
+
+    def wait_for_termination(self, timeout=None):
+        # NOTE(https://bugs.python.org/issue35935)
+        # Remove this workaround once threading.Event.wait() is working with
+        # CTRL+C across platforms.
+        return _common.wait(self._state.termination_event.wait,
+                            self._state.termination_event.is_set,
+                            timeout=timeout)
+
+    def stop(self, grace):
+        return _stop(self._state, grace)
+
+    def __del__(self):
+        if hasattr(self, '_state'):
+            # We can not grab a lock in __del__(), so set a flag to signal the
+            # serving daemon thread (if it exists) to initiate shutdown.
+            self._state.server_deallocated = True
+
+
+def create_server(thread_pool, generic_rpc_handlers, interceptors, options,
+                  maximum_concurrent_rpcs, compression, xds):
+    """Factory for _Server; validates handlers before construction."""
+    _validate_generic_rpc_handlers(generic_rpc_handlers)
+    return _Server(thread_pool, generic_rpc_handlers, interceptors, options,
+                   maximum_concurrent_rpcs, compression, xds)
diff --git a/contrib/python/grpcio/py2/grpc/_utilities.py b/contrib/python/grpcio/py2/grpc/_utilities.py
new file mode 100644
index 0000000000..9293c9bcef
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/_utilities.py
@@ -0,0 +1,168 @@
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Internal utilities for gRPC Python."""
+
+import collections
+import logging
+import threading
+import time
+
+import grpc
+from grpc import _common
+import six
+
+_LOGGER = logging.getLogger(__name__)
+
+_DONE_CALLBACK_EXCEPTION_LOG_MESSAGE = (
+ 'Exception calling connectivity future "done" callback!')
+
+
+class RpcMethodHandler(
+        collections.namedtuple('_RpcMethodHandler', (
+            'request_streaming',
+            'response_streaming',
+            'request_deserializer',
+            'response_serializer',
+            'unary_unary',
+            'unary_stream',
+            'stream_unary',
+            'stream_stream',
+        )), grpc.RpcMethodHandler):
+    """Immutable grpc.RpcMethodHandler implemented as a namedtuple."""
+    pass
+
+
+class DictionaryGenericHandler(grpc.ServiceRpcHandler):
+    """ServiceRpcHandler backed by a {method-name: handler} dictionary."""
+
+    def __init__(self, service, method_handlers):
+        self._name = service
+        # Key by fully-qualified method name to match handler_call_details.
+        self._method_handlers = {
+            _common.fully_qualified_method(service, method): method_handler
+            for method, method_handler in six.iteritems(method_handlers)
+        }
+
+    def service_name(self):
+        return self._name
+
+    def service(self, handler_call_details):
+        return self._method_handlers.get(handler_call_details.method)
+
+
+class _ChannelReadyFuture(grpc.Future):
+    """grpc.Future that matures when a channel reaches READY connectivity.
+
+    Subscribes to the channel's connectivity updates; matures (with a None
+    result) on READY, or can be cancelled. Done callbacks run outside the
+    condition lock.
+    """
+
+    def __init__(self, channel):
+        self._condition = threading.Condition()
+        self._channel = channel
+
+        self._matured = False
+        self._cancelled = False
+        # Set to None once the future is done, so late add_done_callback
+        # calls invoke the callback immediately instead of appending.
+        self._done_callbacks = []
+
+    def _block(self, timeout):
+        """Wait until done, raising FutureCancelledError/FutureTimeoutError."""
+        until = None if timeout is None else time.time() + timeout
+        with self._condition:
+            while True:
+                if self._cancelled:
+                    raise grpc.FutureCancelledError()
+                elif self._matured:
+                    return
+                else:
+                    if until is None:
+                        self._condition.wait()
+                    else:
+                        remaining = until - time.time()
+                        if remaining < 0:
+                            raise grpc.FutureTimeoutError()
+                        else:
+                            self._condition.wait(timeout=remaining)
+
+    def _update(self, connectivity):
+        """Connectivity subscription callback; matures the future on READY."""
+        with self._condition:
+            if (not self._cancelled and
+                    connectivity is grpc.ChannelConnectivity.READY):
+                self._matured = True
+                self._channel.unsubscribe(self._update)
+                self._condition.notify_all()
+                done_callbacks = tuple(self._done_callbacks)
+                self._done_callbacks = None
+            else:
+                return
+
+        # Run callbacks outside the lock to avoid deadlock/re-entrancy.
+        for done_callback in done_callbacks:
+            try:
+                done_callback(self)
+            except Exception:  # pylint: disable=broad-except
+                _LOGGER.exception(_DONE_CALLBACK_EXCEPTION_LOG_MESSAGE)
+
+    def cancel(self):
+        with self._condition:
+            if not self._matured:
+                self._cancelled = True
+                self._channel.unsubscribe(self._update)
+                self._condition.notify_all()
+                done_callbacks = tuple(self._done_callbacks)
+                self._done_callbacks = None
+            else:
+                # Already matured: cancellation is impossible.
+                return False
+
+        for done_callback in done_callbacks:
+            try:
+                done_callback(self)
+            except Exception:  # pylint: disable=broad-except
+                _LOGGER.exception(_DONE_CALLBACK_EXCEPTION_LOG_MESSAGE)
+
+        return True
+
+    def cancelled(self):
+        with self._condition:
+            return self._cancelled
+
+    def running(self):
+        with self._condition:
+            return not self._cancelled and not self._matured
+
+    def done(self):
+        with self._condition:
+            return self._cancelled or self._matured
+
+    def result(self, timeout=None):
+        # Success carries no value; blocking (or raising) is the whole result.
+        self._block(timeout)
+
+    def exception(self, timeout=None):
+        self._block(timeout)
+
+    def traceback(self, timeout=None):
+        self._block(timeout)
+
+    def add_done_callback(self, fn):
+        with self._condition:
+            if not self._cancelled and not self._matured:
+                self._done_callbacks.append(fn)
+                return
+
+        # Future already done: invoke immediately, outside the lock.
+        fn(self)
+
+    def start(self):
+        """Begin watching connectivity, actively attempting to connect."""
+        with self._condition:
+            self._channel.subscribe(self._update, try_to_connect=True)
+
+    def __del__(self):
+        with self._condition:
+            if not self._cancelled and not self._matured:
+                self._channel.unsubscribe(self._update)
+
+
+def channel_ready_future(channel):
+    """Return a started Future that matures when `channel` is READY."""
+    ready_future = _ChannelReadyFuture(channel)
+    ready_future.start()
+    return ready_future
diff --git a/contrib/python/grpcio/py2/grpc/beta/__init__.py b/contrib/python/grpcio/py2/grpc/beta/__init__.py
new file mode 100644
index 0000000000..5fb4f3c3cf
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/beta/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/contrib/python/grpcio/py2/grpc/beta/_client_adaptations.py b/contrib/python/grpcio/py2/grpc/beta/_client_adaptations.py
new file mode 100644
index 0000000000..652ae0ea17
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/beta/_client_adaptations.py
@@ -0,0 +1,706 @@
+# Copyright 2016 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Translates gRPC's client-side API into gRPC's client-side Beta API."""
+
+import grpc
+from grpc import _common
+from grpc.beta import _metadata
+from grpc.beta import interfaces
+from grpc.framework.common import cardinality
+from grpc.framework.foundation import future
+from grpc.framework.interfaces.face import face
+
+# pylint: disable=too-many-arguments,too-many-locals,unused-argument
+
+# Maps gRPC status codes onto the beta-API (Abortion.Kind, error class)
+# pairs; codes absent from this map fall back to LOCAL_FAILURE/AbortionError
+# in _abortion()/_abortion_error() below.
+_STATUS_CODE_TO_ABORTION_KIND_AND_ABORTION_ERROR_CLASS = {
+    grpc.StatusCode.CANCELLED:
+        (face.Abortion.Kind.CANCELLED, face.CancellationError),
+    grpc.StatusCode.UNKNOWN:
+        (face.Abortion.Kind.REMOTE_FAILURE, face.RemoteError),
+    grpc.StatusCode.DEADLINE_EXCEEDED:
+        (face.Abortion.Kind.EXPIRED, face.ExpirationError),
+    grpc.StatusCode.UNIMPLEMENTED:
+        (face.Abortion.Kind.LOCAL_FAILURE, face.LocalError),
+}
+
+
+def _effective_metadata(metadata, metadata_transformer):
+    """Normalize None metadata to () and apply the optional transformer."""
+    non_none_metadata = () if metadata is None else metadata
+    if metadata_transformer is None:
+        return non_none_metadata
+    else:
+        return metadata_transformer(non_none_metadata)
+
+
+def _credentials(grpc_call_options):
+    """Extract call credentials from beta protocol options (None-safe)."""
+    return None if grpc_call_options is None else grpc_call_options.credentials
+
+
+def _abortion(rpc_error_call):
+    """Build a beta face.Abortion describing a failed call."""
+    code = rpc_error_call.code()
+    pair = _STATUS_CODE_TO_ABORTION_KIND_AND_ABORTION_ERROR_CLASS.get(code)
+    # Unmapped codes are reported as LOCAL_FAILURE.
+    error_kind = face.Abortion.Kind.LOCAL_FAILURE if pair is None else pair[0]
+    return face.Abortion(error_kind, rpc_error_call.initial_metadata(),
+                         rpc_error_call.trailing_metadata(), code,
+                         rpc_error_call.details())
+
+
+def _abortion_error(rpc_error_call):
+    """Translate a grpc.RpcError into the matching beta AbortionError."""
+    code = rpc_error_call.code()
+    pair = _STATUS_CODE_TO_ABORTION_KIND_AND_ABORTION_ERROR_CLASS.get(code)
+    exception_class = face.AbortionError if pair is None else pair[1]
+    return exception_class(rpc_error_call.initial_metadata(),
+                           rpc_error_call.trailing_metadata(), code,
+                           rpc_error_call.details())
+
+
+class _InvocationProtocolContext(interfaces.GRPCInvocationContext):
+    """No-op beta invocation context."""
+
+    def disable_next_request_compression(self):
+        pass  # TODO(https://github.com/grpc/grpc/issues/4078): design, implement.
+
+
+class _Rendezvous(future.Future, face.Call):
+    """Adapts new-API future/iterator/call objects to the beta face API.
+
+    Exactly one of response_future / response_iterator is populated depending
+    on the call's response cardinality; `call` provides the common call
+    surface. grpc exceptions are translated into beta face exceptions.
+    """
+
+    def __init__(self, response_future, response_iterator, call):
+        self._future = response_future
+        self._iterator = response_iterator
+        self._call = call
+
+    def cancel(self):
+        return self._call.cancel()
+
+    def cancelled(self):
+        return self._future.cancelled()
+
+    def running(self):
+        return self._future.running()
+
+    def done(self):
+        return self._future.done()
+
+    def result(self, timeout=None):
+        # Translate new-API exceptions into their beta equivalents.
+        try:
+            return self._future.result(timeout=timeout)
+        except grpc.RpcError as rpc_error_call:
+            raise _abortion_error(rpc_error_call)
+        except grpc.FutureTimeoutError:
+            raise future.TimeoutError()
+        except grpc.FutureCancelledError:
+            raise future.CancelledError()
+
+    def exception(self, timeout=None):
+        try:
+            rpc_error_call = self._future.exception(timeout=timeout)
+            if rpc_error_call is None:
+                return None
+            else:
+                return _abortion_error(rpc_error_call)
+        except grpc.FutureTimeoutError:
+            raise future.TimeoutError()
+        except grpc.FutureCancelledError:
+            raise future.CancelledError()
+
+    def traceback(self, timeout=None):
+        try:
+            return self._future.traceback(timeout=timeout)
+        except grpc.FutureTimeoutError:
+            raise future.TimeoutError()
+        except grpc.FutureCancelledError:
+            raise future.CancelledError()
+
+    def add_done_callback(self, fn):
+        # Beta callbacks receive this rendezvous, not the underlying future.
+        self._future.add_done_callback(lambda ignored_callback: fn(self))
+
+    def __iter__(self):
+        return self
+
+    def _next(self):
+        try:
+            return next(self._iterator)
+        except grpc.RpcError as rpc_error_call:
+            raise _abortion_error(rpc_error_call)
+
+    def __next__(self):
+        return self._next()
+
+    # Python 2 iterator protocol.
+    def next(self):
+        return self._next()
+
+    def is_active(self):
+        return self._call.is_active()
+
+    def time_remaining(self):
+        return self._call.time_remaining()
+
+    def add_abortion_callback(self, abortion_callback):
+
+        def done_callback():
+            if self.code() is not grpc.StatusCode.OK:
+                abortion_callback(_abortion(self._call))
+
+        # If registration failed the call is already done; invoke inline.
+        registered = self._call.add_callback(done_callback)
+        return None if registered else done_callback()
+
+    def protocol_context(self):
+        return _InvocationProtocolContext()
+
+    def initial_metadata(self):
+        return _metadata.beta(self._call.initial_metadata())
+
+    def terminal_metadata(self):
+        return _metadata.beta(self._call.terminal_metadata())
+
+    def code(self):
+        return self._call.code()
+
+    def details(self):
+        return self._call.details()
+
+
+def _blocking_unary_unary(channel, group, method, timeout, with_call,
+                          protocol_options, metadata, metadata_transformer,
+                          request, request_serializer, response_deserializer):
+    """Invoke a unary-unary RPC synchronously via the new API.
+
+    Returns the response, or (response, rendezvous) when with_call is True.
+    grpc.RpcError is re-raised as the corresponding beta abortion error.
+    """
+    try:
+        multi_callable = channel.unary_unary(
+            _common.fully_qualified_method(group, method),
+            request_serializer=request_serializer,
+            response_deserializer=response_deserializer)
+        effective_metadata = _effective_metadata(metadata, metadata_transformer)
+        if with_call:
+            response, call = multi_callable.with_call(
+                request,
+                timeout=timeout,
+                metadata=_metadata.unbeta(effective_metadata),
+                credentials=_credentials(protocol_options))
+            return response, _Rendezvous(None, None, call)
+        else:
+            return multi_callable(request,
+                                  timeout=timeout,
+                                  metadata=_metadata.unbeta(effective_metadata),
+                                  credentials=_credentials(protocol_options))
+    except grpc.RpcError as rpc_error_call:
+        raise _abortion_error(rpc_error_call)
+
+
+def _future_unary_unary(channel, group, method, timeout, protocol_options,
+                        metadata, metadata_transformer, request,
+                        request_serializer, response_deserializer):
+    """Invoke a unary-unary RPC asynchronously; returns a beta rendezvous."""
+    multi_callable = channel.unary_unary(
+        _common.fully_qualified_method(group, method),
+        request_serializer=request_serializer,
+        response_deserializer=response_deserializer)
+    effective_metadata = _effective_metadata(metadata, metadata_transformer)
+    response_future = multi_callable.future(
+        request,
+        timeout=timeout,
+        metadata=_metadata.unbeta(effective_metadata),
+        credentials=_credentials(protocol_options))
+    # The future doubles as the call object in the new API.
+    return _Rendezvous(response_future, None, response_future)
+
+
+def _unary_stream(channel, group, method, timeout, protocol_options, metadata,
+                  metadata_transformer, request, request_serializer,
+                  response_deserializer):
+    """Invoke a unary-stream RPC; returns a rendezvous wrapping the iterator."""
+    multi_callable = channel.unary_stream(
+        _common.fully_qualified_method(group, method),
+        request_serializer=request_serializer,
+        response_deserializer=response_deserializer)
+    effective_metadata = _effective_metadata(metadata, metadata_transformer)
+    response_iterator = multi_callable(
+        request,
+        timeout=timeout,
+        metadata=_metadata.unbeta(effective_metadata),
+        credentials=_credentials(protocol_options))
+    return _Rendezvous(None, response_iterator, response_iterator)
+
+
+def _blocking_stream_unary(channel, group, method, timeout, with_call,
+                           protocol_options, metadata, metadata_transformer,
+                           request_iterator, request_serializer,
+                           response_deserializer):
+    """Invoke a stream-unary RPC synchronously via the new API.
+
+    Returns the response, or (response, rendezvous) when with_call is True.
+    grpc.RpcError is re-raised as the corresponding beta abortion error.
+    """
+    try:
+        multi_callable = channel.stream_unary(
+            _common.fully_qualified_method(group, method),
+            request_serializer=request_serializer,
+            response_deserializer=response_deserializer)
+        effective_metadata = _effective_metadata(metadata, metadata_transformer)
+        if with_call:
+            response, call = multi_callable.with_call(
+                request_iterator,
+                timeout=timeout,
+                metadata=_metadata.unbeta(effective_metadata),
+                credentials=_credentials(protocol_options))
+            return response, _Rendezvous(None, None, call)
+        else:
+            return multi_callable(request_iterator,
+                                  timeout=timeout,
+                                  metadata=_metadata.unbeta(effective_metadata),
+                                  credentials=_credentials(protocol_options))
+    except grpc.RpcError as rpc_error_call:
+        raise _abortion_error(rpc_error_call)
+
+
+def _future_stream_unary(channel, group, method, timeout, protocol_options,
+                         metadata, metadata_transformer, request_iterator,
+                         request_serializer, response_deserializer):
+    """Invoke a stream-unary RPC asynchronously; returns a beta rendezvous."""
+    multi_callable = channel.stream_unary(
+        _common.fully_qualified_method(group, method),
+        request_serializer=request_serializer,
+        response_deserializer=response_deserializer)
+    effective_metadata = _effective_metadata(metadata, metadata_transformer)
+    response_future = multi_callable.future(
+        request_iterator,
+        timeout=timeout,
+        metadata=_metadata.unbeta(effective_metadata),
+        credentials=_credentials(protocol_options))
+    # The future doubles as the call object in the new API.
+    return _Rendezvous(response_future, None, response_future)
+
+
+def _stream_stream(channel, group, method, timeout, protocol_options, metadata,
+                   metadata_transformer, request_iterator, request_serializer,
+                   response_deserializer):
+    """Invoke a stream-stream RPC; returns a rendezvous wrapping the iterator."""
+    multi_callable = channel.stream_stream(
+        _common.fully_qualified_method(group, method),
+        request_serializer=request_serializer,
+        response_deserializer=response_deserializer)
+    effective_metadata = _effective_metadata(metadata, metadata_transformer)
+    response_iterator = multi_callable(
+        request_iterator,
+        timeout=timeout,
+        metadata=_metadata.unbeta(effective_metadata),
+        credentials=_credentials(protocol_options))
+    return _Rendezvous(None, response_iterator, response_iterator)
+
+
+class _UnaryUnaryMultiCallable(face.UnaryUnaryMultiCallable):
+    """Beta unary-unary multi-callable delegating to the new-API channel.
+
+    The event-driven invocation style is not supported.
+    """
+
+    def __init__(self, channel, group, method, metadata_transformer,
+                 request_serializer, response_deserializer):
+        self._channel = channel
+        self._group = group
+        self._method = method
+        self._metadata_transformer = metadata_transformer
+        self._request_serializer = request_serializer
+        self._response_deserializer = response_deserializer
+
+    def __call__(self,
+                 request,
+                 timeout,
+                 metadata=None,
+                 with_call=False,
+                 protocol_options=None):
+        return _blocking_unary_unary(self._channel, self._group, self._method,
+                                     timeout, with_call, protocol_options,
+                                     metadata, self._metadata_transformer,
+                                     request, self._request_serializer,
+                                     self._response_deserializer)
+
+    def future(self, request, timeout, metadata=None, protocol_options=None):
+        return _future_unary_unary(self._channel, self._group, self._method,
+                                   timeout, protocol_options, metadata,
+                                   self._metadata_transformer, request,
+                                   self._request_serializer,
+                                   self._response_deserializer)
+
+    def event(self,
+              request,
+              receiver,
+              abortion_callback,
+              timeout,
+              metadata=None,
+              protocol_options=None):
+        raise NotImplementedError()
+
+
+class _UnaryStreamMultiCallable(face.UnaryStreamMultiCallable):
+    """Beta unary-stream multi-callable delegating to the new-API channel.
+
+    The event-driven invocation style is not supported.
+    """
+
+    def __init__(self, channel, group, method, metadata_transformer,
+                 request_serializer, response_deserializer):
+        self._channel = channel
+        self._group = group
+        self._method = method
+        self._metadata_transformer = metadata_transformer
+        self._request_serializer = request_serializer
+        self._response_deserializer = response_deserializer
+
+    def __call__(self, request, timeout, metadata=None, protocol_options=None):
+        return _unary_stream(self._channel, self._group, self._method, timeout,
+                             protocol_options, metadata,
+                             self._metadata_transformer, request,
+                             self._request_serializer,
+                             self._response_deserializer)
+
+    def event(self,
+              request,
+              receiver,
+              abortion_callback,
+              timeout,
+              metadata=None,
+              protocol_options=None):
+        raise NotImplementedError()
+
+
class _StreamUnaryMultiCallable(face.StreamUnaryMultiCallable):
    """Invokes a single stream-request/unary-response method."""

    def __init__(self, channel, group, method, metadata_transformer,
                 request_serializer, response_deserializer):
        self._channel, self._group, self._method = channel, group, method
        self._metadata_transformer = metadata_transformer
        self._request_serializer = request_serializer
        self._response_deserializer = response_deserializer

    def __call__(self, request_iterator, timeout, metadata=None,
                 with_call=False, protocol_options=None):
        # Synchronous invocation; with_call also returns the call object.
        return _blocking_stream_unary(
            self._channel, self._group, self._method, timeout, with_call,
            protocol_options, metadata, self._metadata_transformer,
            request_iterator, self._request_serializer,
            self._response_deserializer)

    def future(self, request_iterator, timeout, metadata=None,
               protocol_options=None):
        # Asynchronous invocation returning a future.
        return _future_stream_unary(
            self._channel, self._group, self._method, timeout,
            protocol_options, metadata, self._metadata_transformer,
            request_iterator, self._request_serializer,
            self._response_deserializer)

    def event(self, receiver, abortion_callback, timeout, metadata=None,
              protocol_options=None):
        # Event-style invocation was never carried past the Beta API.
        raise NotImplementedError()
+
+
class _StreamStreamMultiCallable(face.StreamStreamMultiCallable):
    """Invokes a single stream-request/stream-response method."""

    def __init__(self, channel, group, method, metadata_transformer,
                 request_serializer, response_deserializer):
        self._channel, self._group, self._method = channel, group, method
        self._metadata_transformer = metadata_transformer
        self._request_serializer = request_serializer
        self._response_deserializer = response_deserializer

    def __call__(self, request_iterator, timeout, metadata=None,
                 protocol_options=None):
        # Returns an iterator of deserialized responses.
        return _stream_stream(
            self._channel, self._group, self._method, timeout,
            protocol_options, metadata, self._metadata_transformer,
            request_iterator, self._request_serializer,
            self._response_deserializer)

    def event(self, receiver, abortion_callback, timeout, metadata=None,
              protocol_options=None):
        # Event-style invocation was never carried past the Beta API.
        raise NotImplementedError()
+
+
class _GenericStub(face.GenericStub):
    """A face.GenericStub implemented over the post-Beta invocation helpers.

    Serializers and deserializers are looked up per (group, method) pair; a
    missing entry means "no (de)serialization" and None is passed through to
    the underlying invocation helper.
    """

    def __init__(self, channel, metadata_transformer, request_serializers,
                 response_deserializers):
        self._channel = channel
        self._metadata_transformer = metadata_transformer
        # Normalize None to an empty mapping so lookups below never fail.
        self._request_serializers = request_serializers or {}
        self._response_deserializers = response_deserializers or {}

    def _serialization(self, group, method):
        # Returns the (request_serializer, response_deserializer) pair
        # registered for the method; either element may be None.
        key = (group, method)
        return (self._request_serializers.get(key),
                self._response_deserializers.get(key))

    def blocking_unary_unary(self, group, method, request, timeout,
                             metadata=None, with_call=None,
                             protocol_options=None):
        serializer, deserializer = self._serialization(group, method)
        return _blocking_unary_unary(self._channel, group, method, timeout,
                                     with_call, protocol_options, metadata,
                                     self._metadata_transformer, request,
                                     serializer, deserializer)

    def future_unary_unary(self, group, method, request, timeout,
                           metadata=None, protocol_options=None):
        serializer, deserializer = self._serialization(group, method)
        return _future_unary_unary(self._channel, group, method, timeout,
                                   protocol_options, metadata,
                                   self._metadata_transformer, request,
                                   serializer, deserializer)

    def inline_unary_stream(self, group, method, request, timeout,
                            metadata=None, protocol_options=None):
        serializer, deserializer = self._serialization(group, method)
        return _unary_stream(self._channel, group, method, timeout,
                             protocol_options, metadata,
                             self._metadata_transformer, request, serializer,
                             deserializer)

    def blocking_stream_unary(self, group, method, request_iterator, timeout,
                              metadata=None, with_call=None,
                              protocol_options=None):
        serializer, deserializer = self._serialization(group, method)
        return _blocking_stream_unary(self._channel, group, method, timeout,
                                      with_call, protocol_options, metadata,
                                      self._metadata_transformer,
                                      request_iterator, serializer,
                                      deserializer)

    def future_stream_unary(self, group, method, request_iterator, timeout,
                            metadata=None, protocol_options=None):
        serializer, deserializer = self._serialization(group, method)
        return _future_stream_unary(self._channel, group, method, timeout,
                                    protocol_options, metadata,
                                    self._metadata_transformer,
                                    request_iterator, serializer,
                                    deserializer)

    def inline_stream_stream(self, group, method, request_iterator, timeout,
                             metadata=None, protocol_options=None):
        serializer, deserializer = self._serialization(group, method)
        return _stream_stream(self._channel, group, method, timeout,
                              protocol_options, metadata,
                              self._metadata_transformer, request_iterator,
                              serializer, deserializer)

    def event_unary_unary(self, group, method, request, receiver,
                          abortion_callback, timeout, metadata=None,
                          protocol_options=None):
        # Event-style invocation was never carried past the Beta API.
        raise NotImplementedError()

    def event_unary_stream(self, group, method, request, receiver,
                           abortion_callback, timeout, metadata=None,
                           protocol_options=None):
        raise NotImplementedError()

    def event_stream_unary(self, group, method, receiver, abortion_callback,
                           timeout, metadata=None, protocol_options=None):
        raise NotImplementedError()

    def event_stream_stream(self, group, method, receiver, abortion_callback,
                            timeout, metadata=None, protocol_options=None):
        raise NotImplementedError()

    def unary_unary(self, group, method):
        serializer, deserializer = self._serialization(group, method)
        return _UnaryUnaryMultiCallable(self._channel, group, method,
                                        self._metadata_transformer,
                                        serializer, deserializer)

    def unary_stream(self, group, method):
        serializer, deserializer = self._serialization(group, method)
        return _UnaryStreamMultiCallable(self._channel, group, method,
                                         self._metadata_transformer,
                                         serializer, deserializer)

    def stream_unary(self, group, method):
        serializer, deserializer = self._serialization(group, method)
        return _StreamUnaryMultiCallable(self._channel, group, method,
                                         self._metadata_transformer,
                                         serializer, deserializer)

    def stream_stream(self, group, method):
        serializer, deserializer = self._serialization(group, method)
        return _StreamStreamMultiCallable(self._channel, group, method,
                                          self._metadata_transformer,
                                          serializer, deserializer)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Nothing to clean up; never suppress exceptions.
        return False
+
+
class _DynamicStub(face.DynamicStub):
    """A face.DynamicStub resolving RPC methods via attribute access."""

    def __init__(self, backing_generic_stub, group, cardinalities):
        self._generic_stub = backing_generic_stub
        self._group = group
        # Maps method name -> cardinality.Cardinality value.
        self._cardinalities = cardinalities

    def __getattr__(self, attr):
        # Select the multi-callable factory matching the method cardinality.
        method_cardinality = self._cardinalities.get(attr)
        if method_cardinality is cardinality.Cardinality.UNARY_UNARY:
            factory = self._generic_stub.unary_unary
        elif method_cardinality is cardinality.Cardinality.UNARY_STREAM:
            factory = self._generic_stub.unary_stream
        elif method_cardinality is cardinality.Cardinality.STREAM_UNARY:
            factory = self._generic_stub.stream_unary
        elif method_cardinality is cardinality.Cardinality.STREAM_STREAM:
            factory = self._generic_stub.stream_stream
        else:
            raise AttributeError('_DynamicStub object has no attribute "%s"!' %
                                 attr)
        return factory(self._group, attr)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        return False
+
+
def generic_stub(channel, host, metadata_transformer, request_serializers,
                 response_deserializers):
    """Creates a face.GenericStub over the given channel.

    NOTE(review): `host` is accepted for interface compatibility but is not
    used by this implementation.
    """
    return _GenericStub(channel, metadata_transformer, request_serializers,
                        response_deserializers)
+
+
def dynamic_stub(channel, service, cardinalities, host, metadata_transformer,
                 request_serializers, response_deserializers):
    """Creates a face.DynamicStub for the named service.

    NOTE(review): `host` is accepted for interface compatibility but is not
    used by this implementation.
    """
    generic = _GenericStub(channel, metadata_transformer, request_serializers,
                           response_deserializers)
    return _DynamicStub(generic, service, cardinalities)
diff --git a/contrib/python/grpcio/py2/grpc/beta/_metadata.py b/contrib/python/grpcio/py2/grpc/beta/_metadata.py
new file mode 100644
index 0000000000..b7c8535285
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/beta/_metadata.py
@@ -0,0 +1,52 @@
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""API metadata conversion utilities."""
+
+import collections
+
+_Metadatum = collections.namedtuple('_Metadatum', (
+ 'key',
+ 'value',
+))
+
+
def _beta_metadatum(key, value):
    """Coerces one metadata pair to the bytes form used by the Beta API."""
    if not isinstance(key, bytes):
        key = key.encode('ascii')
    if not isinstance(value, bytes):
        value = value.encode('ascii')
    return _Metadatum(key, value)
+
+
def _metadatum(beta_key, beta_value):
    """Coerces one Beta metadata pair back to text (binary values excepted)."""
    key = beta_key if isinstance(beta_key, str) else beta_key.decode('utf8')
    # Values under '-bin' keys stay as raw bytes; everything else is decoded.
    if isinstance(beta_value, str) or key[-4:] == '-bin':
        value = beta_value
    else:
        value = beta_value.decode('utf8')
    return _Metadatum(key, value)
+
+
def beta(metadata):
    """Converts an iterable of metadata pairs to Beta (bytes) form."""
    if metadata is None:
        return ()
    return tuple(_beta_metadatum(key, value) for key, value in metadata)
+
+
def unbeta(beta_metadata):
    """Converts a Beta metadata sequence back to the modern (text) form."""
    if beta_metadata is None:
        return ()
    return tuple(
        _metadatum(beta_key, beta_value)
        for beta_key, beta_value in beta_metadata)
diff --git a/contrib/python/grpcio/py2/grpc/beta/_server_adaptations.py b/contrib/python/grpcio/py2/grpc/beta/_server_adaptations.py
new file mode 100644
index 0000000000..8843a3c550
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/beta/_server_adaptations.py
@@ -0,0 +1,385 @@
+# Copyright 2016 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Translates gRPC's server-side API into gRPC's server-side Beta API."""
+
+import collections
+import threading
+
+import grpc
+from grpc import _common
+from grpc.beta import _metadata
+from grpc.beta import interfaces
+from grpc.framework.common import cardinality
+from grpc.framework.common import style
+from grpc.framework.foundation import abandonment
+from grpc.framework.foundation import logging_pool
+from grpc.framework.foundation import stream
+from grpc.framework.interfaces.face import face
+
+# pylint: disable=too-many-return-statements
+
# Number of worker threads used when the caller supplies neither a thread
# pool nor a pool size.
_DEFAULT_POOL_SIZE = 8
+
+
class _ServerProtocolContext(interfaces.GRPCServicerContext):
    """Exposes a grpc.ServicerContext through the Beta GRPCServicerContext API."""

    def __init__(self, servicer_context):
        # The wrapped grpc.ServicerContext.
        self._servicer_context = servicer_context

    def peer(self):
        """Identifies the peer of the serviced RPC."""
        return self._servicer_context.peer()

    def disable_next_response_compression(self):
        pass  # TODO(https://github.com/grpc/grpc/issues/4078): design, implement.
+
+
class _FaceServicerContext(face.ServicerContext):
    """Exposes a grpc.ServicerContext through the Beta face.ServicerContext API.

    All behavior is delegated to the wrapped context, with metadata converted
    between the Beta and modern representations at the boundary.
    """

    def __init__(self, servicer_context):
        # The wrapped grpc.ServicerContext.
        self._servicer_context = servicer_context

    def is_active(self):
        return self._servicer_context.is_active()

    def time_remaining(self):
        return self._servicer_context.time_remaining()

    def add_abortion_callback(self, abortion_callback):
        # The post-Beta API dropped abortion callbacks entirely.
        raise NotImplementedError(
            'add_abortion_callback no longer supported server-side!')

    def cancel(self):
        self._servicer_context.cancel()

    def protocol_context(self):
        return _ServerProtocolContext(self._servicer_context)

    def invocation_metadata(self):
        # Convert grpc metadata into the Beta (bytes) representation.
        return _metadata.beta(self._servicer_context.invocation_metadata())

    def initial_metadata(self, initial_metadata):
        self._servicer_context.send_initial_metadata(
            _metadata.unbeta(initial_metadata))

    def terminal_metadata(self, terminal_metadata):
        self._servicer_context.set_terminal_metadata(
            _metadata.unbeta(terminal_metadata))

    def code(self, code):
        self._servicer_context.set_code(code)

    def details(self, details):
        self._servicer_context.set_details(details)
+
+
def _adapt_unary_request_inline(unary_request_inline):
    """Wraps an INLINE unary-request method so it receives a Beta context."""

    def adaptation(request, servicer_context):
        face_context = _FaceServicerContext(servicer_context)
        return unary_request_inline(request, face_context)

    return adaptation
+
+
def _adapt_stream_request_inline(stream_request_inline):
    """Wraps an INLINE stream-request method so it receives a Beta context."""

    def adaptation(request_iterator, servicer_context):
        face_context = _FaceServicerContext(servicer_context)
        return stream_request_inline(request_iterator, face_context)

    return adaptation
+
+
class _Callback(stream.Consumer):
    """A thread-safe buffer bridging an event-style producer to a puller.

    Producer-side stream.Consumer methods (consume / terminate /
    consume_and_terminate) append values and/or mark termination; the
    draw_one_value / draw_all_values methods block until a value,
    termination, or cancellation is observed.
    """

    def __init__(self):
        # One condition variable guards all of the state below.
        self._condition = threading.Condition()
        self._values = []
        self._terminated = False
        self._cancelled = False

    def consume(self, value):
        with self._condition:
            self._values.append(value)
            self._condition.notify_all()

    def terminate(self):
        with self._condition:
            self._terminated = True
            self._condition.notify_all()

    def consume_and_terminate(self, value):
        with self._condition:
            self._values.append(value)
            self._terminated = True
            self._condition.notify_all()

    def cancel(self):
        with self._condition:
            self._cancelled = True
            self._condition.notify_all()

    def draw_one_value(self):
        """Blocks until one value is available; returns None on termination.

        Raises:
          abandonment.Abandoned: If cancel() was called.
        """
        with self._condition:
            while True:
                # Cancellation wins over buffered values and termination.
                if self._cancelled:
                    raise abandonment.Abandoned()
                elif self._values:
                    return self._values.pop(0)
                elif self._terminated:
                    return None
                else:
                    self._condition.wait()

    def draw_all_values(self):
        """Blocks until termination, then returns all buffered values at once.

        Raises:
          abandonment.Abandoned: If cancel() was called.
        """
        with self._condition:
            while True:
                if self._cancelled:
                    raise abandonment.Abandoned()
                elif self._terminated:
                    all_values = tuple(self._values)
                    # The buffer is dead after a full drain.
                    self._values = None
                    return all_values
                else:
                    self._condition.wait()
+
+
+def _run_request_pipe_thread(request_iterator, request_consumer,
+ servicer_context):
+ thread_joined = threading.Event()
+
+ def pipe_requests():
+ for request in request_iterator:
+ if not servicer_context.is_active() or thread_joined.is_set():
+ return
+ request_consumer.consume(request)
+ if not servicer_context.is_active() or thread_joined.is_set():
+ return
+ request_consumer.terminate()
+
+ request_pipe_thread = threading.Thread(target=pipe_requests)
+ request_pipe_thread.daemon = True
+ request_pipe_thread.start()
+
+
def _adapt_unary_unary_event(unary_unary_event):
    """Adapts an EVENT-style unary-unary method to a blocking behavior."""

    def adaptation(request, servicer_context):
        callback = _Callback()
        registered = servicer_context.add_callback(callback.cancel)
        if not registered:
            # The RPC already terminated before we could register.
            raise abandonment.Abandoned()
        unary_unary_event(request, callback.consume_and_terminate,
                          _FaceServicerContext(servicer_context))
        # consume_and_terminate delivers exactly one value.
        return callback.draw_all_values()[0]

    return adaptation
+
+
def _adapt_unary_stream_event(unary_stream_event):
    """Adapts an EVENT-style unary-stream method to a response generator."""

    def adaptation(request, servicer_context):
        callback = _Callback()
        registered = servicer_context.add_callback(callback.cancel)
        if not registered:
            raise abandonment.Abandoned()
        unary_stream_event(request, callback,
                           _FaceServicerContext(servicer_context))
        # Yield responses until the callback signals termination with None.
        response = callback.draw_one_value()
        while response is not None:
            yield response
            response = callback.draw_one_value()

    return adaptation
+
+
def _adapt_stream_unary_event(stream_unary_event):
    """Adapts an EVENT-style stream-unary method to a blocking behavior."""

    def adaptation(request_iterator, servicer_context):
        callback = _Callback()
        registered = servicer_context.add_callback(callback.cancel)
        if not registered:
            raise abandonment.Abandoned()
        request_consumer = stream_unary_event(
            callback.consume_and_terminate,
            _FaceServicerContext(servicer_context))
        # Requests are piped to the consumer on a background thread.
        _run_request_pipe_thread(request_iterator, request_consumer,
                                 servicer_context)
        return callback.draw_all_values()[0]

    return adaptation
+
+
def _adapt_stream_stream_event(stream_stream_event):
    """Adapts an EVENT-style stream-stream method to a response generator."""

    def adaptation(request_iterator, servicer_context):
        callback = _Callback()
        registered = servicer_context.add_callback(callback.cancel)
        if not registered:
            raise abandonment.Abandoned()
        request_consumer = stream_stream_event(
            callback, _FaceServicerContext(servicer_context))
        # Requests are piped to the consumer on a background thread.
        _run_request_pipe_thread(request_iterator, request_consumer,
                                 servicer_context)
        # Yield responses until the callback signals termination with None.
        response = callback.draw_one_value()
        while response is not None:
            yield response
            response = callback.draw_one_value()

    return adaptation
+
+
class _SimpleMethodHandler(
        collections.namedtuple('_MethodHandler',
                               ('request_streaming', 'response_streaming',
                                'request_deserializer', 'response_serializer',
                                'unary_unary', 'unary_stream', 'stream_unary',
                                'stream_stream')), grpc.RpcMethodHandler):
    """An immutable grpc.RpcMethodHandler carrying exactly one behavior."""
+
+
def _simple_method_handler(implementation, request_deserializer,
                           response_serializer):
    """Builds a grpc.RpcMethodHandler from a Beta face.MethodImplementation.

    Raises:
      ValueError: If the implementation's style/cardinality combination is
        not recognized.
    """

    def handler(request_streaming, response_streaming, **behavior):
        # Exactly one of the four behavior slots is supplied by the caller;
        # the rest default to None, as the handler protocol requires.
        return _SimpleMethodHandler(
            request_streaming=request_streaming,
            response_streaming=response_streaming,
            request_deserializer=request_deserializer,
            response_serializer=response_serializer,
            unary_unary=behavior.get('unary_unary'),
            unary_stream=behavior.get('unary_stream'),
            stream_unary=behavior.get('stream_unary'),
            stream_stream=behavior.get('stream_stream'))

    if implementation.style is style.Service.INLINE:
        if implementation.cardinality is cardinality.Cardinality.UNARY_UNARY:
            return handler(False, False,
                           unary_unary=_adapt_unary_request_inline(
                               implementation.unary_unary_inline))
        elif implementation.cardinality is cardinality.Cardinality.UNARY_STREAM:
            return handler(False, True,
                           unary_stream=_adapt_unary_request_inline(
                               implementation.unary_stream_inline))
        elif implementation.cardinality is cardinality.Cardinality.STREAM_UNARY:
            return handler(True, False,
                           stream_unary=_adapt_stream_request_inline(
                               implementation.stream_unary_inline))
        elif implementation.cardinality is cardinality.Cardinality.STREAM_STREAM:
            return handler(True, True,
                           stream_stream=_adapt_stream_request_inline(
                               implementation.stream_stream_inline))
    elif implementation.style is style.Service.EVENT:
        if implementation.cardinality is cardinality.Cardinality.UNARY_UNARY:
            return handler(False, False,
                           unary_unary=_adapt_unary_unary_event(
                               implementation.unary_unary_event))
        elif implementation.cardinality is cardinality.Cardinality.UNARY_STREAM:
            return handler(False, True,
                           unary_stream=_adapt_unary_stream_event(
                               implementation.unary_stream_event))
        elif implementation.cardinality is cardinality.Cardinality.STREAM_UNARY:
            return handler(True, False,
                           stream_unary=_adapt_stream_unary_event(
                               implementation.stream_unary_event))
        elif implementation.cardinality is cardinality.Cardinality.STREAM_STREAM:
            return handler(True, True,
                           stream_stream=_adapt_stream_stream_event(
                               implementation.stream_stream_event))
    # Unknown style or cardinality: mirror the original's hard failure.
    raise ValueError()
+
+
def _flatten_method_pair_map(method_pair_map):
    """Re-keys a {(group, method): value} map by fully-qualified method name."""
    if method_pair_map is None:
        method_pair_map = {}
    return {
        _common.fully_qualified_method(pair[0], pair[1]): value
        for pair, value in method_pair_map.items()
    }
+
+
class _GenericRpcHandler(grpc.GenericRpcHandler):
    """Routes incoming RPCs to Beta-style method implementations."""

    def __init__(self, method_implementations, multi_method_implementation,
                 request_deserializers, response_serializers):
        # All maps are re-keyed by fully-qualified method name up front.
        self._method_implementations = _flatten_method_pair_map(
            method_implementations)
        self._request_deserializers = _flatten_method_pair_map(
            request_deserializers)
        self._response_serializers = _flatten_method_pair_map(
            response_serializers)
        self._multi_method_implementation = multi_method_implementation

    def service(self, handler_call_details):
        """Returns an RpcMethodHandler for the method, or None if unknown."""
        method = handler_call_details.method
        method_implementation = self._method_implementations.get(method)
        if method_implementation is not None:
            return _simple_method_handler(
                method_implementation,
                self._request_deserializers.get(method),
                self._response_serializers.get(method))
        if self._multi_method_implementation is None:
            return None
        try:
            return None  # TODO(nathaniel): call the multimethod.
        except face.NoSuchMethodError:
            return None
+
+
class _Server(interfaces.Server):
    """An interfaces.Server delegating to a wrapped grpc server."""

    def __init__(self, grpc_server):
        # The wrapped server object supplied by the caller.
        self._grpc_server = grpc_server

    def add_insecure_port(self, address):
        return self._grpc_server.add_insecure_port(address)

    def add_secure_port(self, address, server_credentials):
        return self._grpc_server.add_secure_port(address, server_credentials)

    def start(self):
        self._grpc_server.start()

    def stop(self, grace):
        return self._grpc_server.stop(grace)

    def __enter__(self):
        # Entering the context starts the server.
        self._grpc_server.start()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # No-grace shutdown on exit; exceptions are never suppressed.
        self._grpc_server.stop(None)
        return False
+
+
def server(service_implementations, multi_method_implementation,
           request_deserializers, response_serializers, thread_pool,
           thread_pool_size):
    """Creates an interfaces.Server backed by a grpc server.

    When no thread_pool is supplied, a logging pool of thread_pool_size
    (default _DEFAULT_POOL_SIZE) workers is created.
    """
    handler = _GenericRpcHandler(service_implementations,
                                 multi_method_implementation,
                                 request_deserializers, response_serializers)
    if thread_pool is None:
        pool_size = (_DEFAULT_POOL_SIZE
                     if thread_pool_size is None else thread_pool_size)
        effective_thread_pool = logging_pool.pool(pool_size)
    else:
        effective_thread_pool = thread_pool
    return _Server(grpc.server(effective_thread_pool, handlers=(handler,)))
diff --git a/contrib/python/grpcio/py2/grpc/beta/implementations.py b/contrib/python/grpcio/py2/grpc/beta/implementations.py
new file mode 100644
index 0000000000..43312aac7c
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/beta/implementations.py
@@ -0,0 +1,311 @@
+# Copyright 2015-2016 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Entry points into the Beta API of gRPC Python."""
+
+# threading is referenced from specification in this module.
+import threading # pylint: disable=unused-import
+
+# interfaces, cardinality, and face are referenced from specification in this
+# module.
+import grpc
+from grpc import _auth
+from grpc.beta import _client_adaptations
+from grpc.beta import _metadata
+from grpc.beta import _server_adaptations
+from grpc.beta import interfaces # pylint: disable=unused-import
+from grpc.framework.common import cardinality # pylint: disable=unused-import
+from grpc.framework.interfaces.face import \
+ face # pylint: disable=unused-import
+
+# pylint: disable=too-many-arguments
+
# The Beta credentials types are direct aliases of their grpc counterparts.
ChannelCredentials = grpc.ChannelCredentials
ssl_channel_credentials = grpc.ssl_channel_credentials
CallCredentials = grpc.CallCredentials
+
+
def metadata_call_credentials(metadata_plugin, name=None):
    """Wraps a Beta metadata plugin as grpc CallCredentials."""

    def plugin(context, callback):

        def wrapped_callback(beta_metadata, error):
            # Convert Beta-form metadata before handing it back to grpc.
            callback(_metadata.unbeta(beta_metadata), error)

        metadata_plugin(context, wrapped_callback)

    return grpc.metadata_call_credentials(plugin, name=name)
+
+
def google_call_credentials(credentials):
    """Construct CallCredentials from GoogleCredentials.

    Args:
      credentials: A GoogleCredentials object from the oauth2client library.

    Returns:
      A CallCredentials object for use in a GRPCCallOptions object.
    """
    google_credentials = _auth.GoogleCallCredentials(credentials)
    return metadata_call_credentials(google_credentials)
+
+
# More credentials helpers re-exported unchanged from grpc.
access_token_call_credentials = grpc.access_token_call_credentials
composite_call_credentials = grpc.composite_call_credentials
composite_channel_credentials = grpc.composite_channel_credentials
+
+
class Channel(object):
    """A channel to a remote host through which RPCs may be conducted.

    Only the "subscribe" and "unsubscribe" methods are supported for
    application use. This class' instance constructor and all other
    attributes are unsupported.
    """

    def __init__(self, channel):
        # The wrapped channel; the stub factories read the protected
        # _channel attribute directly.
        self._channel = channel

    def subscribe(self, callback, try_to_connect=None):
        """Subscribes to this Channel's connectivity.

        Args:
          callback: A callable to be invoked with an
            interfaces.ChannelConnectivity value immediately upon
            subscription and again for every subsequent connectivity change
            until unsubscribed.
          try_to_connect: Whether this Channel should attempt to connect if
            it is not already connected and ready to conduct RPCs.
        """
        self._channel.subscribe(callback, try_to_connect=try_to_connect)

    def unsubscribe(self, callback):
        """Unsubscribes a previously registered connectivity callback.

        Args:
          callback: A callable previously passed to "subscribe".
        """
        self._channel.unsubscribe(callback)
+
+
def insecure_channel(host, port):
    """Creates an insecure Channel to a remote host.

    Args:
      host: The name of the remote host to which to connect.
      port: The port of the remote host to which to connect, or None to use
        only the 'host' part as the target.

    Returns:
      A Channel to the remote host through which RPCs may be conducted.
    """
    target = host if port is None else '%s:%d' % (host, port)
    return Channel(grpc.insecure_channel(target))
+
+
def secure_channel(host, port, channel_credentials):
    """Creates a secure Channel to a remote host.

    Args:
      host: The name of the remote host to which to connect.
      port: The port of the remote host to which to connect, or None to use
        only the 'host' part as the target.
      channel_credentials: A ChannelCredentials.

    Returns:
      A secure Channel to the remote host through which RPCs may be
      conducted.
    """
    target = host if port is None else '%s:%d' % (host, port)
    return Channel(grpc.secure_channel(target, channel_credentials))
+
+
class StubOptions(object):
    """A value bundling the various options used when creating a stub.

    This class and its instances have no supported interface - it exists to
    define the type of its instances and its instances exist to be passed to
    other functions.
    """

    def __init__(self, host, request_serializers, response_deserializers,
                 metadata_transformer, thread_pool, thread_pool_size):
        (self.host, self.request_serializers, self.response_deserializers,
         self.metadata_transformer, self.thread_pool,
         self.thread_pool_size) = (host, request_serializers,
                                   response_deserializers,
                                   metadata_transformer, thread_pool,
                                   thread_pool_size)


# The default: every option unset.
_EMPTY_STUB_OPTIONS = StubOptions(None, None, None, None, None, None)
+
+
def stub_options(host=None,
                 request_serializers=None,
                 response_deserializers=None,
                 metadata_transformer=None,
                 thread_pool=None,
                 thread_pool_size=None):
    """Creates a StubOptions value to be passed at stub creation.

    All parameters are optional and should always be passed by keyword.

    Args:
      host: A host string to set on RPC calls.
      request_serializers: A dictionary from service name-method name pair to
        request serialization behavior.
      response_deserializers: A dictionary from service name-method name pair
        to response deserialization behavior.
      metadata_transformer: A callable that given a metadata object produces
        another metadata object to be used in the underlying communication on
        the wire.
      thread_pool: A thread pool to use in stubs.
      thread_pool_size: The size of thread pool to create for use in stubs;
        ignored if thread_pool has been passed.

    Returns:
      A StubOptions value created from the passed parameters.
    """
    return StubOptions(host, request_serializers, response_deserializers,
                       metadata_transformer, thread_pool, thread_pool_size)
+
+
def generic_stub(channel, options=None):
    """Creates a face.GenericStub on which RPCs can be made.

    Args:
      channel: A Channel for use by the created stub.
      options: A StubOptions customizing the created stub.

    Returns:
      A face.GenericStub on which RPCs can be made.
    """
    effective_options = options if options is not None else _EMPTY_STUB_OPTIONS
    return _client_adaptations.generic_stub(
        channel._channel,  # pylint: disable=protected-access
        effective_options.host,
        effective_options.metadata_transformer,
        effective_options.request_serializers,
        effective_options.response_deserializers)
+
+
def dynamic_stub(channel, service, cardinalities, options=None):
    """Creates a face.DynamicStub with which RPCs can be invoked.

    Args:
      channel: A Channel for the returned face.DynamicStub to use.
      service: The package-qualified full name of the service.
      cardinalities: A dictionary from RPC method name to
        cardinality.Cardinality value identifying the cardinality of the RPC
        method.
      options: An optional StubOptions value further customizing the
        functionality of the returned face.DynamicStub.

    Returns:
      A face.DynamicStub with which RPCs can be invoked.
    """
    effective_options = options if options is not None else _EMPTY_STUB_OPTIONS
    return _client_adaptations.dynamic_stub(
        channel._channel,  # pylint: disable=protected-access
        service,
        cardinalities,
        effective_options.host,
        effective_options.metadata_transformer,
        effective_options.request_serializers,
        effective_options.response_deserializers)
+
+
# Server-side credentials types are direct aliases of their grpc counterparts.
ServerCredentials = grpc.ServerCredentials
ssl_server_credentials = grpc.ssl_server_credentials
+
+
class ServerOptions(object):
    """A value bundling the various options used when creating a Server.

    This class and its instances have no supported interface - it exists to
    define the type of its instances and its instances exist to be passed to
    other functions.
    """

    def __init__(self, multi_method_implementation, request_deserializers,
                 response_serializers, thread_pool, thread_pool_size,
                 default_timeout, maximum_timeout):
        (self.multi_method_implementation, self.request_deserializers,
         self.response_serializers, self.thread_pool, self.thread_pool_size,
         self.default_timeout,
         self.maximum_timeout) = (multi_method_implementation,
                                  request_deserializers, response_serializers,
                                  thread_pool, thread_pool_size,
                                  default_timeout, maximum_timeout)


# The default: every option unset.
_EMPTY_SERVER_OPTIONS = ServerOptions(None, None, None, None, None, None, None)
+
+
def server_options(multi_method_implementation=None,
                   request_deserializers=None,
                   response_serializers=None,
                   thread_pool=None,
                   thread_pool_size=None,
                   default_timeout=None,
                   maximum_timeout=None):
    """Creates a ServerOptions value to be passed at server creation.

    All parameters are optional and should always be passed by keyword.

    Args:
      multi_method_implementation: A face.MultiMethodImplementation to be
        called to service an RPC if the server has no specific method
        implementation for the name of the RPC for which service was
        requested.
      request_deserializers: A dictionary from service name-method name pair
        to request deserialization behavior.
      response_serializers: A dictionary from service name-method name pair
        to response serialization behavior.
      thread_pool: A thread pool to use in stubs.
      thread_pool_size: The size of thread pool to create for use in stubs;
        ignored if thread_pool has been passed.
      default_timeout: A duration in seconds to allow for RPC service when
        servicing RPCs that did not include a timeout value when invoked.
      maximum_timeout: A duration in seconds to allow for RPC service when
        servicing RPCs no matter what timeout value was passed when the RPC
        was invoked.

    Returns:
      A ServerOptions value created from the passed parameters.
    """
    return ServerOptions(multi_method_implementation, request_deserializers,
                         response_serializers, thread_pool, thread_pool_size,
                         default_timeout, maximum_timeout)
+
+
def server(service_implementations, options=None):
    """Creates an interfaces.Server with which RPCs can be serviced.

    Args:
      service_implementations: A dictionary from service name-method name
        pair to face.MethodImplementation.
      options: An optional ServerOptions value further customizing the
        functionality of the returned Server.

    Returns:
      An interfaces.Server with which RPCs can be serviced.
    """
    effective_options = (_EMPTY_SERVER_OPTIONS
                         if options is None else options)
    return _server_adaptations.server(
        service_implementations,
        effective_options.multi_method_implementation,
        effective_options.request_deserializers,
        effective_options.response_serializers,
        effective_options.thread_pool,
        effective_options.thread_pool_size)
diff --git a/contrib/python/grpcio/py2/grpc/beta/interfaces.py b/contrib/python/grpcio/py2/grpc/beta/interfaces.py
new file mode 100644
index 0000000000..a1713329cc
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/beta/interfaces.py
@@ -0,0 +1,164 @@
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Constants and interfaces of the Beta API of gRPC Python."""
+
+import abc
+
+import grpc
+import six
+
# Re-export the supported-API connectivity enum under its Beta-API name.
ChannelConnectivity = grpc.ChannelConnectivity
# FATAL_FAILURE was a Beta-API name for SHUTDOWN
ChannelConnectivity.FATAL_FAILURE = ChannelConnectivity.SHUTDOWN

# Re-export the supported-API status codes for Beta-API users.
StatusCode = grpc.StatusCode
+
+
class GRPCCallOptions(object):
    """Bundles gRPC-specific options supplied at RPC invocation.

    Neither this class nor its instances expose any supported behavior; the
    class exists only to define the type of its instances, and instances exist
    only to be passed to other functions.
    """

    def __init__(self, disable_compression, subcall_of, credentials):
        # Instances are plain value holders; attributes mirror the parameters.
        self.disable_compression = disable_compression
        self.subcall_of = subcall_of
        self.credentials = credentials
+
+
def grpc_call_options(disable_compression=False, credentials=None):
    """Creates a GRPCCallOptions value to be passed at RPC invocation.

    All parameters are optional and should always be passed by keyword.

    Args:
      disable_compression: A boolean indicating whether or not compression
        should be disabled for the request object of the RPC. Only valid for
        request-unary RPCs.
      credentials: A CallCredentials object to use for the invoked RPC.

    Returns:
      A GRPCCallOptions value constructed from the passed parameters.
    """
    # subcall_of is not exposed through this constructor function.
    call_options = GRPCCallOptions(disable_compression, None, credentials)
    return call_options
+
+
# Beta-API aliases for the supported-API auth metadata plugin types.
GRPCAuthMetadataContext = grpc.AuthMetadataContext
GRPCAuthMetadataPluginCallback = grpc.AuthMetadataPluginCallback
GRPCAuthMetadataPlugin = grpc.AuthMetadataPlugin
+
+
# Abstract interface: concrete implementations are supplied by the gRPC
# runtime and handed to servicing code; applications do not subclass this.
class GRPCServicerContext(six.with_metaclass(abc.ABCMeta)):
    """Exposes gRPC-specific options and behaviors to code servicing RPCs."""

    @abc.abstractmethod
    def peer(self):
        """Identifies the peer that invoked the RPC being serviced.

        Returns:
          A string identifying the peer that invoked the RPC being serviced.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def disable_next_response_compression(self):
        """Disables compression of the next response passed by the application."""
        raise NotImplementedError()
+
+
# Abstract interface: concrete implementations are supplied by the gRPC
# runtime; applications consume rather than subclass this.
class GRPCInvocationContext(six.with_metaclass(abc.ABCMeta)):
    """Exposes gRPC-specific options and behaviors to code invoking RPCs."""

    @abc.abstractmethod
    def disable_next_request_compression(self):
        """Disables compression of the next request passed by the application."""
        raise NotImplementedError()
+
+
class Server(six.with_metaclass(abc.ABCMeta)):
    """Services RPCs."""

    @abc.abstractmethod
    def add_insecure_port(self, address):
        """Reserves a port for insecure RPC service once this Server becomes active.

        This method may only be called before this Server's start method is
        called.

        Args:
          address: The address for which to open a port.

        Returns:
          An integer port on which RPCs will be serviced after this link has been
          started. This is typically the same number as the port number contained
          in the passed address, but will likely be different if the port number
          contained in the passed address was zero.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def add_secure_port(self, address, server_credentials):
        """Reserves a port for secure RPC service after this Server becomes active.

        This method may only be called before this Server's start method is
        called.

        Args:
          address: The address for which to open a port.
          server_credentials: A ServerCredentials.

        Returns:
          An integer port on which RPCs will be serviced after this link has been
          started. This is typically the same number as the port number contained
          in the passed address, but will likely be different if the port number
          contained in the passed address was zero.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def start(self):
        """Starts this Server's service of RPCs.

        This method may only be called while the server is not serving RPCs (i.e. it
        is not idempotent).
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def stop(self, grace):
        """Stops this Server's service of RPCs.

        All calls to this method immediately stop service of new RPCs. When existing
        RPCs are aborted is controlled by the grace period parameter passed to this
        method.

        This method may be called at any time and is idempotent. Passing a smaller
        grace value than has been passed in a previous call will have the effect of
        stopping the Server sooner. Passing a larger grace value than has been
        passed in a previous call will not have the effect of stopping the server
        later.

        Args:
          grace: A duration of time in seconds to allow existing RPCs to complete
            before being aborted by this Server's stopping. May be zero for
            immediate abortion of all in-progress RPCs.

        Returns:
          A threading.Event that will be set when this Server has completely
          stopped. The returned event may not be set until after the full grace
          period (if some ongoing RPC continues for the full length of the period)
          or it may be set much sooner (such as if this Server had no RPCs underway
          at the time it was stopped or if all RPCs that it had underway completed
          very early in the grace period).
        """
        raise NotImplementedError()
diff --git a/contrib/python/grpcio/py2/grpc/beta/utilities.py b/contrib/python/grpcio/py2/grpc/beta/utilities.py
new file mode 100644
index 0000000000..fe3ce606c9
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/beta/utilities.py
@@ -0,0 +1,149 @@
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Utilities for the gRPC Python Beta API."""
+
+import threading
+import time
+
+# implementations is referenced from specification in this module.
+from grpc.beta import implementations # pylint: disable=unused-import
+from grpc.beta import interfaces
+from grpc.framework.foundation import callable_util
+from grpc.framework.foundation import future
+
# Message logged when a "done" callback registered on the connectivity
# future raises an exception.
_DONE_CALLBACK_EXCEPTION_LOG_MESSAGE = (
    'Exception calling connectivity future "done" callback!')
+
+
class _ChannelReadyFuture(future.Future):
    """A future.Future that matures once a channel's connectivity is READY.

    The future subscribes to the channel's connectivity updates and matures
    with a None result when interfaces.ChannelConnectivity.READY is observed.
    """

    def __init__(self, channel):
        self._condition = threading.Condition()
        self._channel = channel

        self._matured = False
        self._cancelled = False
        # Invariant: a list while pending; None once matured or cancelled
        # (by then the callbacks have been drained and invoked exactly once).
        self._done_callbacks = []

    def _block(self, timeout):
        """Waits for maturation; raises on cancellation or timeout."""
        until = None if timeout is None else time.time() + timeout
        with self._condition:
            while True:
                if self._cancelled:
                    raise future.CancelledError()
                elif self._matured:
                    return
                else:
                    if until is None:
                        self._condition.wait()
                    else:
                        remaining = until - time.time()
                        if remaining < 0:
                            raise future.TimeoutError()
                        else:
                            self._condition.wait(timeout=remaining)

    def _update(self, connectivity):
        """Connectivity-subscription callback; matures the future on READY."""
        with self._condition:
            if (not self._cancelled and
                    connectivity is interfaces.ChannelConnectivity.READY):
                self._matured = True
                self._channel.unsubscribe(self._update)
                self._condition.notify_all()
                done_callbacks = tuple(self._done_callbacks)
                self._done_callbacks = None
            else:
                return

        # Invoke callbacks outside the lock so a callback that re-enters this
        # future cannot deadlock.
        for done_callback in done_callbacks:
            callable_util.call_logging_exceptions(
                done_callback, _DONE_CALLBACK_EXCEPTION_LOG_MESSAGE, self)

    def cancel(self):
        """See future.Future.cancel.

        Bug fix: a second call to cancel() previously crashed with a TypeError
        because the already-drained callback list (None) was re-iterated.
        Repeated cancellation is now idempotent and returns True again,
        matching concurrent.futures semantics.
        """
        with self._condition:
            if self._cancelled:
                return True
            elif self._matured:
                return False
            else:
                self._cancelled = True
                self._channel.unsubscribe(self._update)
                self._condition.notify_all()
                done_callbacks = tuple(self._done_callbacks)
                self._done_callbacks = None

        for done_callback in done_callbacks:
            callable_util.call_logging_exceptions(
                done_callback, _DONE_CALLBACK_EXCEPTION_LOG_MESSAGE, self)

        return True

    def cancelled(self):
        with self._condition:
            return self._cancelled

    def running(self):
        with self._condition:
            return not self._cancelled and not self._matured

    def done(self):
        with self._condition:
            return self._cancelled or self._matured

    def result(self, timeout=None):
        # A ready channel carries no value; maturation itself is the result.
        self._block(timeout)
        return None

    def exception(self, timeout=None):
        # Maturation never raises, so there is never an exception to report.
        self._block(timeout)
        return None

    def traceback(self, timeout=None):
        self._block(timeout)
        return None

    def add_done_callback(self, fn):
        with self._condition:
            if not self._cancelled and not self._matured:
                self._done_callbacks.append(fn)
                return

        # Already terminal: call back immediately, outside the lock.
        fn(self)

    def start(self):
        """Subscribes to channel connectivity, asking the channel to connect."""
        with self._condition:
            self._channel.subscribe(self._update, try_to_connect=True)

    def __del__(self):
        with self._condition:
            if not self._cancelled and not self._matured:
                self._channel.unsubscribe(self._update)
+
+
def channel_ready_future(channel):
    """Creates a future.Future tracking when an implementations.Channel is ready.

    Cancelling the returned future.Future does not tell the given
    implementations.Channel to abandon attempts it may have been making to
    connect; cancelling merely deactivates the returned future.Future's
    subscription to the given implementations.Channel's connectivity.

    Args:
      channel: An implementations.Channel.

    Returns:
      A future.Future that matures when the given Channel has connectivity
      interfaces.ChannelConnectivity.READY.
    """
    connectivity_future = _ChannelReadyFuture(channel)
    connectivity_future.start()
    return connectivity_future
diff --git a/contrib/python/grpcio/py2/grpc/experimental/__init__.py b/contrib/python/grpcio/py2/grpc/experimental/__init__.py
new file mode 100644
index 0000000000..f0d142c981
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/experimental/__init__.py
@@ -0,0 +1,128 @@
+# Copyright 2018 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""gRPC's experimental APIs.
+
+These APIs are subject to be removed during any minor version release.
+"""
+
+import copy
+import functools
+import sys
+import warnings
+
+import grpc
+from grpc._cython import cygrpc as _cygrpc
+
# Names of experimental APIs that have already emitted a warning; used so
# each API warns at most once per process.
_EXPERIMENTAL_APIS_USED = set()
+
+
class ChannelOptions(object):
    """Enumerates channel options unique to gRPC Python.

    This enumeration is part of an EXPERIMENTAL API.

    Attributes:
      SingleThreadedUnaryStream: Perform unary-stream RPCs on a single thread.
    """
    SingleThreadedUnaryStream = "SingleThreadedUnaryStream"
+
+
class UsageError(Exception):
    """Raised by the gRPC library when an API is used in a disallowed way."""
+
+
# It's important that there be a single insecure credentials object so that its
# hash is deterministic and can be used for indexing in the simple stubs cache.
# Created once at import time and returned by insecure_channel_credentials().
_insecure_channel_credentials = grpc.ChannelCredentials(
    _cygrpc.channel_credentials_insecure())
+
+
def insecure_channel_credentials():
    """Returns a ChannelCredentials for use with an insecure channel.

    THIS IS AN EXPERIMENTAL API.
    """
    # Always the same module-level singleton, keeping its hash deterministic
    # for the simple-stubs channel cache.
    return _insecure_channel_credentials
+
+
class ExperimentalApiWarning(Warning):
    """Warning category flagging use of an experimental API."""
+
+
def _warn_experimental(api_name, stack_offset):
    """Emits an ExperimentalApiWarning the first time an API name is seen."""
    if api_name in _EXPERIMENTAL_APIS_USED:
        return
    _EXPERIMENTAL_APIS_USED.add(api_name)
    msg = ("'{}' is an experimental API. It is subject to change or "
           "removal between minor releases. Proceed with caution.").format(
               api_name)
    # stack_offset lets wrappers attribute the warning to their caller.
    warnings.warn(msg, ExperimentalApiWarning, stacklevel=2 + stack_offset)
+
+
def experimental_api(f):
    """Decorator marking f as experimental; warns once on first invocation."""

    @functools.wraps(f)
    def _experimental_wrapper(*args, **kwargs):
        _warn_experimental(f.__name__, 1)
        return f(*args, **kwargs)

    return _experimental_wrapper
+
+
def wrap_server_method_handler(wrapper, handler):
    """Wraps the server method handler function.

    The server implementation requires all server handlers being wrapped as
    RpcMethodHandler objects. This helper function eases the pain of writing
    server handler wrappers: it rebuilds the handler with its single populated
    behavior passed through the given wrapper.

    Args:
      wrapper: A wrapper function that takes in a method handler behavior
        (the actual function) and returns a wrapped function.
      handler: A RpcMethodHandler object to be wrapped.

    Returns:
      A newly created RpcMethodHandler, or None when no handler was given.
    """
    if not handler:
        return None

    # NOTE(lidiz) _replace is a public API:
    # https://docs.python.org/dev/library/collections.html
    if handler.request_streaming:
        if handler.response_streaming:
            return handler._replace(
                stream_stream=wrapper(handler.stream_stream))
        return handler._replace(stream_unary=wrapper(handler.stream_unary))
    if handler.response_streaming:
        return handler._replace(unary_stream=wrapper(handler.unary_stream))
    return handler._replace(unary_unary=wrapper(handler.unary_unary))
+
+
__all__ = (
    'ChannelOptions',
    'ExperimentalApiWarning',
    'UsageError',
    'insecure_channel_credentials',
    'wrap_server_method_handler',
)

if sys.version_info > (3, 6):
    from grpc._simple_stubs import stream_stream
    from grpc._simple_stubs import stream_unary
    from grpc._simple_stubs import unary_stream
    from grpc._simple_stubs import unary_unary

    # BUG FIX: __all__ must contain *names* (strings), not the function
    # objects themselves; the previous tuple of callables broke
    # "from grpc.experimental import *" and other __all__ consumers.
    __all__ = __all__ + ('unary_unary', 'unary_stream', 'stream_unary',
                         'stream_stream')
diff --git a/contrib/python/grpcio/py2/grpc/experimental/gevent.py b/contrib/python/grpcio/py2/grpc/experimental/gevent.py
new file mode 100644
index 0000000000..159d612b4e
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/experimental/gevent.py
@@ -0,0 +1,27 @@
+# Copyright 2018 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""gRPC's Python gEvent APIs."""
+
+from grpc._cython import cygrpc as _cygrpc
+
+
def init_gevent():
    """Patches gRPC's libraries to be compatible with gevent.

    This must be called AFTER the python standard lib has been patched,
    but BEFORE creating any gRPC objects.

    In order for progress to be made, the application must drive the event loop.
    """
    # Delegates entirely to the Cython extension's gevent hook.
    _cygrpc.init_grpc_gevent()
diff --git a/contrib/python/grpcio/py2/grpc/experimental/session_cache.py b/contrib/python/grpcio/py2/grpc/experimental/session_cache.py
new file mode 100644
index 0000000000..5c55f7c327
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/experimental/session_cache.py
@@ -0,0 +1,45 @@
+# Copyright 2018 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""gRPC's APIs for TLS Session Resumption support"""
+
+from grpc._cython import cygrpc as _cygrpc
+
+
def ssl_session_cache_lru(capacity):
    """Creates an SSLSessionCache with LRU replacement policy

    Args:
      capacity: Size of the cache

    Returns:
      An SSLSessionCache with LRU replacement policy that can be passed as a
      value for the grpc.ssl_session_cache option to a grpc.Channel. SSL
      session caches are used to store session tickets, which clients can
      present to resume previous TLS sessions with a server.
    """
    lru_backing_cache = _cygrpc.SSLSessionCacheLRU(capacity)
    return SSLSessionCache(lru_backing_cache)
+
+
class SSLSessionCache(object):
    """Wraps a session cache used for TLS session resumption.

    Instances of this class can be passed to a Channel as values for the
    grpc.ssl_session_cache option
    """

    def __init__(self, cache):
        # The wrapped cache object; only its integer conversion is exposed.
        self._cache = cache

    def __int__(self):
        # The channel-option machinery consumes the cache as an integer.
        return int(self._cache)
diff --git a/contrib/python/grpcio/py2/grpc/framework/__init__.py b/contrib/python/grpcio/py2/grpc/framework/__init__.py
new file mode 100644
index 0000000000..5fb4f3c3cf
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/framework/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/contrib/python/grpcio/py2/grpc/framework/common/__init__.py b/contrib/python/grpcio/py2/grpc/framework/common/__init__.py
new file mode 100644
index 0000000000..5fb4f3c3cf
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/framework/common/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/contrib/python/grpcio/py2/grpc/framework/common/cardinality.py b/contrib/python/grpcio/py2/grpc/framework/common/cardinality.py
new file mode 100644
index 0000000000..c98735622d
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/framework/common/cardinality.py
@@ -0,0 +1,26 @@
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Defines an enum for classifying RPC methods by streaming semantics."""
+
+import enum
+
+
@enum.unique
class Cardinality(enum.Enum):
    """Describes the streaming semantics of an RPC method.

    Each member's value is a human-readable description of the
    request/response streaming combination.
    """

    UNARY_UNARY = 'request-unary/response-unary'
    UNARY_STREAM = 'request-unary/response-streaming'
    STREAM_UNARY = 'request-streaming/response-unary'
    STREAM_STREAM = 'request-streaming/response-streaming'
diff --git a/contrib/python/grpcio/py2/grpc/framework/common/style.py b/contrib/python/grpcio/py2/grpc/framework/common/style.py
new file mode 100644
index 0000000000..f6138d417f
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/framework/common/style.py
@@ -0,0 +1,24 @@
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Defines an enum for classifying RPC methods by control flow semantics."""
+
+import enum
+
+
@enum.unique
class Service(enum.Enum):
    """Describes the control flow style of RPC method implementation.

    INLINE implementations run in the calling thread; EVENT implementations
    are driven by callbacks.
    """

    INLINE = 'inline'
    EVENT = 'event'
diff --git a/contrib/python/grpcio/py2/grpc/framework/foundation/__init__.py b/contrib/python/grpcio/py2/grpc/framework/foundation/__init__.py
new file mode 100644
index 0000000000..5fb4f3c3cf
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/framework/foundation/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/contrib/python/grpcio/py2/grpc/framework/foundation/abandonment.py b/contrib/python/grpcio/py2/grpc/framework/foundation/abandonment.py
new file mode 100644
index 0000000000..660ce991c4
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/framework/foundation/abandonment.py
@@ -0,0 +1,22 @@
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Utilities for indicating abandonment of computation."""
+
+
class Abandoned(Exception):
    """Signals that some computation is being given up on.

    Abandoning a computation is different than returning a value or raising
    an exception indicating some operational or programming defect.
    """
diff --git a/contrib/python/grpcio/py2/grpc/framework/foundation/callable_util.py b/contrib/python/grpcio/py2/grpc/framework/foundation/callable_util.py
new file mode 100644
index 0000000000..24daf3406f
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/framework/foundation/callable_util.py
@@ -0,0 +1,96 @@
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Utilities for working with callables."""
+
+import abc
+import collections
+import enum
+import functools
+import logging
+
+import six
+
# Module-level logger; exceptions raised by wrapped callables are reported here.
_LOGGER = logging.getLogger(__name__)
+
+
class Outcome(six.with_metaclass(abc.ABCMeta)):
    """A sum type describing the outcome of some call.

    Attributes:
      kind: One of Kind.RETURNED or Kind.RAISED respectively indicating that the
        call returned a value or raised an exception.
      return_value: The value returned by the call. Must be present if kind is
        Kind.RETURNED.
      exception: The exception raised by the call. Must be present if kind is
        Kind.RAISED.
    """

    @enum.unique
    class Kind(enum.Enum):
        """Identifies the general kind of the outcome of some call."""

        # Distinct sentinel objects: members are distinguished by identity,
        # so the values themselves carry no meaning.
        RETURNED = object()
        RAISED = object()
+
+
# The namedtuple base supplies the kind/return_value/exception attributes
# required by the Outcome interface.
class _EasyOutcome(
        collections.namedtuple('_EasyOutcome',
                               ['kind', 'return_value', 'exception']), Outcome):
    """A trivial implementation of Outcome."""
+
+
def _call_logging_exceptions(behavior, message, *args, **kwargs):
    """Invokes behavior, capturing its return value or its logged exception."""
    try:
        return_value = behavior(*args, **kwargs)
    except Exception as e:  # pylint: disable=broad-except
        _LOGGER.exception(message)
        return _EasyOutcome(Outcome.Kind.RAISED, None, e)
    return _EasyOutcome(Outcome.Kind.RETURNED, return_value, None)
+
+
def with_exceptions_logged(behavior, message):
    """Wraps a callable in a try-except that logs any exceptions it raises.

    Args:
      behavior: Any callable.
      message: A string to log if the behavior raises an exception.

    Returns:
      A callable that when executed invokes the given behavior. The returned
      callable takes the same arguments as the given behavior but returns a
      future.Outcome describing whether the given behavior returned a value or
      raised an exception.
    """

    # functools.wraps preserves the wrapped behavior's metadata.
    @functools.wraps(behavior)
    def _behavior_with_logging(*args, **kwargs):
        return _call_logging_exceptions(behavior, message, *args, **kwargs)

    return _behavior_with_logging
+
+
def call_logging_exceptions(behavior, message, *args, **kwargs):
    """Calls a behavior in a try-except that logs any exceptions it raises.

    Args:
      behavior: Any callable.
      message: A string to log if the behavior raises an exception.
      *args: Positional arguments to pass to the given behavior.
      **kwargs: Keyword arguments to pass to the given behavior.

    Returns:
      An Outcome describing whether the given behavior returned a value or
      raised an exception.
    """
    # Thin public wrapper over the shared helper.
    outcome = _call_logging_exceptions(behavior, message, *args, **kwargs)
    return outcome
diff --git a/contrib/python/grpcio/py2/grpc/framework/foundation/future.py b/contrib/python/grpcio/py2/grpc/framework/foundation/future.py
new file mode 100644
index 0000000000..d11679cc3d
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/framework/foundation/future.py
@@ -0,0 +1,221 @@
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""A Future interface.
+
+Python doesn't have a Future interface in its standard library. In the absence
+of such a standard, three separate, incompatible implementations
+(concurrent.futures.Future, ndb.Future, and asyncio.Future) have appeared. This
+interface attempts to be as compatible as possible with
+concurrent.futures.Future. From ndb.Future it adopts a traceback-object accessor
+method.
+
+Unlike the concrete and implemented Future classes listed above, the Future
+class defined in this module is an entirely abstract interface that anyone may
+implement and use.
+
+The one known incompatibility between this interface and the interface of
+concurrent.futures.Future is that this interface defines its own CancelledError
+and TimeoutError exceptions rather than raising the implementation-private
+concurrent.futures._base.CancelledError and the
+built-in-but-only-in-3.3-and-later TimeoutError.
+"""
+
+import abc
+
+import six
+
+
class TimeoutError(Exception):
    """Signals that a particular call did not complete in time."""
+
+
class CancelledError(Exception):
    """Signals that the computation underlying a Future was cancelled."""
+
+
class Future(six.with_metaclass(abc.ABCMeta)):
    """A representation of a computation in another control flow.

    Computations represented by a Future may be yet to be begun, may be ongoing,
    or may have already completed.
    """

    # NOTE(nathaniel): This isn't the return type that I would want to have if it
    # were up to me. Were this interface being written from scratch, the return
    # type of this method would probably be a sum type like:
    #
    # NOT_COMMENCED
    # COMMENCED_AND_NOT_COMPLETED
    # PARTIAL_RESULT<Partial_Result_Type>
    # COMPLETED<Result_Type>
    # UNCANCELLABLE
    # NOT_IMMEDIATELY_DETERMINABLE
    @abc.abstractmethod
    def cancel(self):
        """Attempts to cancel the computation.

        This method does not block.

        Returns:
          True if the computation has not yet begun, will not be allowed to take
          place, and determination of both was possible without blocking. False
          under all other circumstances including but not limited to the
          computation's already having begun, the computation's already having
          finished, and the computation's having been scheduled for execution on a
          remote system for which a determination of whether or not it commenced
          before being cancelled cannot be made without blocking.
        """
        raise NotImplementedError()

    # NOTE(nathaniel): Here too this isn't the return type that I'd want this
    # method to have if it were up to me. I think I'd go with another sum type
    # like:
    #
    # NOT_CANCELLED (this object's cancel method hasn't been called)
    # NOT_COMMENCED
    # COMMENCED_AND_NOT_COMPLETED
    # PARTIAL_RESULT<Partial_Result_Type>
    # COMPLETED<Result_Type>
    # UNCANCELLABLE
    # NOT_IMMEDIATELY_DETERMINABLE
    #
    # Notice how giving the cancel method the right semantics obviates most
    # reasons for this method to exist.
    @abc.abstractmethod
    def cancelled(self):
        """Describes whether the computation was cancelled.

        This method does not block.

        Returns:
          True if the computation was cancelled any time before its result became
          immediately available. False under all other circumstances including but
          not limited to this object's cancel method not having been called and
          the computation's result having become immediately available.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def running(self):
        """Describes whether the computation is taking place.

        This method does not block.

        Returns:
          True if the computation is scheduled to take place in the future or is
          taking place now, or False if the computation took place in the past or
          was cancelled.
        """
        raise NotImplementedError()

    # NOTE(nathaniel): These aren't quite the semantics I'd like here either. I
    # would rather this only returned True in cases in which the underlying
    # computation completed successfully. A computation's having been cancelled
    # conflicts with considering that computation "done".
    @abc.abstractmethod
    def done(self):
        """Describes whether the computation has taken place.

        This method does not block.

        Returns:
          True if the computation is known to have either completed or have been
          unscheduled or interrupted. False if the computation may possibly be
          executing or scheduled to execute later.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def result(self, timeout=None):
        """Accesses the outcome of the computation or raises its exception.

        This method may return immediately or may block.

        Args:
          timeout: The length of time in seconds to wait for the computation to
            finish or be cancelled, or None if this method should block until the
            computation has finished or is cancelled no matter how long that takes.

        Returns:
          The return value of the computation.

        Raises:
          TimeoutError: If a timeout value is passed and the computation does not
            terminate within the allotted time.
          CancelledError: If the computation was cancelled.
          Exception: If the computation raised an exception, this call will raise
            the same exception.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def exception(self, timeout=None):
        """Returns the exception raised by the computation.

        This method may return immediately or may block.

        Args:
          timeout: The length of time in seconds to wait for the computation to
            terminate or be cancelled, or None if this method should block until
            the computation is terminated or is cancelled no matter how long that
            takes.

        Returns:
          The exception raised by the computation, or None if the computation did
          not raise an exception.

        Raises:
          TimeoutError: If a timeout value is passed and the computation does not
            terminate within the allotted time.
          CancelledError: If the computation was cancelled.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def traceback(self, timeout=None):
        """Accesses the traceback of the exception raised by the computation.

        This method may return immediately or may block.

        Args:
          timeout: The length of time in seconds to wait for the computation to
            terminate or be cancelled, or None if this method should block until
            the computation is terminated or is cancelled no matter how long that
            takes.

        Returns:
          The traceback of the exception raised by the computation, or None if the
          computation did not raise an exception.

        Raises:
          TimeoutError: If a timeout value is passed and the computation does not
            terminate within the allotted time.
          CancelledError: If the computation was cancelled.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def add_done_callback(self, fn):
        """Adds a function to be called at completion of the computation.

        The callback will be passed this Future object describing the outcome of
        the computation.

        If the computation has already completed, the callback will be called
        immediately.

        Args:
          fn: A callable taking this Future object as its single parameter.
        """
        raise NotImplementedError()
diff --git a/contrib/python/grpcio/py2/grpc/framework/foundation/logging_pool.py b/contrib/python/grpcio/py2/grpc/framework/foundation/logging_pool.py
new file mode 100644
index 0000000000..53d2cd0082
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/framework/foundation/logging_pool.py
@@ -0,0 +1,71 @@
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""A thread pool that logs exceptions raised by tasks executed within it."""
+
+from concurrent import futures
+import logging
+
+_LOGGER = logging.getLogger(__name__)
+
+
+def _wrap(behavior):
+ """Wraps an arbitrary callable behavior in exception-logging."""
+
+ def _wrapping(*args, **kwargs):
+ try:
+ return behavior(*args, **kwargs)
+ except Exception:
+ _LOGGER.exception(
+ 'Unexpected exception from %s executed in logging pool!',
+ behavior)
+ raise
+
+ return _wrapping
+
+
+class _LoggingPool(object):
+ """An exception-logging futures.ThreadPoolExecutor-compatible thread pool."""
+
+ def __init__(self, backing_pool):
+ self._backing_pool = backing_pool
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ self._backing_pool.shutdown(wait=True)
+
+ def submit(self, fn, *args, **kwargs):
+ return self._backing_pool.submit(_wrap(fn), *args, **kwargs)
+
+ def map(self, func, *iterables, **kwargs):
+ return self._backing_pool.map(_wrap(func),
+ *iterables,
+ timeout=kwargs.get('timeout', None))
+
+ def shutdown(self, wait=True):
+ self._backing_pool.shutdown(wait=wait)
+
+
def pool(max_workers):
    """Creates a thread pool that logs exceptions raised by the tasks within it.

    Args:
      max_workers: The maximum number of worker threads to allow the pool.

    Returns:
      A futures.ThreadPoolExecutor-compatible thread pool that logs exceptions
      raised by the tasks executed within it.
    """
    backing_pool = futures.ThreadPoolExecutor(max_workers)
    return _LoggingPool(backing_pool)
diff --git a/contrib/python/grpcio/py2/grpc/framework/foundation/stream.py b/contrib/python/grpcio/py2/grpc/framework/foundation/stream.py
new file mode 100644
index 0000000000..fd47977b89
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/framework/foundation/stream.py
@@ -0,0 +1,45 @@
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Interfaces related to streams of values or objects."""
+
+import abc
+
+import six
+
+
class Consumer(six.with_metaclass(abc.ABCMeta)):
    """Interface for consumers of finite streams of values or objects."""

    @abc.abstractmethod
    def consume(self, value):
        """Accepts a value.

        Args:
          value: Any value accepted by this Consumer.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def terminate(self):
        """Indicates to this Consumer that no more values will be supplied."""
        raise NotImplementedError()

    @abc.abstractmethod
    def consume_and_terminate(self, value):
        """Supplies a value and signals that no more values will be supplied.

        Semantically equivalent to consume(value) followed by terminate(), as
        in the stream_util implementations of this interface.

        Args:
          value: Any value accepted by this Consumer.
        """
        raise NotImplementedError()
diff --git a/contrib/python/grpcio/py2/grpc/framework/foundation/stream_util.py b/contrib/python/grpcio/py2/grpc/framework/foundation/stream_util.py
new file mode 100644
index 0000000000..1faaf29bd7
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/framework/foundation/stream_util.py
@@ -0,0 +1,148 @@
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Helpful utilities related to the stream module."""
+
+import logging
+import threading
+
+from grpc.framework.foundation import stream
+
+_NO_VALUE = object()
+_LOGGER = logging.getLogger(__name__)
+
+
class TransformingConsumer(stream.Consumer):
    """A stream.Consumer that passes a transformation of its input to another."""

    def __init__(self, transformation, downstream):
        """Constructor.

        Args:
          transformation: A unary callable applied to every consumed value.
          downstream: The stream.Consumer to which transformed values are
            forwarded.
        """
        self._transformation = transformation
        self._downstream = downstream

    def consume(self, value):
        transformed = self._transformation(value)
        self._downstream.consume(transformed)

    def terminate(self):
        self._downstream.terminate()

    def consume_and_terminate(self, value):
        transformed = self._transformation(value)
        self._downstream.consume_and_terminate(transformed)
+
+
class IterableConsumer(stream.Consumer):
    """A Consumer that when iterated over emits the values it has consumed."""

    def __init__(self):
        self._condition = threading.Condition()
        self._values = []
        self._active = True

    def consume(self, value):
        with self._condition:
            if not self._active:
                return
            self._values.append(value)
            self._condition.notify()

    def terminate(self):
        with self._condition:
            self._active = False
            self._condition.notify()

    def consume_and_terminate(self, value):
        with self._condition:
            if not self._active:
                return
            self._values.append(value)
            self._active = False
            self._condition.notify()

    def __iter__(self):
        return self

    def __next__(self):
        return self.next()

    def next(self):
        """Blocks until a value is available or the stream has terminated."""
        with self._condition:
            while self._active and not self._values:
                self._condition.wait()
            if not self._values:
                raise StopIteration()
            return self._values.pop(0)
+
+
class ThreadSwitchingConsumer(stream.Consumer):
    """A Consumer decorator that affords serialization and asynchrony.

    Values passed to this Consumer are relayed to the wrapped "sink" Consumer
    on threads drawn from a pool. The _spinning flag guarantees at most one
    pool thread is draining the queue at a time, so calls into the sink are
    serialized even when this object's methods are invoked concurrently.
    """

    def __init__(self, sink, pool):
        """Constructor.

        Args:
          sink: The stream.Consumer to which values will be relayed.
          pool: A pool with a futures-style submit method on whose threads the
            sink will be called.
        """
        self._lock = threading.Lock()
        self._sink = sink
        self._pool = pool
        # True if self._spin has been submitted to the pool to be called once and
        # that call has not yet returned, False otherwise.
        self._spinning = False
        # Values consumed while a _spin call is in progress, awaiting relay.
        self._values = []
        # False once terminate or consume_and_terminate has been called.
        self._active = True

    def _spin(self, sink, value, terminate):
        # Runs on a pool thread; relays values to the sink until the queue is
        # drained. `value is _NO_VALUE` signals a bare termination.
        while True:
            try:
                if value is _NO_VALUE:
                    sink.terminate()
                elif terminate:
                    sink.consume_and_terminate(value)
                else:
                    sink.consume(value)
            except Exception as e:  # pylint:disable=broad-except
                # A misbehaving sink must not kill the draining thread.
                _LOGGER.exception(e)

            with self._lock:
                if terminate:
                    self._spinning = False
                    return
                elif self._values:
                    # More queued work: take the next value; fold a pending
                    # termination into this delivery if it empties the queue.
                    value = self._values.pop(0)
                    terminate = not self._values and not self._active
                elif not self._active:
                    # Queue empty but terminated while relaying: deliver a
                    # bare termination on the next iteration.
                    value = _NO_VALUE
                    terminate = True
                else:
                    self._spinning = False
                    return

    def consume(self, value):
        with self._lock:
            if self._active:
                if self._spinning:
                    # A pool thread is already draining; queue for it.
                    self._values.append(value)
                else:
                    self._pool.submit(self._spin, self._sink, value, False)
                    self._spinning = True

    def terminate(self):
        with self._lock:
            if self._active:
                self._active = False
                if not self._spinning:
                    self._pool.submit(self._spin, self._sink, _NO_VALUE, True)
                    self._spinning = True

    def consume_and_terminate(self, value):
        with self._lock:
            if self._active:
                self._active = False
                if self._spinning:
                    self._values.append(value)
                else:
                    self._pool.submit(self._spin, self._sink, value, True)
                    self._spinning = True
diff --git a/contrib/python/grpcio/py2/grpc/framework/interfaces/__init__.py b/contrib/python/grpcio/py2/grpc/framework/interfaces/__init__.py
new file mode 100644
index 0000000000..5fb4f3c3cf
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/framework/interfaces/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/contrib/python/grpcio/py2/grpc/framework/interfaces/base/__init__.py b/contrib/python/grpcio/py2/grpc/framework/interfaces/base/__init__.py
new file mode 100644
index 0000000000..5fb4f3c3cf
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/framework/interfaces/base/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/contrib/python/grpcio/py2/grpc/framework/interfaces/base/base.py b/contrib/python/grpcio/py2/grpc/framework/interfaces/base/base.py
new file mode 100644
index 0000000000..9e63d6a931
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/framework/interfaces/base/base.py
@@ -0,0 +1,327 @@
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""The base interface of RPC Framework.
+
+Implementations of this interface support the conduct of "operations":
+exchanges between two distinct ends of an arbitrary number of data payloads
+and metadata such as a name for the operation, initial and terminal metadata
+in each direction, and flow control. These operations may be used for transfers
+of data, remote procedure calls, status indication, or anything else
+applications choose.
+"""
+
+# threading is referenced from specification in this module.
+import abc
+import enum
+import threading # pylint: disable=unused-import
+
+import six
+
+# pylint: disable=too-many-arguments
+
+
class NoSuchMethodError(Exception):
    """Indicates that an unrecognized operation has been called.

    Attributes:
      code: A code value to communicate to the other side of the operation
        along with indication of operation termination. May be None.
      details: A details value to communicate to the other side of the
        operation along with indication of operation termination. May be None.
    """

    def __init__(self, code, details):
        """Initializes the error with termination values for the remote side.

        Args:
          code: A code value to communicate to the other side of the operation
            along with indication of operation termination. May be None.
          details: A details value to communicate to the other side of the
            operation along with indication of operation termination. May be
            None.
        """
        Exception.__init__(self)
        self.code, self.details = code, details
+
+
class Outcome(object):
    """The outcome of an operation.

    Attributes:
      kind: A Kind value coarsely identifying how the operation terminated.
      code: An application-specific code value or None if no such value was
        provided.
      details: An application-specific details value or None if no such value was
        provided.
    """

    # NOTE(review): no constructor is declared here; concrete Outcome values
    # presumably pair a Kind with code/details elsewhere in the framework —
    # confirm in implementation modules.
    @enum.unique
    class Kind(enum.Enum):
        """Ways in which an operation can terminate."""

        COMPLETED = 'completed'
        CANCELLED = 'cancelled'
        EXPIRED = 'expired'
        LOCAL_SHUTDOWN = 'local shutdown'
        REMOTE_SHUTDOWN = 'remote shutdown'
        RECEPTION_FAILURE = 'reception failure'
        TRANSMISSION_FAILURE = 'transmission failure'
        LOCAL_FAILURE = 'local failure'
        REMOTE_FAILURE = 'remote failure'
+
+
class Completion(six.with_metaclass(abc.ABCMeta)):
    """An aggregate of the values exchanged upon operation completion.

    Attributes:
      terminal_metadata: A terminal metadata value for the operation.
      code: A code value for the operation.
      message: A message value for the operation.
    """
+
+
class OperationContext(six.with_metaclass(abc.ABCMeta)):
    """Provides operation-related information and action."""

    @abc.abstractmethod
    def outcome(self):
        """Indicates the operation's outcome (or that the operation is ongoing).

        Returns:
          None if the operation is still active or the Outcome value for the
          operation if it has terminated.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def add_termination_callback(self, callback):
        """Adds a function to be called upon operation termination.

        Args:
          callback: A callable to be passed an Outcome value on operation
            termination.

        Returns:
          None if the operation has not yet terminated (in which case the
          passed callback will be called when it does), or the Outcome value
          describing the operation termination if it has already terminated
          (in which case the passed callback will not be called as a result of
          this method call).
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def time_remaining(self):
        """Describes the length of allowed time remaining for the operation.

        Returns:
          A nonnegative float indicating the length of allowed time in seconds
          remaining for the operation to complete before it is considered to have
          timed out. Zero is returned if the operation has terminated.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def cancel(self):
        """Cancels the operation if the operation has not yet terminated."""
        raise NotImplementedError()

    @abc.abstractmethod
    def fail(self, exception):
        """Indicates that the operation has failed.

        Args:
          exception: An exception germane to the operation failure. May be None.
        """
        raise NotImplementedError()
+
+
class Operator(six.with_metaclass(abc.ABCMeta)):
    """An interface through which to participate in an operation."""

    @abc.abstractmethod
    def advance(self,
                initial_metadata=None,
                payload=None,
                completion=None,
                allowance=None):
        """Progresses the operation.

        All parameters are optional; a single call may communicate any subset
        of them.

        Args:
          initial_metadata: An initial metadata value. Only one may ever be
            communicated in each direction for an operation, and they must be
            communicated no later than either the first payload or the completion.
          payload: A payload value.
          completion: A Completion value. May only ever be non-None once in either
            direction, and no payloads may be passed after it has been communicated.
          allowance: A positive integer communicating the number of additional
            payloads allowed to be passed by the remote side of the operation.
        """
        raise NotImplementedError()
+
+
class ProtocolReceiver(six.with_metaclass(abc.ABCMeta)):
    """A means of receiving protocol values during an operation."""

    @abc.abstractmethod
    def context(self, protocol_context):
        """Accepts the protocol context object for the operation.

        Args:
          protocol_context: The protocol context object for the operation.
            NOTE(review): its type appears to be defined by the Base
            implementation in use — confirm.
        """
        raise NotImplementedError()
+
+
class Subscription(six.with_metaclass(abc.ABCMeta)):
    """Describes customer code's interest in values from the other side.

    Attributes:
      kind: A Kind value describing the overall kind of this value.
      termination_callback: A callable to be passed the Outcome associated with
        the operation after it has terminated. Must be non-None if kind is
        Kind.TERMINATION_ONLY. Must be None otherwise.
      allowance: A callable behavior that accepts positive integers representing
        the number of additional payloads allowed to be passed to the other side
        of the operation. Must be None if kind is Kind.FULL. Must not be None
        otherwise.
      operator: An Operator to be passed values from the other side of the
        operation. Must be non-None if kind is Kind.FULL. Must be None otherwise.
      protocol_receiver: A ProtocolReceiver to be passed protocol objects as they
        become available during the operation. Must be non-None if kind is
        Kind.FULL.
    """

    # NOTE(review): the "Must not be None otherwise" constraint on `allowance`
    # looks too strong: utilities._NONE_SUBSCRIPTION constructs a Kind.NONE
    # subscription with allowance=None. Confirm the intended constraint.
    @enum.unique
    class Kind(enum.Enum):
        """The overall kind of a Subscription."""

        NONE = 'none'
        TERMINATION_ONLY = 'termination only'
        FULL = 'full'
+
+
class Servicer(six.with_metaclass(abc.ABCMeta)):
    """Interface for service implementations."""

    @abc.abstractmethod
    def service(self, group, method, context, output_operator):
        """Services an operation.

        Args:
          group: The group identifier of the operation to be serviced.
          method: The method identifier of the operation to be serviced.
          context: An OperationContext object affording contextual information and
            actions.
          output_operator: An Operator that will accept output values of the
            operation.

        Returns:
          A Subscription via which this object may or may not accept more values of
          the operation.

        Raises:
          NoSuchMethodError: If this Servicer does not handle operations with the
            given group and method.
          abandonment.Abandoned: If the operation has been aborted and there is
            no longer any reason to service the operation.
        """
        raise NotImplementedError()
+
+
class End(six.with_metaclass(abc.ABCMeta)):
    """Common type for entry-point objects on both sides of an operation.

    An End starts and stops the service of operations, commences new
    operations, and reports statistics about terminated ones.
    """

    @abc.abstractmethod
    def start(self):
        """Starts this object's service of operations."""
        raise NotImplementedError()

    @abc.abstractmethod
    def stop(self, grace):
        """Stops this object's service of operations.

        This object will refuse service of new operations as soon as this method is
        called but operations under way at the time of the call may be given a
        grace period during which they are allowed to finish.

        Args:
          grace: A duration of time in seconds to allow ongoing operations to
            terminate before being forcefully terminated by the stopping of this
            End. May be zero to terminate all ongoing operations and immediately
            stop.

        Returns:
          A threading.Event that will be set to indicate all operations having
          terminated and this End having completely stopped. The returned event
          may not be set until after the full grace period (if some ongoing
          operation continues for the full length of the period) or it may be set
          much sooner (if for example this End had no operations in progress at
          the time its stop method was called).
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def operate(self,
                group,
                method,
                subscription,
                timeout,
                initial_metadata=None,
                payload=None,
                completion=None,
                protocol_options=None):
        """Commences an operation.

        Args:
          group: The group identifier of the invoked operation.
          method: The method identifier of the invoked operation.
          subscription: A Subscription to which the results of the operation will be
            passed.
          timeout: A length of time in seconds to allow for the operation.
          initial_metadata: An initial metadata value to be sent to the other side
            of the operation. May be None if the initial metadata will be later
            passed via the returned operator or if there will be no initial metadata
            passed at all.
          payload: An initial payload for the operation.
          completion: A Completion value indicating the end of transmission to the
            other side of the operation.
          protocol_options: A value specified by the provider of a Base interface
            implementation affording custom state and behavior.

        Returns:
          A pair of objects affording information about the operation and action
          continuing the operation. The first element of the returned pair is an
          OperationContext for the operation and the second element of the
          returned pair is an Operator to which operation values not passed in
          this call should later be passed.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def operation_stats(self):
        """Reports the number of terminated operations broken down by outcome.

        Returns:
          A dictionary from Outcome.Kind value to an integer identifying the number
          of operations that terminated with that outcome kind.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def add_idle_action(self, action):
        """Adds an action to be called when this End has no ongoing operations.

        Args:
          action: A callable that accepts no arguments.
        """
        raise NotImplementedError()
diff --git a/contrib/python/grpcio/py2/grpc/framework/interfaces/base/utilities.py b/contrib/python/grpcio/py2/grpc/framework/interfaces/base/utilities.py
new file mode 100644
index 0000000000..281db62b5d
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/framework/interfaces/base/utilities.py
@@ -0,0 +1,71 @@
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Utilities for use with the base interface of RPC Framework."""
+
+import collections
+
+from grpc.framework.interfaces.base import base
+
+
class _Completion(
        base.Completion,
        collections.namedtuple('_Completion',
                               ('terminal_metadata', 'code', 'message'))):
    """A namedtuple-backed trivial implementation of base.Completion."""
+
+
class _Subscription(
        base.Subscription,
        collections.namedtuple(
            '_Subscription',
            ('kind', 'termination_callback', 'allowance', 'operator',
             'protocol_receiver'))):
    """A namedtuple-backed trivial implementation of base.Subscription."""


# Shared "no interest" subscription: all optional fields are None.
_NONE_SUBSCRIPTION = _Subscription(base.Subscription.Kind.NONE, None, None,
                                   None, None)
+
+
def completion(terminal_metadata, code, message):
    """Creates a base.Completion aggregating the given operation values.

    Args:
      terminal_metadata: A terminal metadata value for an operation.
      code: A code value for an operation.
      message: A message value for an operation.

    Returns:
      A base.Completion aggregating the given operation values.
    """
    return _Completion(terminal_metadata, code, message)
+
+
def full_subscription(operator, protocol_receiver):
    """Creates a "full" base.Subscription for the given base.Operator.

    Args:
      operator: A base.Operator to be used in an operation.
      protocol_receiver: A base.ProtocolReceiver to be used in an operation.

    Returns:
      A base.Subscription of kind base.Subscription.Kind.FULL wrapping the given
      base.Operator and base.ProtocolReceiver.
    """
    return _Subscription(base.Subscription.Kind.FULL,
                         termination_callback=None,
                         allowance=None,
                         operator=operator,
                         protocol_receiver=protocol_receiver)
diff --git a/contrib/python/grpcio/py2/grpc/framework/interfaces/face/__init__.py b/contrib/python/grpcio/py2/grpc/framework/interfaces/face/__init__.py
new file mode 100644
index 0000000000..5fb4f3c3cf
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/framework/interfaces/face/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/contrib/python/grpcio/py2/grpc/framework/interfaces/face/face.py b/contrib/python/grpcio/py2/grpc/framework/interfaces/face/face.py
new file mode 100644
index 0000000000..be173978c0
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/framework/interfaces/face/face.py
@@ -0,0 +1,1050 @@
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Interfaces defining the Face layer of RPC Framework."""
+
+import abc
+import collections
+import enum
+
+# cardinality, style, abandonment, future, and stream are
+# referenced from specification in this module.
+from grpc.framework.common import cardinality # pylint: disable=unused-import
+from grpc.framework.common import style # pylint: disable=unused-import
+from grpc.framework.foundation import future # pylint: disable=unused-import
+from grpc.framework.foundation import stream # pylint: disable=unused-import
+import six
+
+# pylint: disable=too-many-arguments
+
+
class NoSuchMethodError(Exception):
    """Raised by customer code to indicate an unrecognized method.

    Attributes:
      group: The group of the unrecognized method.
      method: The method identifier of the unrecognized method.
    """

    def __init__(self, group, method):
        """Initializes the error with the unrecognized RPC name.

        Args:
          group: The group identifier of the unrecognized RPC name.
          method: The method identifier of the unrecognized RPC name.
        """
        Exception.__init__(self)
        self.group = group
        self.method = method

    def __repr__(self):
        return 'face.NoSuchMethodError(%s, %s)' % (self.group, self.method)
+
+
class Abortion(
        collections.namedtuple('Abortion',
                               ('kind', 'initial_metadata', 'terminal_metadata',
                                'code', 'details'))):
    """A value describing RPC abortion.

    Attributes:
      kind: A Kind value identifying how the RPC failed.
      initial_metadata: The initial metadata from the other side of the RPC or
        None if no initial metadata value was received.
      terminal_metadata: The terminal metadata from the other side of the RPC or
        None if no terminal metadata value was received.
      code: The code value from the other side of the RPC or None if no code value
        was received.
      details: The details value from the other side of the RPC or None if no
        details value was received.
    """

    @enum.unique
    class Kind(enum.Enum):
        """Types of RPC abortion."""

        CANCELLED = 'cancelled'
        EXPIRED = 'expired'
        LOCAL_SHUTDOWN = 'local shutdown'
        REMOTE_SHUTDOWN = 'remote shutdown'
        NETWORK_FAILURE = 'network failure'
        LOCAL_FAILURE = 'local failure'
        REMOTE_FAILURE = 'remote failure'
+
+
class AbortionError(six.with_metaclass(abc.ABCMeta, Exception)):
    """Common super type for exceptions indicating RPC abortion.

    Attributes:
      initial_metadata: The initial metadata from the other side of the RPC or
        None if no initial metadata value was received.
      terminal_metadata: The terminal metadata from the other side of the RPC or
        None if no terminal metadata value was received.
      code: The code value from the other side of the RPC or None if no code value
        was received.
      details: The details value from the other side of the RPC or None if no
        details value was received.
    """

    def __init__(self, initial_metadata, terminal_metadata, code, details):
        Exception.__init__(self)
        self.initial_metadata = initial_metadata
        self.terminal_metadata = terminal_metadata
        self.code = code
        self.details = details

    def __str__(self):
        return '%s(code=%s, details="%s")' % (
            self.__class__.__name__, self.code, self.details)
+
+
# Concrete AbortionError subtypes; each corresponds to one of the seven
# Abortion.Kind values declared above.
class CancellationError(AbortionError):
    """Indicates that an RPC has been cancelled."""


class ExpirationError(AbortionError):
    """Indicates that an RPC has expired ("timed out")."""


class LocalShutdownError(AbortionError):
    """Indicates that an RPC has terminated due to local shutdown of RPCs."""


class RemoteShutdownError(AbortionError):
    """Indicates that an RPC has terminated due to remote shutdown of RPCs."""


class NetworkError(AbortionError):
    """Indicates that some error occurred on the network."""


class LocalError(AbortionError):
    """Indicates that an RPC has terminated due to a local defect."""


class RemoteError(AbortionError):
    """Indicates that an RPC has terminated due to a remote defect."""
+
+
class RpcContext(six.with_metaclass(abc.ABCMeta)):
    """Provides RPC-related information and action."""

    @abc.abstractmethod
    def is_active(self):
        """Describes whether the RPC is active or has terminated.

        Returns:
          A value indicating whether the RPC is still active.
          NOTE(review): implementations appear to return a bool — confirm.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def time_remaining(self):
        """Describes the length of allowed time remaining for the RPC.

        Returns:
          A nonnegative float indicating the length of allowed time in seconds
          remaining for the RPC to complete before it is considered to have timed
          out.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def add_abortion_callback(self, abortion_callback):
        """Registers a callback to be called if the RPC is aborted.

        Args:
          abortion_callback: A callable to be called and passed an Abortion value
            in the event of RPC abortion.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def cancel(self):
        """Cancels the RPC.

        Idempotent and has no effect if the RPC has already terminated.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def protocol_context(self):
        """Accesses a custom object specified by an implementation provider.

        Returns:
          A value specified by the provider of a Face interface implementation
          affording custom state and behavior.
        """
        raise NotImplementedError()
+
+
class Call(six.with_metaclass(abc.ABCMeta, RpcContext)):
    """Invocation-side utility object for an RPC.

    A Call is also an RpcContext and thus additionally affords RPC-related
    information and action (activity, time remaining, cancellation).
    """

    @abc.abstractmethod
    def initial_metadata(self):
        """Accesses the initial metadata from the service-side of the RPC.

        This method blocks until the value is available or is known not to have been
        emitted from the service-side of the RPC.

        Returns:
          The initial metadata object emitted by the service-side of the RPC, or
          None if there was no such value.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def terminal_metadata(self):
        """Accesses the terminal metadata from the service-side of the RPC.

        This method blocks until the value is available or is known not to have been
        emitted from the service-side of the RPC.

        Returns:
          The terminal metadata object emitted by the service-side of the RPC, or
          None if there was no such value.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def code(self):
        """Accesses the code emitted by the service-side of the RPC.

        This method blocks until the value is available or is known not to have been
        emitted from the service-side of the RPC.

        Returns:
          The code object emitted by the service-side of the RPC, or None if there
          was no such value.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def details(self):
        """Accesses the details value emitted by the service-side of the RPC.

        This method blocks until the value is available or is known not to have been
        emitted from the service-side of the RPC.

        Returns:
          The details value emitted by the service-side of the RPC, or None if there
          was no such value.
        """
        raise NotImplementedError()
+
+
+class ServicerContext(six.with_metaclass(abc.ABCMeta, RpcContext)):
+ """A context object passed to method implementations, affording access to invocation metadata and acceptance of service-side outcome values."""
+
+ @abc.abstractmethod
+ def invocation_metadata(self):
+ """Accesses the metadata from the invocation-side of the RPC.
+
+ This method blocks until the value is available or is known not to have been
+ emitted from the invocation-side of the RPC.
+
+ Returns:
+ The metadata object emitted by the invocation-side of the RPC, or None if
+ there was no such value.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def initial_metadata(self, initial_metadata):
+ """Accepts the service-side initial metadata value of the RPC.
+
+ This method need not be called by method implementations if they have no
+ service-side initial metadata to transmit.
+
+ Args:
+ initial_metadata: The service-side initial metadata value of the RPC to
+ be transmitted to the invocation side of the RPC.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def terminal_metadata(self, terminal_metadata):
+ """Accepts the service-side terminal metadata value of the RPC.
+
+ This method need not be called by method implementations if they have no
+ service-side terminal metadata to transmit.
+
+ Args:
+ terminal_metadata: The service-side terminal metadata value of the RPC to
+ be transmitted to the invocation side of the RPC.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def code(self, code):
+ """Accepts the service-side code of the RPC.
+
+ This method need not be called by method implementations if they have no
+ code to transmit.
+
+ Args:
+ code: The code of the RPC to be transmitted to the invocation side of the
+ RPC.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def details(self, details):
+ """Accepts the service-side details of the RPC.
+
+ This method need not be called by method implementations if they have no
+ service-side details to transmit.
+
+ Args:
+ details: The service-side details value of the RPC to be transmitted to
+ the invocation side of the RPC.
+ """
+ raise NotImplementedError()
+
+
+class ResponseReceiver(six.with_metaclass(abc.ABCMeta)):
+ """Invocation-side object used to accept the output (metadata, responses, and completion values) of an RPC."""
+
+ @abc.abstractmethod
+ def initial_metadata(self, initial_metadata):
+ """Receives the initial metadata from the service-side of the RPC.
+
+ Args:
+ initial_metadata: The initial metadata object emitted from the
+ service-side of the RPC.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def response(self, response):
+ """Receives a response from the service-side of the RPC.
+
+ Args:
+ response: A response object emitted from the service-side of the RPC.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def complete(self, terminal_metadata, code, details):
+ """Receives the completion values emitted from the service-side of the RPC.
+
+ Args:
+ terminal_metadata: The terminal metadata object emitted from the
+ service-side of the RPC.
+ code: The code object emitted from the service-side of the RPC.
+ details: The details object emitted from the service-side of the RPC.
+ """
+ raise NotImplementedError()
+
+
+class UnaryUnaryMultiCallable(six.with_metaclass(abc.ABCMeta)):
+ """Affords invoking a unary-unary RPC in any call style."""
+
+ @abc.abstractmethod
+ def __call__(self,
+ request,
+ timeout,
+ metadata=None,
+ with_call=False,
+ protocol_options=None):
+ """Synchronously invokes the underlying RPC.
+
+ Args:
+ request: The request value for the RPC.
+ timeout: A duration of time in seconds to allow for the RPC.
+ metadata: A metadata value to be passed to the service-side of
+ the RPC.
+ with_call: Whether or not to return a Call for the RPC in addition
+ to the response.
+ protocol_options: A value specified by the provider of a Face interface
+ implementation affording custom state and behavior.
+
+ Returns:
+ The response value for the RPC, and a Call for the RPC if with_call was
+ set to True at invocation.
+
+ Raises:
+ AbortionError: Indicating that the RPC was aborted.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def future(self, request, timeout, metadata=None, protocol_options=None):
+ """Asynchronously invokes the underlying RPC.
+
+ Args:
+ request: The request value for the RPC.
+ timeout: A duration of time in seconds to allow for the RPC.
+ metadata: A metadata value to be passed to the service-side of
+ the RPC.
+ protocol_options: A value specified by the provider of a Face interface
+ implementation affording custom state and behavior.
+
+ Returns:
+ An object that is both a Call for the RPC and a future.Future. In the
+ event of RPC completion, the return Future's result value will be the
+ response value of the RPC. In the event of RPC abortion, the returned
+ Future's exception value will be an AbortionError.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def event(self,
+ request,
+ receiver,
+ abortion_callback,
+ timeout,
+ metadata=None,
+ protocol_options=None):
+ """Asynchronously invokes the underlying RPC.
+
+ Args:
+ request: The request value for the RPC.
+ receiver: A ResponseReceiver to be passed the response data of the RPC.
+ abortion_callback: A callback to be called and passed an Abortion value
+ in the event of RPC abortion.
+ timeout: A duration of time in seconds to allow for the RPC.
+ metadata: A metadata value to be passed to the service-side of
+ the RPC.
+ protocol_options: A value specified by the provider of a Face interface
+ implementation affording custom state and behavior.
+
+ Returns:
+ A Call for the RPC.
+ """
+ raise NotImplementedError()
+
+
+class UnaryStreamMultiCallable(six.with_metaclass(abc.ABCMeta)):
+ """Affords invoking a unary-stream RPC in any call style."""
+
+ @abc.abstractmethod
+ def __call__(self, request, timeout, metadata=None, protocol_options=None):
+ """Invokes the underlying RPC.
+
+ Args:
+ request: The request value for the RPC.
+ timeout: A duration of time in seconds to allow for the RPC.
+ metadata: A metadata value to be passed to the service-side of
+ the RPC.
+ protocol_options: A value specified by the provider of a Face interface
+ implementation affording custom state and behavior.
+
+ Returns:
+ An object that is both a Call for the RPC and an iterator of response
+ values. Drawing response values from the returned iterator may raise
+ AbortionError indicating abortion of the RPC.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def event(self,
+ request,
+ receiver,
+ abortion_callback,
+ timeout,
+ metadata=None,
+ protocol_options=None):
+ """Asynchronously invokes the underlying RPC.
+
+ Args:
+ request: The request value for the RPC.
+ receiver: A ResponseReceiver to be passed the response data of the RPC.
+ abortion_callback: A callback to be called and passed an Abortion value
+ in the event of RPC abortion.
+ timeout: A duration of time in seconds to allow for the RPC.
+ metadata: A metadata value to be passed to the service-side of
+ the RPC.
+ protocol_options: A value specified by the provider of a Face interface
+ implementation affording custom state and behavior.
+
+ Returns:
+ A Call for the RPC.
+ """
+ raise NotImplementedError()
+
+
+class StreamUnaryMultiCallable(six.with_metaclass(abc.ABCMeta)):
+ """Affords invoking a stream-unary RPC in any call style."""
+
+ @abc.abstractmethod
+ def __call__(self,
+ request_iterator,
+ timeout,
+ metadata=None,
+ with_call=False,
+ protocol_options=None):
+ """Synchronously invokes the underlying RPC.
+
+ Args:
+ request_iterator: An iterator that yields request values for the RPC.
+ timeout: A duration of time in seconds to allow for the RPC.
+ metadata: A metadata value to be passed to the service-side of
+ the RPC.
+ with_call: Whether or not to return a Call for the RPC in addition
+ to the response.
+ protocol_options: A value specified by the provider of a Face interface
+ implementation affording custom state and behavior.
+
+ Returns:
+ The response value for the RPC, and a Call for the RPC if with_call was
+ set to True at invocation.
+
+ Raises:
+ AbortionError: Indicating that the RPC was aborted.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def future(self,
+ request_iterator,
+ timeout,
+ metadata=None,
+ protocol_options=None):
+ """Asynchronously invokes the underlying RPC.
+
+ Args:
+ request_iterator: An iterator that yields request values for the RPC.
+ timeout: A duration of time in seconds to allow for the RPC.
+ metadata: A metadata value to be passed to the service-side of
+ the RPC.
+ protocol_options: A value specified by the provider of a Face interface
+ implementation affording custom state and behavior.
+
+ Returns:
+ An object that is both a Call for the RPC and a future.Future. In the
+ event of RPC completion, the return Future's result value will be the
+ response value of the RPC. In the event of RPC abortion, the returned
+ Future's exception value will be an AbortionError.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def event(self,
+ receiver,
+ abortion_callback,
+ timeout,
+ metadata=None,
+ protocol_options=None):
+ """Asynchronously invokes the underlying RPC.
+
+ Args:
+ receiver: A ResponseReceiver to be passed the response data of the RPC.
+ abortion_callback: A callback to be called and passed an Abortion value
+ in the event of RPC abortion.
+ timeout: A duration of time in seconds to allow for the RPC.
+ metadata: A metadata value to be passed to the service-side of
+ the RPC.
+ protocol_options: A value specified by the provider of a Face interface
+ implementation affording custom state and behavior.
+
+ Returns:
+ A single object that is both a Call for the RPC and a
+ stream.Consumer to which the request values of the RPC should be passed.
+ """
+ raise NotImplementedError()
+
+
+class StreamStreamMultiCallable(six.with_metaclass(abc.ABCMeta)):
+ """Affords invoking a stream-stream RPC in any call style."""
+
+ @abc.abstractmethod
+ def __call__(self,
+ request_iterator,
+ timeout,
+ metadata=None,
+ protocol_options=None):
+ """Invokes the underlying RPC.
+
+ Args:
+ request_iterator: An iterator that yields request values for the RPC.
+ timeout: A duration of time in seconds to allow for the RPC.
+ metadata: A metadata value to be passed to the service-side of
+ the RPC.
+ protocol_options: A value specified by the provider of a Face interface
+ implementation affording custom state and behavior.
+
+ Returns:
+ An object that is both a Call for the RPC and an iterator of response
+ values. Drawing response values from the returned iterator may raise
+ AbortionError indicating abortion of the RPC.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def event(self,
+ receiver,
+ abortion_callback,
+ timeout,
+ metadata=None,
+ protocol_options=None):
+ """Asynchronously invokes the underlying RPC.
+
+ Args:
+ receiver: A ResponseReceiver to be passed the response data of the RPC.
+ abortion_callback: A callback to be called and passed an Abortion value
+ in the event of RPC abortion.
+ timeout: A duration of time in seconds to allow for the RPC.
+ metadata: A metadata value to be passed to the service-side of
+ the RPC.
+ protocol_options: A value specified by the provider of a Face interface
+ implementation affording custom state and behavior.
+
+ Returns:
+ A single object that is both a Call for the RPC and a
+ stream.Consumer to which the request values of the RPC should be passed.
+ """
+ raise NotImplementedError()
+
+
+class MethodImplementation(six.with_metaclass(abc.ABCMeta)):
+ """A sum type that describes a method implementation; per cardinality and style, exactly one of the implementation attributes below is non-None.
+
+ Attributes:
+ cardinality: A cardinality.Cardinality value.
+ style: A style.Service value.
+ unary_unary_inline: The implementation of the method as a callable value
+ that takes a request value and a ServicerContext object and returns a
+ response value. Only non-None if cardinality is
+ cardinality.Cardinality.UNARY_UNARY and style is style.Service.INLINE.
+ unary_stream_inline: The implementation of the method as a callable value
+ that takes a request value and a ServicerContext object and returns an
+ iterator of response values. Only non-None if cardinality is
+ cardinality.Cardinality.UNARY_STREAM and style is style.Service.INLINE.
+ stream_unary_inline: The implementation of the method as a callable value
+ that takes an iterator of request values and a ServicerContext object and
+ returns a response value. Only non-None if cardinality is
+ cardinality.Cardinality.STREAM_UNARY and style is style.Service.INLINE.
+ stream_stream_inline: The implementation of the method as a callable value
+ that takes an iterator of request values and a ServicerContext object and
+ returns an iterator of response values. Only non-None if cardinality is
+ cardinality.Cardinality.STREAM_STREAM and style is style.Service.INLINE.
+ unary_unary_event: The implementation of the method as a callable value that
+ takes a request value, a response callback to which to pass the response
+ value of the RPC, and a ServicerContext. Only non-None if cardinality is
+ cardinality.Cardinality.UNARY_UNARY and style is style.Service.EVENT.
+ unary_stream_event: The implementation of the method as a callable value
+ that takes a request value, a stream.Consumer to which to pass the
+ response values of the RPC, and a ServicerContext. Only non-None if
+ cardinality is cardinality.Cardinality.UNARY_STREAM and style is
+ style.Service.EVENT.
+ stream_unary_event: The implementation of the method as a callable value
+ that takes a response callback to which to pass the response value of the
+ RPC and a ServicerContext and returns a stream.Consumer to which the
+ request values of the RPC should be passed. Only non-None if cardinality
+ is cardinality.Cardinality.STREAM_UNARY and style is style.Service.EVENT.
+ stream_stream_event: The implementation of the method as a callable value
+ that takes a stream.Consumer to which to pass the response values of the
+ RPC and a ServicerContext and returns a stream.Consumer to which the
+ request values of the RPC should be passed. Only non-None if cardinality
+ is cardinality.Cardinality.STREAM_STREAM and style is
+ style.Service.EVENT.
+ """
+
+
+class MultiMethodImplementation(six.with_metaclass(abc.ABCMeta)):
+ """A general type able to service many methods."""
+
+ @abc.abstractmethod
+ def service(self, group, method, response_consumer, context):
+ """Services an RPC.
+
+ Args:
+ group: The group identifier of the RPC.
+ method: The method identifier of the RPC.
+ response_consumer: A stream.Consumer to be called to accept the response
+ values of the RPC.
+ context: A ServicerContext object.
+
+ Returns:
+ A stream.Consumer with which to accept the request values of the RPC. The
+ consumer returned from this method may or may not be invoked to
+ completion: in the case of RPC abortion, RPC Framework will simply stop
+ passing values to this object. Implementations must not assume that this
+ object will be called to completion of the request stream or even called
+ at all.
+
+ Raises:
+ abandonment.Abandoned: May or may not be raised when the RPC has been
+ aborted.
+ NoSuchMethodError: If this MultiMethod does not recognize the given group
+ and name for the RPC and is not able to service the RPC.
+ """
+ raise NotImplementedError()
+
+
+class GenericStub(six.with_metaclass(abc.ABCMeta)):
+ """Affords RPC invocation via generic methods."""
+
+ @abc.abstractmethod
+ def blocking_unary_unary(self,
+ group,
+ method,
+ request,
+ timeout,
+ metadata=None,
+ with_call=False,
+ protocol_options=None):
+ """Invokes a unary-request-unary-response method.
+
+ This method blocks until either returning the response value of the RPC
+ (in the event of RPC completion) or raising an exception (in the event of
+ RPC abortion).
+
+ Args:
+ group: The group identifier of the RPC.
+ method: The method identifier of the RPC.
+ request: The request value for the RPC.
+ timeout: A duration of time in seconds to allow for the RPC.
+ metadata: A metadata value to be passed to the service-side of the RPC.
+ with_call: Whether or not to return a Call for the RPC in addition
+ to the response.
+ protocol_options: A value specified by the provider of a Face interface
+ implementation affording custom state and behavior.
+
+ Returns:
+ The response value for the RPC, and a Call for the RPC if with_call was
+ set to True at invocation.
+
+ Raises:
+ AbortionError: Indicating that the RPC was aborted.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def future_unary_unary(self,
+ group,
+ method,
+ request,
+ timeout,
+ metadata=None,
+ protocol_options=None):
+ """Invokes a unary-request-unary-response method.
+
+ Args:
+ group: The group identifier of the RPC.
+ method: The method identifier of the RPC.
+ request: The request value for the RPC.
+ timeout: A duration of time in seconds to allow for the RPC.
+ metadata: A metadata value to be passed to the service-side of the RPC.
+ protocol_options: A value specified by the provider of a Face interface
+ implementation affording custom state and behavior.
+
+ Returns:
+ An object that is both a Call for the RPC and a future.Future. In the
+ event of RPC completion, the return Future's result value will be the
+ response value of the RPC. In the event of RPC abortion, the returned
+ Future's exception value will be an AbortionError.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def inline_unary_stream(self,
+ group,
+ method,
+ request,
+ timeout,
+ metadata=None,
+ protocol_options=None):
+ """Invokes a unary-request-stream-response method.
+
+ Args:
+ group: The group identifier of the RPC.
+ method: The method identifier of the RPC.
+ request: The request value for the RPC.
+ timeout: A duration of time in seconds to allow for the RPC.
+ metadata: A metadata value to be passed to the service-side of the RPC.
+ protocol_options: A value specified by the provider of a Face interface
+ implementation affording custom state and behavior.
+
+ Returns:
+ An object that is both a Call for the RPC and an iterator of response
+ values. Drawing response values from the returned iterator may raise
+ AbortionError indicating abortion of the RPC.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def blocking_stream_unary(self,
+ group,
+ method,
+ request_iterator,
+ timeout,
+ metadata=None,
+ with_call=False,
+ protocol_options=None):
+ """Invokes a stream-request-unary-response method.
+
+ This method blocks until either returning the response value of the RPC
+ (in the event of RPC completion) or raising an exception (in the event of
+ RPC abortion).
+
+ Args:
+ group: The group identifier of the RPC.
+ method: The method identifier of the RPC.
+ request_iterator: An iterator that yields request values for the RPC.
+ timeout: A duration of time in seconds to allow for the RPC.
+ metadata: A metadata value to be passed to the service-side of the RPC.
+ with_call: Whether or not to return a Call for the RPC in addition
+ to the response.
+ protocol_options: A value specified by the provider of a Face interface
+ implementation affording custom state and behavior.
+
+ Returns:
+ The response value for the RPC, and a Call for the RPC if with_call was
+ set to True at invocation.
+
+ Raises:
+ AbortionError: Indicating that the RPC was aborted.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def future_stream_unary(self,
+ group,
+ method,
+ request_iterator,
+ timeout,
+ metadata=None,
+ protocol_options=None):
+ """Invokes a stream-request-unary-response method.
+
+ Args:
+ group: The group identifier of the RPC.
+ method: The method identifier of the RPC.
+ request_iterator: An iterator that yields request values for the RPC.
+ timeout: A duration of time in seconds to allow for the RPC.
+ metadata: A metadata value to be passed to the service-side of the RPC.
+ protocol_options: A value specified by the provider of a Face interface
+ implementation affording custom state and behavior.
+
+ Returns:
+ An object that is both a Call for the RPC and a future.Future. In the
+ event of RPC completion, the return Future's result value will be the
+ response value of the RPC. In the event of RPC abortion, the returned
+ Future's exception value will be an AbortionError.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def inline_stream_stream(self,
+ group,
+ method,
+ request_iterator,
+ timeout,
+ metadata=None,
+ protocol_options=None):
+ """Invokes a stream-request-stream-response method.
+
+ Args:
+ group: The group identifier of the RPC.
+ method: The method identifier of the RPC.
+ request_iterator: An iterator that yields request values for the RPC.
+ timeout: A duration of time in seconds to allow for the RPC.
+ metadata: A metadata value to be passed to the service-side of the RPC.
+ protocol_options: A value specified by the provider of a Face interface
+ implementation affording custom state and behavior.
+
+ Returns:
+ An object that is both a Call for the RPC and an iterator of response
+ values. Drawing response values from the returned iterator may raise
+ AbortionError indicating abortion of the RPC.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def event_unary_unary(self,
+ group,
+ method,
+ request,
+ receiver,
+ abortion_callback,
+ timeout,
+ metadata=None,
+ protocol_options=None):
+ """Event-driven invocation of a unary-request-unary-response method.
+
+ Args:
+ group: The group identifier of the RPC.
+ method: The method identifier of the RPC.
+ request: The request value for the RPC.
+ receiver: A ResponseReceiver to be passed the response data of the RPC.
+ abortion_callback: A callback to be called and passed an Abortion value
+ in the event of RPC abortion.
+ timeout: A duration of time in seconds to allow for the RPC.
+ metadata: A metadata value to be passed to the service-side of the RPC.
+ protocol_options: A value specified by the provider of a Face interface
+ implementation affording custom state and behavior.
+
+ Returns:
+ A Call for the RPC.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def event_unary_stream(self,
+ group,
+ method,
+ request,
+ receiver,
+ abortion_callback,
+ timeout,
+ metadata=None,
+ protocol_options=None):
+ """Event-driven invocation of a unary-request-stream-response method.
+
+ Args:
+ group: The group identifier of the RPC.
+ method: The method identifier of the RPC.
+ request: The request value for the RPC.
+ receiver: A ResponseReceiver to be passed the response data of the RPC.
+ abortion_callback: A callback to be called and passed an Abortion value
+ in the event of RPC abortion.
+ timeout: A duration of time in seconds to allow for the RPC.
+ metadata: A metadata value to be passed to the service-side of the RPC.
+ protocol_options: A value specified by the provider of a Face interface
+ implementation affording custom state and behavior.
+
+ Returns:
+ A Call for the RPC.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def event_stream_unary(self,
+ group,
+ method,
+ receiver,
+ abortion_callback,
+ timeout,
+ metadata=None,
+ protocol_options=None):
+ """Event-driven invocation of a stream-request-unary-response method.
+
+ Args:
+ group: The group identifier of the RPC.
+ method: The method identifier of the RPC.
+ receiver: A ResponseReceiver to be passed the response data of the RPC.
+ abortion_callback: A callback to be called and passed an Abortion value
+ in the event of RPC abortion.
+ timeout: A duration of time in seconds to allow for the RPC.
+ metadata: A metadata value to be passed to the service-side of the RPC.
+ protocol_options: A value specified by the provider of a Face interface
+ implementation affording custom state and behavior.
+
+ Returns:
+ A pair of a Call object for the RPC and a stream.Consumer to which the
+ request values of the RPC should be passed.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def event_stream_stream(self,
+ group,
+ method,
+ receiver,
+ abortion_callback,
+ timeout,
+ metadata=None,
+ protocol_options=None):
+ """Event-driven invocation of a stream-request-stream-response method.
+
+ Args:
+ group: The group identifier of the RPC.
+ method: The method identifier of the RPC.
+ receiver: A ResponseReceiver to be passed the response data of the RPC.
+ abortion_callback: A callback to be called and passed an Abortion value
+ in the event of RPC abortion.
+ timeout: A duration of time in seconds to allow for the RPC.
+ metadata: A metadata value to be passed to the service-side of the RPC.
+ protocol_options: A value specified by the provider of a Face interface
+ implementation affording custom state and behavior.
+
+ Returns:
+ A pair of a Call object for the RPC and a stream.Consumer to which the
+ request values of the RPC should be passed.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def unary_unary(self, group, method):
+ """Creates a UnaryUnaryMultiCallable for a unary-unary method.
+
+ Args:
+ group: The group identifier of the RPC.
+ method: The method identifier of the RPC.
+
+ Returns:
+ A UnaryUnaryMultiCallable value for the named unary-unary method.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def unary_stream(self, group, method):
+ """Creates a UnaryStreamMultiCallable for a unary-stream method.
+
+ Args:
+ group: The group identifier of the RPC.
+ method: The method identifier of the RPC.
+
+ Returns:
+ A UnaryStreamMultiCallable value for the named unary-stream method.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def stream_unary(self, group, method):
+ """Creates a StreamUnaryMultiCallable for a stream-unary method.
+
+ Args:
+ group: The group identifier of the RPC.
+ method: The method identifier of the RPC.
+
+ Returns:
+ A StreamUnaryMultiCallable value for the named stream-unary method.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def stream_stream(self, group, method):
+ """Creates a StreamStreamMultiCallable for a stream-stream method.
+
+ Args:
+ group: The group identifier of the RPC.
+ method: The method identifier of the RPC.
+
+ Returns:
+ A StreamStreamMultiCallable value for the named stream-stream method.
+ """
+ raise NotImplementedError()
+
+class DynamicStub(six.with_metaclass(abc.ABCMeta)):
+ """Affords RPC invocation via attributes corresponding to available methods.
+
+ Instances of this type may be scoped to a single group so that attribute
+ access is unambiguous.
+
+ Instances of this type respond to attribute access as follows: if the
+ requested attribute is the name of a unary-unary method, the value of the
+ attribute will be a UnaryUnaryMultiCallable with which to invoke an RPC; if
+ the requested attribute is the name of a unary-stream method, the value of the
+ attribute will be a UnaryStreamMultiCallable with which to invoke an RPC; if
+ the requested attribute is the name of a stream-unary method, the value of the
+ attribute will be a StreamUnaryMultiCallable with which to invoke an RPC; and
+ if the requested attribute is the name of a stream-stream method, the value of
+ the attribute will be a StreamStreamMultiCallable with which to invoke an RPC.
+ """
diff --git a/contrib/python/grpcio/py2/grpc/framework/interfaces/face/utilities.py b/contrib/python/grpcio/py2/grpc/framework/interfaces/face/utilities.py
new file mode 100644
index 0000000000..f27bd67615
--- /dev/null
+++ b/contrib/python/grpcio/py2/grpc/framework/interfaces/face/utilities.py
@@ -0,0 +1,168 @@
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Utilities for RPC Framework's Face interface."""
+
+import collections
+
+# stream is referenced from specification in this module.
+from grpc.framework.common import cardinality
+from grpc.framework.common import style
+from grpc.framework.foundation import stream # pylint: disable=unused-import
+from grpc.framework.interfaces.face import face
+
+
+class _MethodImplementation(face.MethodImplementation,
+ collections.namedtuple('_MethodImplementation', [
+ 'cardinality',
+ 'style',
+ 'unary_unary_inline',
+ 'unary_stream_inline',
+ 'stream_unary_inline',
+ 'stream_stream_inline',
+ 'unary_unary_event',
+ 'unary_stream_event',
+ 'stream_unary_event',
+ 'stream_stream_event',
+ ])):
+ pass  # Attribute semantics are documented on face.MethodImplementation.
+
+
+def unary_unary_inline(behavior):
+ """Creates a face.MethodImplementation for the given behavior.
+
+ Args:
+ behavior: The implementation of a unary-unary RPC method as a callable value
+ that takes a request value and a face.ServicerContext object and
+ returns a response value.
+
+ Returns:
+ A face.MethodImplementation derived from the given behavior.
+ """
+ return _MethodImplementation(cardinality.Cardinality.UNARY_UNARY,
+ style.Service.INLINE, behavior, None, None,
+ None, None, None, None, None)
+
+
+def unary_stream_inline(behavior):
+ """Creates a face.MethodImplementation for the given behavior.
+
+ Args:
+ behavior: The implementation of a unary-stream RPC method as a callable
+ value that takes a request value and a face.ServicerContext object and
+ returns an iterator of response values.
+
+ Returns:
+ A face.MethodImplementation derived from the given behavior.
+ """
+ return _MethodImplementation(cardinality.Cardinality.UNARY_STREAM,
+ style.Service.INLINE, None, behavior, None,
+ None, None, None, None, None)
+
+
+def stream_unary_inline(behavior):
+ """Creates a face.MethodImplementation for the given behavior.
+
+ Args:
+ behavior: The implementation of a stream-unary RPC method as a callable
+ value that takes an iterator of request values and a
+ face.ServicerContext object and returns a response value.
+
+ Returns:
+ A face.MethodImplementation derived from the given behavior.
+ """
+ return _MethodImplementation(cardinality.Cardinality.STREAM_UNARY,
+ style.Service.INLINE, None, None, behavior,
+ None, None, None, None, None)
+
+
+def stream_stream_inline(behavior):
+ """Creates a face.MethodImplementation for the given behavior.
+
+ Args:
+ behavior: The implementation of a stream-stream RPC method as a callable
+ value that takes an iterator of request values and a
+ face.ServicerContext object and returns an iterator of response values.
+
+ Returns:
+ A face.MethodImplementation derived from the given behavior.
+ """
+ return _MethodImplementation(cardinality.Cardinality.STREAM_STREAM,
+ style.Service.INLINE, None, None, None,
+ behavior, None, None, None, None)
+
+
+def unary_unary_event(behavior):
+ """Creates a face.MethodImplementation for the given behavior.
+
+ Args:
+ behavior: The implementation of a unary-unary RPC method as a callable
+ value that takes a request value, a response callback to which to pass
+ the response value of the RPC, and a face.ServicerContext.
+
+ Returns:
+ A face.MethodImplementation derived from the given behavior.
+ """
+ return _MethodImplementation(cardinality.Cardinality.UNARY_UNARY,
+ style.Service.EVENT, None, None, None, None,
+ behavior, None, None, None)
+
+
+def unary_stream_event(behavior):
+ """Creates a face.MethodImplementation for the given behavior.
+
+ Args:
+ behavior: The implementation of a unary-stream RPC method as a callable
+ value that takes a request value, a stream.Consumer to which to pass the
+ response values of the RPC, and a face.ServicerContext.
+
+ Returns:
+ A face.MethodImplementation derived from the given behavior.
+ """
+ return _MethodImplementation(cardinality.Cardinality.UNARY_STREAM,
+ style.Service.EVENT, None, None, None, None,
+ None, behavior, None, None)
+
+
+def stream_unary_event(behavior):
+ """Creates a face.MethodImplementation for the given behavior.
+
+ Args:
+ behavior: The implementation of a stream-unary RPC method as a callable
+ value that takes a response callback to which to pass the response value
+ of the RPC and a face.ServicerContext and returns a stream.Consumer to
+ which the request values of the RPC should be passed.
+
+ Returns:
+ A face.MethodImplementation derived from the given behavior.
+ """
+ return _MethodImplementation(cardinality.Cardinality.STREAM_UNARY,
+ style.Service.EVENT, None, None, None, None,
+ None, None, behavior, None)
+
+
+def stream_stream_event(behavior):
+ """Creates a face.MethodImplementation for the given behavior.
+
+ Args:
+ behavior: The implementation of a stream-stream RPC method as a callable
+ value that takes a stream.Consumer to which to pass the response values
+ of the RPC and a face.ServicerContext and returns a stream.Consumer to
+ which the request values of the RPC should be passed.
+
+ Returns:
+ A face.MethodImplementation derived from the given behavior.
+ """
+ return _MethodImplementation(cardinality.Cardinality.STREAM_STREAM,
+ style.Service.EVENT, None, None, None, None,
+ None, None, None, behavior)
diff --git a/contrib/python/grpcio/py2/ya.make b/contrib/python/grpcio/py2/ya.make
new file mode 100644
index 0000000000..3e014d610a
--- /dev/null
+++ b/contrib/python/grpcio/py2/ya.make
@@ -0,0 +1,89 @@
+PY2_LIBRARY()
+
+LICENSE(
+ Apache-2.0 AND
+ BSD-3-Clause AND
+ MPL-2.0 AND
+ Python-2.0
+)
+
+LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
+
+VERSION(1.50.0)
+
+ORIGINAL_SOURCE(mirror://pypi/g/grpcio/grpcio-1.50.0.tar.gz)
+
+PEERDIR(
+ contrib/libs/grpc
+ contrib/python/six
+)
+
+PEERDIR(
+ contrib/deprecated/python/enum34
+ contrib/deprecated/python/futures
+)
+
+ADDINCL(
+ ${ARCADIA_BUILD_ROOT}/contrib/libs/grpc
+ contrib/libs/grpc
+ contrib/libs/grpc/include
+ FOR
+ cython
+ contrib/python/grpcio/py2
+)
+
+IF (SANITIZER_TYPE == undefined)
+ CXXFLAGS(-fno-sanitize=function)
+ENDIF()
+
+NO_COMPILER_WARNINGS()
+
+NO_LINT()
+
+PY_SRCS(
+ TOP_LEVEL
+ grpc/__init__.py
+ grpc/_auth.py
+ grpc/_channel.py
+ grpc/_common.py
+ grpc/_compression.py
+ grpc/_cython/__init__.py
+ grpc/_cython/_cygrpc/__init__.py
+ grpc/_cython/cygrpc.pyx
+ grpc/_grpcio_metadata.py
+ grpc/_interceptor.py
+ grpc/_plugin_wrapping.py
+ grpc/_runtime_protos.py
+ grpc/_server.py
+ grpc/_utilities.py
+ grpc/beta/__init__.py
+ grpc/beta/_client_adaptations.py
+ grpc/beta/_metadata.py
+ grpc/beta/_server_adaptations.py
+ grpc/beta/implementations.py
+ grpc/beta/interfaces.py
+ grpc/beta/utilities.py
+ grpc/experimental/__init__.py
+ grpc/experimental/gevent.py
+ grpc/experimental/session_cache.py
+ grpc/framework/__init__.py
+ grpc/framework/common/__init__.py
+ grpc/framework/common/cardinality.py
+ grpc/framework/common/style.py
+ grpc/framework/foundation/__init__.py
+ grpc/framework/foundation/abandonment.py
+ grpc/framework/foundation/callable_util.py
+ grpc/framework/foundation/future.py
+ grpc/framework/foundation/logging_pool.py
+ grpc/framework/foundation/stream.py
+ grpc/framework/foundation/stream_util.py
+ grpc/framework/interfaces/__init__.py
+ grpc/framework/interfaces/base/__init__.py
+ grpc/framework/interfaces/base/base.py
+ grpc/framework/interfaces/base/utilities.py
+ grpc/framework/interfaces/face/__init__.py
+ grpc/framework/interfaces/face/face.py
+ grpc/framework/interfaces/face/utilities.py
+)
+
+END()
diff --git a/contrib/python/grpcio/py3/LICENSE b/contrib/python/grpcio/py3/LICENSE
new file mode 100644
index 0000000000..0e09a3e909
--- /dev/null
+++ b/contrib/python/grpcio/py3/LICENSE
@@ -0,0 +1,610 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+-----------------------------------------------------------
+
+BSD 3-Clause License
+
+Copyright 2016, Google Inc.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice,
+this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice,
+this list of conditions and the following disclaimer in the documentation
+and/or other materials provided with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its
+contributors may be used to endorse or promote products derived from this
+software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+THE POSSIBILITY OF SUCH DAMAGE.
+
+-----------------------------------------------------------
+
+Mozilla Public License Version 2.0
+==================================
+
+1. Definitions
+--------------
+
+1.1. "Contributor"
+ means each individual or legal entity that creates, contributes to
+ the creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+ means the combination of the Contributions of others (if any) used
+ by a Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+ means Source Code Form to which the initial Contributor has attached
+ the notice in Exhibit A, the Executable Form of such Source Code
+ Form, and Modifications of such Source Code Form, in each case
+ including portions thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ (a) that the initial Contributor has attached the notice described
+ in Exhibit B to the Covered Software; or
+
+ (b) that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the
+ terms of a Secondary License.
+
+1.6. "Executable Form"
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+ means a work that combines Covered Software with other material, in
+ a separate file or files, that is not Covered Software.
+
+1.8. "License"
+ means this document.
+
+1.9. "Licensable"
+ means having the right to grant, to the maximum extent possible,
+ whether at the time of the initial grant or subsequently, any and
+ all of the rights conveyed by this License.
+
+1.10. "Modifications"
+ means any of the following:
+
+ (a) any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered
+ Software; or
+
+ (b) any new file in Source Code Form that contains any Covered
+ Software.
+
+1.11. "Patent Claims" of a Contributor
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the
+ License, by the making, using, selling, offering for sale, having
+ made, import, or transfer of either its Contributions or its
+ Contributor Version.
+
+1.12. "Secondary License"
+ means either the GNU General Public License, Version 2.0, the GNU
+ Lesser General Public License, Version 2.1, the GNU Affero General
+ Public License, Version 3.0, or any later versions of those
+ licenses.
+
+1.13. "Source Code Form"
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that
+ controls, is controlled by, or is under common control with You. For
+ purposes of this definition, "control" means (a) the power, direct
+ or indirect, to cause the direction or management of such entity,
+ whether by contract or otherwise, or (b) ownership of more than
+ fifty percent (50%) of the outstanding shares or beneficial
+ ownership of such entity.
+
+2. License Grants and Conditions
+--------------------------------
+
+2.1. Grants
+
+Each Contributor hereby grants You a world-wide, royalty-free,
+non-exclusive license:
+
+(a) under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+(b) under Patent Claims of such Contributor to make, use, sell, offer
+ for sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+The licenses granted in Section 2.1 with respect to any Contribution
+become effective for each Contribution on the date the Contributor first
+distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+The licenses granted in this Section 2 are the only rights granted under
+this License. No additional rights or licenses will be implied from the
+distribution or licensing of Covered Software under this License.
+Notwithstanding Section 2.1(b) above, no patent license is granted by a
+Contributor:
+
+(a) for any code that a Contributor has removed from Covered Software;
+ or
+
+(b) for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+(c) under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+This License does not grant any rights in the trademarks, service marks,
+or logos of any Contributor (except as may be necessary to comply with
+the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+No Contributor makes additional grants as a result of Your choice to
+distribute the Covered Software under a subsequent version of this
+License (see Section 10.2) or under the terms of a Secondary License (if
+permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+Each Contributor represents that the Contributor believes its
+Contributions are its original creation(s) or it has sufficient rights
+to grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+This License is not intended to limit any rights You have under
+applicable copyright doctrines of fair use, fair dealing, or other
+equivalents.
+
+2.7. Conditions
+
+Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
+in Section 2.1.
+
+3. Responsibilities
+-------------------
+
+3.1. Distribution of Source Form
+
+All distribution of Covered Software in Source Code Form, including any
+Modifications that You create or to which You contribute, must be under
+the terms of this License. You must inform recipients that the Source
+Code Form of the Covered Software is governed by the terms of this
+License, and how they can obtain a copy of this License. You may not
+attempt to alter or restrict the recipients' rights in the Source Code
+Form.
+
+3.2. Distribution of Executable Form
+
+If You distribute Covered Software in Executable Form then:
+
+(a) such Covered Software must also be made available in Source Code
+ Form, as described in Section 3.1, and You must inform recipients of
+ the Executable Form how they can obtain a copy of such Source Code
+ Form by reasonable means in a timely manner, at a charge no more
+ than the cost of distribution to the recipient; and
+
+(b) You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter
+ the recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+You may create and distribute a Larger Work under terms of Your choice,
+provided that You also comply with the requirements of this License for
+the Covered Software. If the Larger Work is a combination of Covered
+Software with a work governed by one or more Secondary Licenses, and the
+Covered Software is not Incompatible With Secondary Licenses, this
+License permits You to additionally distribute such Covered Software
+under the terms of such Secondary License(s), so that the recipient of
+the Larger Work may, at their option, further distribute the Covered
+Software under the terms of either this License or such Secondary
+License(s).
+
+3.4. Notices
+
+You may not remove or alter the substance of any license notices
+(including copyright notices, patent notices, disclaimers of warranty,
+or limitations of liability) contained within the Source Code Form of
+the Covered Software, except that You may alter any license notices to
+the extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+You may choose to offer, and to charge a fee for, warranty, support,
+indemnity or liability obligations to one or more recipients of Covered
+Software. However, You may do so only on Your own behalf, and not on
+behalf of any Contributor. You must make it absolutely clear that any
+such warranty, support, indemnity, or liability obligation is offered by
+You alone, and You hereby agree to indemnify every Contributor for any
+liability incurred by such Contributor as a result of warranty, support,
+indemnity or liability terms You offer. You may include additional
+disclaimers of warranty and limitations of liability specific to any
+jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+---------------------------------------------------
+
+If it is impossible for You to comply with any of the terms of this
+License with respect to some or all of the Covered Software due to
+statute, judicial order, or regulation then You must: (a) comply with
+the terms of this License to the maximum extent possible; and (b)
+describe the limitations and the code they affect. Such description must
+be placed in a text file included with all distributions of the Covered
+Software under this License. Except to the extent prohibited by statute
+or regulation, such description must be sufficiently detailed for a
+recipient of ordinary skill to be able to understand it.
+
+5. Termination
+--------------
+
+5.1. The rights granted under this License will terminate automatically
+if You fail to comply with any of its terms. However, if You become
+compliant, then the rights granted under this License from a particular
+Contributor are reinstated (a) provisionally, unless and until such
+Contributor explicitly and finally terminates Your grants, and (b) on an
+ongoing basis, if such Contributor fails to notify You of the
+non-compliance by some reasonable means prior to 60 days after You have
+come back into compliance. Moreover, Your grants from a particular
+Contributor are reinstated on an ongoing basis if such Contributor
+notifies You of the non-compliance by some reasonable means, this is the
+first time You have received notice of non-compliance with this License
+from such Contributor, and You become compliant prior to 30 days after
+Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+infringement claim (excluding declaratory judgment actions,
+counter-claims, and cross-claims) alleging that a Contributor Version
+directly or indirectly infringes any patent, then the rights granted to
+You by any and all Contributors for the Covered Software under Section
+2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all
+end user license agreements (excluding distributors and resellers) which
+have been validly granted by You or Your distributors under this License
+prior to termination shall survive termination.
+
+************************************************************************
+* *
+* 6. Disclaimer of Warranty *
+* ------------------------- *
+* *
+* Covered Software is provided under this License on an "as is" *
+* basis, without warranty of any kind, either expressed, implied, or *
+* statutory, including, without limitation, warranties that the *
+* Covered Software is free of defects, merchantable, fit for a *
+* particular purpose or non-infringing. The entire risk as to the *
+* quality and performance of the Covered Software is with You. *
+* Should any Covered Software prove defective in any respect, You *
+* (not any Contributor) assume the cost of any necessary servicing, *
+* repair, or correction. This disclaimer of warranty constitutes an *
+* essential part of this License. No use of any Covered Software is *
+* authorized under this License except under this disclaimer. *
+* *
+************************************************************************
+
+************************************************************************
+* *
+* 7. Limitation of Liability *
+* -------------------------- *
+* *
+* Under no circumstances and under no legal theory, whether tort *
+* (including negligence), contract, or otherwise, shall any *
+* Contributor, or anyone who distributes Covered Software as *
+* permitted above, be liable to You for any direct, indirect, *
+* special, incidental, or consequential damages of any character *
+* including, without limitation, damages for lost profits, loss of *
+* goodwill, work stoppage, computer failure or malfunction, or any *
+* and all other commercial damages or losses, even if such party *
+* shall have been informed of the possibility of such damages. This *
+* limitation of liability shall not apply to liability for death or *
+* personal injury resulting from such party's negligence to the *
+* extent applicable law prohibits such limitation. Some *
+* jurisdictions do not allow the exclusion or limitation of *
+* incidental or consequential damages, so this exclusion and *
+* limitation may not apply to You. *
+* *
+************************************************************************
+
+8. Litigation
+-------------
+
+Any litigation relating to this License may be brought only in the
+courts of a jurisdiction where the defendant maintains its principal
+place of business and such litigation shall be governed by laws of that
+jurisdiction, without reference to its conflict-of-law provisions.
+Nothing in this Section shall prevent a party's ability to bring
+cross-claims or counter-claims.
+
+9. Miscellaneous
+----------------
+
+This License represents the complete agreement concerning the subject
+matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed only to the extent
+necessary to make it enforceable. Any law or regulation which provides
+that the language of a contract shall be construed against the drafter
+shall not be used to construe this License against a Contributor.
+
+10. Versions of the License
+---------------------------
+
+10.1. New Versions
+
+Mozilla Foundation is the license steward. Except as provided in Section
+10.3, no one other than the license steward has the right to modify or
+publish new versions of this License. Each version will be given a
+distinguishing version number.
+
+10.2. Effect of New Versions
+
+You may distribute the Covered Software under the terms of the version
+of the License under which You originally received the Covered Software,
+or under the terms of any subsequent version published by the license
+steward.
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a
+modified version of this License if you rename the license and remove
+any references to the name of the license steward (except to note that
+such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+ This Source Code Form is subject to the terms of the Mozilla Public
+ License, v. 2.0. If a copy of the MPL was not distributed with this
+ file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+ This Source Code Form is "Incompatible With Secondary Licenses", as
+ defined by the Mozilla Public License, v. 2.0.
diff --git a/contrib/python/grpcio/py3/README.md b/contrib/python/grpcio/py3/README.md
new file mode 100644
index 0000000000..827158adf3
--- /dev/null
+++ b/contrib/python/grpcio/py3/README.md
@@ -0,0 +1,104 @@
+# gRPC – An RPC library and framework
+
+gRPC is a modern, open source, high-performance remote procedure call (RPC)
+framework that can run anywhere. gRPC enables client and server applications to
+communicate transparently, and simplifies the building of connected systems.
+
+<table>
+ <tr>
+ <td><b>Homepage:</b></td>
+ <td><a href="https://grpc.io/">grpc.io</a></td>
+ </tr>
+ <tr>
+ <td><b>Mailing List:</b></td>
+ <td><a href="https://groups.google.com/forum/#!forum/grpc-io">grpc-io@googlegroups.com</a></td>
+ </tr>
+</table>
+
+[![Join the chat at https://gitter.im/grpc/grpc](https://badges.gitter.im/grpc/grpc.svg)](https://gitter.im/grpc/grpc?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
+
+## To start using gRPC
+
+To maximize usability, gRPC supports the standard method for adding dependencies
+to a user's chosen language (if there is one). In most languages, the gRPC
+runtime comes as a package available in a user's language package manager.
+
+For instructions on how to use the language-specific gRPC runtime for a project,
+please refer to these documents
+
+- [C++](src/cpp): follow the instructions under the `src/cpp` directory
+- [C#/.NET](https://github.com/grpc/grpc-dotnet): NuGet packages `Grpc.Net.Client`, `Grpc.AspNetCore.Server`
+- [Dart](https://github.com/grpc/grpc-dart): pub package `grpc`
+- [Go](https://github.com/grpc/grpc-go): `go get google.golang.org/grpc`
+- [Java](https://github.com/grpc/grpc-java): Use JARs from Maven Central
+ Repository
+- [Kotlin](https://github.com/grpc/grpc-kotlin): Use JARs from Maven Central
+ Repository
+- [Node](https://github.com/grpc/grpc-node): `npm install @grpc/grpc-js`
+- [Objective-C](src/objective-c): Add `gRPC-ProtoRPC` dependency to podspec
+- [PHP](src/php): `pecl install grpc`
+- [Python](src/python/grpcio): `pip install grpcio`
+- [Ruby](src/ruby): `gem install grpc`
+- [WebJS](https://github.com/grpc/grpc-web): follow the grpc-web instructions
+
+Per-language quickstart guides and tutorials can be found in the
+[documentation section on the grpc.io website](https://grpc.io/docs/). Code
+examples are available in the [examples](examples) directory.
+
+Precompiled bleeding-edge package builds of gRPC `master` branch's `HEAD` are
+uploaded daily to [packages.grpc.io](https://packages.grpc.io).
+
+## To start developing gRPC
+
+Contributions are welcome!
+
+Please read [How to contribute](CONTRIBUTING.md) which will guide you through
+the entire workflow of how to build the source code, how to run the tests, and
+how to contribute changes to the gRPC codebase. The "How to contribute" document
+also contains info on how the contribution process works and contains best
+practices for creating contributions.
+
+## Troubleshooting
+
+Sometimes things go wrong. Please check out the
+[Troubleshooting guide](TROUBLESHOOTING.md) if you are experiencing issues with
+gRPC.
+
+## Performance
+
+See the
+[Performance dashboard](https://grafana-dot-grpc-testing.appspot.com/)
+for performance numbers of master branch daily builds.
+
+## Concepts
+
+See [gRPC Concepts](CONCEPTS.md)
+
+## About This Repository
+
+This repository contains source code for gRPC libraries implemented in multiple
+languages written on top of a shared C core library [src/core](src/core).
+
+Libraries in different languages may be in various states of development. We are
+seeking contributions for all of these libraries:
+
+| Language | Source |
+| ----------------------- | ---------------------------------- |
+| Shared C [core library] | [src/core](src/core) |
+| C++ | [src/cpp](src/cpp) |
+| Ruby | [src/ruby](src/ruby) |
+| Python | [src/python](src/python) |
+| PHP | [src/php](src/php) |
+| C# (core library based) | [src/csharp](src/csharp) |
+| Objective-C | [src/objective-c](src/objective-c) |
+
+| Language | Source repo |
+| -------------------- | -------------------------------------------------- |
+| Java | [grpc-java](https://github.com/grpc/grpc-java) |
+| Kotlin | [grpc-kotlin](https://github.com/grpc/grpc-kotlin) |
+| Go | [grpc-go](https://github.com/grpc/grpc-go) |
+| NodeJS | [grpc-node](https://github.com/grpc/grpc-node) |
+| WebJS | [grpc-web](https://github.com/grpc/grpc-web) |
+| Dart | [grpc-dart](https://github.com/grpc/grpc-dart) |
+| .NET (pure C# impl.) | [grpc-dotnet](https://github.com/grpc/grpc-dotnet) |
+| Swift | [grpc-swift](https://github.com/grpc/grpc-swift) |
diff --git a/contrib/python/grpcio/py3/README.rst b/contrib/python/grpcio/py3/README.rst
new file mode 100644
index 0000000000..f3e261db2e
--- /dev/null
+++ b/contrib/python/grpcio/py3/README.rst
@@ -0,0 +1,115 @@
+gRPC Python
+===========
+
+|compat_check_pypi|
+
+Package for gRPC Python.
+
+.. |compat_check_pypi| image:: https://python-compatibility-tools.appspot.com/one_badge_image?package=grpcio
+ :target: https://python-compatibility-tools.appspot.com/one_badge_target?package=grpcio
+
+Supported Python Versions
+-------------------------
+Python >= 3.7
+
+Installation
+------------
+
+gRPC Python is available for Linux, macOS, and Windows.
+
+Installing From PyPI
+~~~~~~~~~~~~~~~~~~~~
+
+If you are installing locally...
+
+::
+
+ $ pip install grpcio
+
+Else system wide (on Ubuntu)...
+
+::
+
+ $ sudo pip install grpcio
+
+If you're on Windows make sure that you installed the :code:`pip.exe` component
+when you installed Python (if not go back and install it!) then invoke:
+
+::
+
+ $ pip.exe install grpcio
+
+Windows users may need to invoke :code:`pip.exe` from a command line run as
+administrator.
+
+n.b. On Windows and on Mac OS X one *must* have a recent release of :code:`pip`
+to retrieve the proper wheel from PyPI. Be sure to upgrade to the latest
+version!
+
+Installing From Source
+~~~~~~~~~~~~~~~~~~~~~~
+
+Building from source requires that you have the Python headers (usually a
+package named :code:`python-dev`).
+
+::
+
+ $ export REPO_ROOT=grpc # REPO_ROOT can be any directory of your choice
+ $ git clone -b RELEASE_TAG_HERE https://github.com/grpc/grpc $REPO_ROOT
+ $ cd $REPO_ROOT
+ $ git submodule update --init
+
+ # For the next two commands do `sudo pip install` if you get permission-denied errors
+ $ pip install -rrequirements.txt
+ $ GRPC_PYTHON_BUILD_WITH_CYTHON=1 pip install .
+
+You cannot currently install Python from source on Windows. Things might work
+out for you in MSYS2 (follow the Linux instructions), but it isn't officially
+supported at the moment.
+
+Troubleshooting
+~~~~~~~~~~~~~~~
+
+Help, I ...
+
+* **... see a** :code:`pkg_resources.VersionConflict` **when I try to install
+ grpc**
+
+ This is likely because :code:`pip` doesn't own the offending dependency,
+ which in turn is likely because your operating system's package manager owns
+ it. You'll need to force the installation of the dependency:
+
+ :code:`pip install --ignore-installed $OFFENDING_DEPENDENCY`
+
+ For example, if you get an error like the following:
+
+ ::
+
+ Traceback (most recent call last):
+ File "<string>", line 17, in <module>
+ ...
+ File "/usr/lib/python2.7/dist-packages/pkg_resources.py", line 509, in find
+ raise VersionConflict(dist, req)
+ pkg_resources.VersionConflict: (six 1.8.0 (/usr/lib/python2.7/dist-packages), Requirement.parse('six>=1.10'))
+
+ You can fix it by doing:
+
+ ::
+
+ sudo pip install --ignore-installed six
+
+* **... see the following error on some platforms**
+
+ ::
+
+ /tmp/pip-build-U8pSsr/cython/Cython/Plex/Scanners.c:4:20: fatal error: Python.h: No such file or directory
+ #include "Python.h"
+ ^
+ compilation terminated.
+
+  You can fix it by installing the `python-dev` package, i.e.
+
+ ::
+
+ sudo apt-get install python-dev
+
diff --git a/contrib/python/grpcio/py3/grpc/__init__.py b/contrib/python/grpcio/py3/grpc/__init__.py
new file mode 100644
index 0000000000..ce7446dc90
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/__init__.py
@@ -0,0 +1,2174 @@
+# Copyright 2015-2016 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""gRPC's Python API."""
+
+import abc
+import contextlib
+import enum
+import logging
+import sys
+
+from grpc import _compression
+from grpc._cython import cygrpc as _cygrpc
+from grpc._runtime_protos import protos
+from grpc._runtime_protos import protos_and_services
+from grpc._runtime_protos import services
+
+logging.getLogger(__name__).addHandler(logging.NullHandler())
+
+try:
+ # pylint: disable=ungrouped-imports
+ from grpc._grpcio_metadata import __version__
+except ImportError:
+ __version__ = "dev0"
+
+############################## Future Interface ###############################
+
+
+class FutureTimeoutError(Exception):
+ """Indicates that a method call on a Future timed out."""
+
+
+class FutureCancelledError(Exception):
+ """Indicates that the computation underlying a Future was cancelled."""
+
+
+class Future(abc.ABC):
+ """A representation of a computation in another control flow.
+
+ Computations represented by a Future may be yet to be begun,
+ may be ongoing, or may have already completed.
+ """
+
+ @abc.abstractmethod
+ def cancel(self):
+ """Attempts to cancel the computation.
+
+ This method does not block.
+
+ Returns:
+ bool:
+ Returns True if the computation was canceled.
+
+ Returns False under all other circumstances, for example:
+
+ 1. computation has begun and could not be canceled.
+ 2. computation has finished
+ 3. computation is scheduled for execution and it is impossible
+ to determine its state without blocking.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def cancelled(self):
+ """Describes whether the computation was cancelled.
+
+ This method does not block.
+
+ Returns:
+ bool:
+ Returns True if the computation was cancelled before its result became
+ available.
+
+ Returns False under all other circumstances, for example:
+
+ 1. computation was not cancelled.
+ 2. computation's result is available.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def running(self):
+ """Describes whether the computation is taking place.
+
+ This method does not block.
+
+ Returns:
+ Returns True if the computation is scheduled for execution or
+ currently executing.
+
+ Returns False if the computation already executed or was cancelled.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def done(self):
+ """Describes whether the computation has taken place.
+
+ This method does not block.
+
+ Returns:
+ bool:
+ Returns True if the computation already executed or was cancelled.
+ Returns False if the computation is scheduled for execution or
+ currently executing.
+ This is exactly opposite of the running() method's result.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def result(self, timeout=None):
+ """Returns the result of the computation or raises its exception.
+
+ This method may return immediately or may block.
+
+ Args:
+ timeout: The length of time in seconds to wait for the computation to
+ finish or be cancelled. If None, the call will block until the
+        computation's termination.
+
+ Returns:
+ The return value of the computation.
+
+ Raises:
+ FutureTimeoutError: If a timeout value is passed and the computation
+ does not terminate within the allotted time.
+ FutureCancelledError: If the computation was cancelled.
+ Exception: If the computation raised an exception, this call will
+ raise the same exception.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def exception(self, timeout=None):
+ """Return the exception raised by the computation.
+
+ This method may return immediately or may block.
+
+ Args:
+ timeout: The length of time in seconds to wait for the computation to
+ terminate or be cancelled. If None, the call will block until the
+        computation's termination.
+
+ Returns:
+ The exception raised by the computation, or None if the computation
+ did not raise an exception.
+
+ Raises:
+ FutureTimeoutError: If a timeout value is passed and the computation
+ does not terminate within the allotted time.
+ FutureCancelledError: If the computation was cancelled.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def traceback(self, timeout=None):
+ """Access the traceback of the exception raised by the computation.
+
+ This method may return immediately or may block.
+
+ Args:
+ timeout: The length of time in seconds to wait for the computation
+ to terminate or be cancelled. If None, the call will block until
+ the computation's termination.
+
+ Returns:
+ The traceback of the exception raised by the computation, or None
+ if the computation did not raise an exception.
+
+ Raises:
+ FutureTimeoutError: If a timeout value is passed and the computation
+ does not terminate within the allotted time.
+ FutureCancelledError: If the computation was cancelled.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def add_done_callback(self, fn):
+ """Adds a function to be called at completion of the computation.
+
+ The callback will be passed this Future object describing the outcome
+ of the computation. Callbacks will be invoked after the future is
+ terminated, whether successfully or not.
+
+ If the computation has already completed, the callback will be called
+ immediately.
+
+ Exceptions raised in the callback will be logged at ERROR level, but
+ will not terminate any threads of execution.
+
+ Args:
+ fn: A callable taking this Future object as its single parameter.
+ """
+ raise NotImplementedError()
+
+
+################################ gRPC Enums ##################################
+
+
+@enum.unique
+class ChannelConnectivity(enum.Enum):
+ """Mirrors grpc_connectivity_state in the gRPC Core.
+
+ Attributes:
+ IDLE: The channel is idle.
+ CONNECTING: The channel is connecting.
+ READY: The channel is ready to conduct RPCs.
+ TRANSIENT_FAILURE: The channel has seen a failure from which it expects
+ to recover.
+ SHUTDOWN: The channel has seen a failure from which it cannot recover.
+ """
+ IDLE = (_cygrpc.ConnectivityState.idle, 'idle')
+ CONNECTING = (_cygrpc.ConnectivityState.connecting, 'connecting')
+ READY = (_cygrpc.ConnectivityState.ready, 'ready')
+ TRANSIENT_FAILURE = (_cygrpc.ConnectivityState.transient_failure,
+ 'transient failure')
+ SHUTDOWN = (_cygrpc.ConnectivityState.shutdown, 'shutdown')
+
+
+@enum.unique
+class StatusCode(enum.Enum):
+ """Mirrors grpc_status_code in the gRPC Core.
+
+ Attributes:
+ OK: Not an error; returned on success
+ CANCELLED: The operation was cancelled (typically by the caller).
+ UNKNOWN: Unknown error.
+ INVALID_ARGUMENT: Client specified an invalid argument.
+ DEADLINE_EXCEEDED: Deadline expired before operation could complete.
+ NOT_FOUND: Some requested entity (e.g., file or directory) was not found.
+ ALREADY_EXISTS: Some entity that we attempted to create (e.g., file or directory)
+ already exists.
+ PERMISSION_DENIED: The caller does not have permission to execute the specified
+ operation.
+ UNAUTHENTICATED: The request does not have valid authentication credentials for the
+ operation.
+ RESOURCE_EXHAUSTED: Some resource has been exhausted, perhaps a per-user quota, or
+ perhaps the entire file system is out of space.
+ FAILED_PRECONDITION: Operation was rejected because the system is not in a state
+ required for the operation's execution.
+ ABORTED: The operation was aborted, typically due to a concurrency issue
+ like sequencer check failures, transaction aborts, etc.
+ UNIMPLEMENTED: Operation is not implemented or not supported/enabled in this service.
+ INTERNAL: Internal errors. Means some invariants expected by underlying
+ system has been broken.
+ UNAVAILABLE: The service is currently unavailable.
+ DATA_LOSS: Unrecoverable data loss or corruption.
+ """
+ OK = (_cygrpc.StatusCode.ok, 'ok')
+ CANCELLED = (_cygrpc.StatusCode.cancelled, 'cancelled')
+ UNKNOWN = (_cygrpc.StatusCode.unknown, 'unknown')
+ INVALID_ARGUMENT = (_cygrpc.StatusCode.invalid_argument, 'invalid argument')
+ DEADLINE_EXCEEDED = (_cygrpc.StatusCode.deadline_exceeded,
+ 'deadline exceeded')
+ NOT_FOUND = (_cygrpc.StatusCode.not_found, 'not found')
+ ALREADY_EXISTS = (_cygrpc.StatusCode.already_exists, 'already exists')
+ PERMISSION_DENIED = (_cygrpc.StatusCode.permission_denied,
+ 'permission denied')
+ RESOURCE_EXHAUSTED = (_cygrpc.StatusCode.resource_exhausted,
+ 'resource exhausted')
+ FAILED_PRECONDITION = (_cygrpc.StatusCode.failed_precondition,
+ 'failed precondition')
+ ABORTED = (_cygrpc.StatusCode.aborted, 'aborted')
+ OUT_OF_RANGE = (_cygrpc.StatusCode.out_of_range, 'out of range')
+ UNIMPLEMENTED = (_cygrpc.StatusCode.unimplemented, 'unimplemented')
+ INTERNAL = (_cygrpc.StatusCode.internal, 'internal')
+ UNAVAILABLE = (_cygrpc.StatusCode.unavailable, 'unavailable')
+ DATA_LOSS = (_cygrpc.StatusCode.data_loss, 'data loss')
+ UNAUTHENTICATED = (_cygrpc.StatusCode.unauthenticated, 'unauthenticated')
+
+
+############################# gRPC Status ################################
+
+
+class Status(abc.ABC):
+ """Describes the status of an RPC.
+
+ This is an EXPERIMENTAL API.
+
+ Attributes:
+ code: A StatusCode object to be sent to the client.
+ details: A UTF-8-encodable string to be sent to the client upon
+ termination of the RPC.
+ trailing_metadata: The trailing :term:`metadata` in the RPC.
+ """
+
+
+############################# gRPC Exceptions ################################
+
+
+class RpcError(Exception):
+ """Raised by the gRPC library to indicate non-OK-status RPC termination."""
+
+
+############################## Shared Context ################################
+
+
+class RpcContext(abc.ABC):
+ """Provides RPC-related information and action."""
+
+ @abc.abstractmethod
+ def is_active(self):
+ """Describes whether the RPC is active or has terminated.
+
+ Returns:
+ bool:
+ True if RPC is active, False otherwise.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def time_remaining(self):
+ """Describes the length of allowed time remaining for the RPC.
+
+ Returns:
+ A nonnegative float indicating the length of allowed time in seconds
+ remaining for the RPC to complete before it is considered to have
+ timed out, or None if no deadline was specified for the RPC.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def cancel(self):
+ """Cancels the RPC.
+
+ Idempotent and has no effect if the RPC has already terminated.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def add_callback(self, callback):
+ """Registers a callback to be called on RPC termination.
+
+ Args:
+ callback: A no-parameter callable to be called on RPC termination.
+
+ Returns:
+ True if the callback was added and will be called later; False if
+ the callback was not added and will not be called (because the RPC
+ already terminated or some other reason).
+ """
+ raise NotImplementedError()
+
+
+######################### Invocation-Side Context ############################
+
+
+class Call(RpcContext, metaclass=abc.ABCMeta):
+ """Invocation-side utility object for an RPC."""
+
+ @abc.abstractmethod
+ def initial_metadata(self):
+ """Accesses the initial metadata sent by the server.
+
+ This method blocks until the value is available.
+
+ Returns:
+ The initial :term:`metadata`.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def trailing_metadata(self):
+ """Accesses the trailing metadata sent by the server.
+
+ This method blocks until the value is available.
+
+ Returns:
+ The trailing :term:`metadata`.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def code(self):
+ """Accesses the status code sent by the server.
+
+ This method blocks until the value is available.
+
+ Returns:
+ The StatusCode value for the RPC.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def details(self):
+ """Accesses the details sent by the server.
+
+ This method blocks until the value is available.
+
+ Returns:
+ The details string of the RPC.
+ """
+ raise NotImplementedError()
+
+
+############## Invocation-Side Interceptor Interfaces & Classes ##############
+
+
+class ClientCallDetails(abc.ABC):
+ """Describes an RPC to be invoked.
+
+ Attributes:
+ method: The method name of the RPC.
+ timeout: An optional duration of time in seconds to allow for the RPC.
+ metadata: Optional :term:`metadata` to be transmitted to
+ the service-side of the RPC.
+ credentials: An optional CallCredentials for the RPC.
+ wait_for_ready: An optional flag to enable :term:`wait_for_ready` mechanism.
+ compression: An element of grpc.compression, e.g.
+ grpc.compression.Gzip.
+ """
+
+
+class UnaryUnaryClientInterceptor(abc.ABC):
+ """Affords intercepting unary-unary invocations."""
+
+ @abc.abstractmethod
+ def intercept_unary_unary(self, continuation, client_call_details, request):
+ """Intercepts a unary-unary invocation asynchronously.
+
+ Args:
+ continuation: A function that proceeds with the invocation by
+ executing the next interceptor in chain or invoking the
+ actual RPC on the underlying Channel. It is the interceptor's
+ responsibility to call it if it decides to move the RPC forward.
+ The interceptor can use
+ `response_future = continuation(client_call_details, request)`
+ to continue with the RPC. `continuation` returns an object that is
+ both a Call for the RPC and a Future. In the event of RPC
+ completion, the return Call-Future's result value will be
+ the response message of the RPC. Should the event terminate
+ with non-OK status, the returned Call-Future's exception value
+ will be an RpcError.
+ client_call_details: A ClientCallDetails object describing the
+ outgoing RPC.
+ request: The request value for the RPC.
+
+ Returns:
+ An object that is both a Call for the RPC and a Future.
+ In the event of RPC completion, the return Call-Future's
+ result value will be the response message of the RPC.
+ Should the event terminate with non-OK status, the returned
+ Call-Future's exception value will be an RpcError.
+ """
+ raise NotImplementedError()
+
+
+class UnaryStreamClientInterceptor(abc.ABC):
+ """Affords intercepting unary-stream invocations."""
+
+ @abc.abstractmethod
+ def intercept_unary_stream(self, continuation, client_call_details,
+ request):
+ """Intercepts a unary-stream invocation.
+
+ Args:
+ continuation: A function that proceeds with the invocation by
+ executing the next interceptor in chain or invoking the
+ actual RPC on the underlying Channel. It is the interceptor's
+ responsibility to call it if it decides to move the RPC forward.
+ The interceptor can use
+ `response_iterator = continuation(client_call_details, request)`
+ to continue with the RPC. `continuation` returns an object that is
+ both a Call for the RPC and an iterator for response values.
+ Drawing response values from the returned Call-iterator may
+ raise RpcError indicating termination of the RPC with non-OK
+ status.
+ client_call_details: A ClientCallDetails object describing the
+ outgoing RPC.
+ request: The request value for the RPC.
+
+ Returns:
+ An object that is both a Call for the RPC and an iterator of
+ response values. Drawing response values from the returned
+ Call-iterator may raise RpcError indicating termination of
+ the RPC with non-OK status. This object *should* also fulfill the
+ Future interface, though it may not.
+ """
+ raise NotImplementedError()
+
+
+class StreamUnaryClientInterceptor(abc.ABC):
+ """Affords intercepting stream-unary invocations."""
+
+ @abc.abstractmethod
+ def intercept_stream_unary(self, continuation, client_call_details,
+ request_iterator):
+ """Intercepts a stream-unary invocation asynchronously.
+
+ Args:
+ continuation: A function that proceeds with the invocation by
+ executing the next interceptor in chain or invoking the
+ actual RPC on the underlying Channel. It is the interceptor's
+ responsibility to call it if it decides to move the RPC forward.
+ The interceptor can use
+ `response_future = continuation(client_call_details, request_iterator)`
+ to continue with the RPC. `continuation` returns an object that is
+ both a Call for the RPC and a Future. In the event of RPC completion,
+ the return Call-Future's result value will be the response message
+ of the RPC. Should the event terminate with non-OK status, the
+ returned Call-Future's exception value will be an RpcError.
+ client_call_details: A ClientCallDetails object describing the
+ outgoing RPC.
+ request_iterator: An iterator that yields request values for the RPC.
+
+ Returns:
+ An object that is both a Call for the RPC and a Future.
+ In the event of RPC completion, the return Call-Future's
+ result value will be the response message of the RPC.
+ Should the event terminate with non-OK status, the returned
+ Call-Future's exception value will be an RpcError.
+ """
+ raise NotImplementedError()
+
+
+class StreamStreamClientInterceptor(abc.ABC):
+ """Affords intercepting stream-stream invocations."""
+
+ @abc.abstractmethod
+ def intercept_stream_stream(self, continuation, client_call_details,
+ request_iterator):
+ """Intercepts a stream-stream invocation.
+
+ Args:
+ continuation: A function that proceeds with the invocation by
+ executing the next interceptor in chain or invoking the
+ actual RPC on the underlying Channel. It is the interceptor's
+ responsibility to call it if it decides to move the RPC forward.
+ The interceptor can use
+ `response_iterator = continuation(client_call_details, request_iterator)`
+ to continue with the RPC. `continuation` returns an object that is
+ both a Call for the RPC and an iterator for response values.
+ Drawing response values from the returned Call-iterator may
+ raise RpcError indicating termination of the RPC with non-OK
+ status.
+ client_call_details: A ClientCallDetails object describing the
+ outgoing RPC.
+ request_iterator: An iterator that yields request values for the RPC.
+
+ Returns:
+ An object that is both a Call for the RPC and an iterator of
+ response values. Drawing response values from the returned
+ Call-iterator may raise RpcError indicating termination of
+ the RPC with non-OK status. This object *should* also fulfill the
+ Future interface, though it may not.
+ """
+ raise NotImplementedError()
+
+
+############ Authentication & Authorization Interfaces & Classes #############
+
+
+class ChannelCredentials(object):
+ """An encapsulation of the data required to create a secure Channel.
+
+ This class has no supported interface - it exists to define the type of its
+ instances and its instances exist to be passed to other functions. For
+ example, ssl_channel_credentials returns an instance of this class and
+ secure_channel requires an instance of this class.
+ """
+
+ def __init__(self, credentials):
+ self._credentials = credentials
+
+
+class CallCredentials(object):
+ """An encapsulation of the data required to assert an identity over a call.
+
+ A CallCredentials has to be used with secure Channel, otherwise the
+ metadata will not be transmitted to the server.
+
+ A CallCredentials may be composed with ChannelCredentials to always assert
+ identity for every call over that Channel.
+
+ This class has no supported interface - it exists to define the type of its
+ instances and its instances exist to be passed to other functions.
+ """
+
+ def __init__(self, credentials):
+ self._credentials = credentials
+
+
+class AuthMetadataContext(abc.ABC):
+ """Provides information to call credentials metadata plugins.
+
+ Attributes:
+ service_url: A string URL of the service being called into.
+ method_name: A string of the fully qualified method name being called.
+ """
+
+
+class AuthMetadataPluginCallback(abc.ABC):
+ """Callback object received by a metadata plugin."""
+
+ def __call__(self, metadata, error):
+ """Passes to the gRPC runtime authentication metadata for an RPC.
+
+ Args:
+ metadata: The :term:`metadata` used to construct the CallCredentials.
+ error: An Exception to indicate error or None to indicate success.
+ """
+ raise NotImplementedError()
+
+
+class AuthMetadataPlugin(abc.ABC):
+ """A specification for custom authentication."""
+
+ def __call__(self, context, callback):
+ """Implements authentication by passing metadata to a callback.
+
+ This method will be invoked asynchronously in a separate thread.
+
+ Args:
+ context: An AuthMetadataContext providing information on the RPC that
+ the plugin is being called to authenticate.
+ callback: An AuthMetadataPluginCallback to be invoked either
+ synchronously or asynchronously.
+ """
+ raise NotImplementedError()
+
+
+class ServerCredentials(object):
+ """An encapsulation of the data required to open a secure port on a Server.
+
+ This class has no supported interface - it exists to define the type of its
+ instances and its instances exist to be passed to other functions.
+ """
+
+ def __init__(self, credentials):
+ self._credentials = credentials
+
+
+class ServerCertificateConfiguration(object):
+ """A certificate configuration for use with an SSL-enabled Server.
+
+ Instances of this class can be returned in the certificate configuration
+ fetching callback.
+
+ This class has no supported interface -- it exists to define the
+ type of its instances and its instances exist to be passed to
+ other functions.
+ """
+
+ def __init__(self, certificate_configuration):
+ self._certificate_configuration = certificate_configuration
+
+
+######################## Multi-Callable Interfaces ###########################
+
+
+class UnaryUnaryMultiCallable(abc.ABC):
+ """Affords invoking a unary-unary RPC from client-side."""
+
+ @abc.abstractmethod
+ def __call__(self,
+ request,
+ timeout=None,
+ metadata=None,
+ credentials=None,
+ wait_for_ready=None,
+ compression=None):
+ """Synchronously invokes the underlying RPC.
+
+ Args:
+ request: The request value for the RPC.
+ timeout: An optional duration of time in seconds to allow
+ for the RPC.
+ metadata: Optional :term:`metadata` to be transmitted to the
+ service-side of the RPC.
+ credentials: An optional CallCredentials for the RPC. Only valid for
+ secure Channel.
+ wait_for_ready: An optional flag to enable :term:`wait_for_ready` mechanism.
+ compression: An element of grpc.compression, e.g.
+ grpc.compression.Gzip.
+
+ Returns:
+ The response value for the RPC.
+
+ Raises:
+ RpcError: Indicating that the RPC terminated with non-OK status. The
+ raised RpcError will also be a Call for the RPC affording the RPC's
+ metadata, status code, and details.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def with_call(self,
+ request,
+ timeout=None,
+ metadata=None,
+ credentials=None,
+ wait_for_ready=None,
+ compression=None):
+ """Synchronously invokes the underlying RPC.
+
+ Args:
+ request: The request value for the RPC.
+          timeout: An optional duration of time in seconds to allow for
+ the RPC.
+ metadata: Optional :term:`metadata` to be transmitted to the
+ service-side of the RPC.
+ credentials: An optional CallCredentials for the RPC. Only valid for
+ secure Channel.
+ wait_for_ready: An optional flag to enable :term:`wait_for_ready` mechanism.
+ compression: An element of grpc.compression, e.g.
+ grpc.compression.Gzip.
+
+ Returns:
+ The response value for the RPC and a Call value for the RPC.
+
+ Raises:
+ RpcError: Indicating that the RPC terminated with non-OK status. The
+ raised RpcError will also be a Call for the RPC affording the RPC's
+ metadata, status code, and details.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def future(self,
+ request,
+ timeout=None,
+ metadata=None,
+ credentials=None,
+ wait_for_ready=None,
+ compression=None):
+ """Asynchronously invokes the underlying RPC.
+
+ Args:
+ request: The request value for the RPC.
+ timeout: An optional duration of time in seconds to allow for
+ the RPC.
+ metadata: Optional :term:`metadata` to be transmitted to the
+ service-side of the RPC.
+ credentials: An optional CallCredentials for the RPC. Only valid for
+ secure Channel.
+ wait_for_ready: An optional flag to enable :term:`wait_for_ready` mechanism.
+ compression: An element of grpc.compression, e.g.
+ grpc.compression.Gzip.
+
+ Returns:
+ An object that is both a Call for the RPC and a Future.
+ In the event of RPC completion, the return Call-Future's result
+ value will be the response message of the RPC.
+ Should the event terminate with non-OK status,
+ the returned Call-Future's exception value will be an RpcError.
+ """
+ raise NotImplementedError()
+
+
class UnaryStreamMultiCallable(abc.ABC):
    """Enables client-side invocation of a unary-stream RPC."""

    @abc.abstractmethod
    def __call__(self, request, timeout=None, metadata=None, credentials=None,
                 wait_for_ready=None, compression=None):
        """Invokes the underlying RPC.

        Args:
          request: The request value for the RPC.
          timeout: Optional duration, in seconds, allowed for the RPC. If
            None, the timeout is considered infinite.
          metadata: Optional :term:`metadata` sent to the service-side of
            the RPC.
          credentials: Optional CallCredentials for the RPC; valid only on
            a secure Channel.
          wait_for_ready: Optional flag enabling the
            :term:`wait_for_ready` mechanism.
          compression: An element of grpc.compression, e.g.
            grpc.compression.Gzip.

        Returns:
          An object that is simultaneously a Call for the RPC, an iterator
          over response values, and a Future for the RPC. Drawing response
          values from the returned Call-iterator may raise RpcError,
          indicating that the RPC terminated with non-OK status.
        """
        raise NotImplementedError()
+
+
class StreamUnaryMultiCallable(abc.ABC):
    """Affords invoking a stream-unary RPC from client-side."""

    @abc.abstractmethod
    def __call__(self,
                 request_iterator,
                 timeout=None,
                 metadata=None,
                 credentials=None,
                 wait_for_ready=None,
                 compression=None):
        """Synchronously invokes the underlying RPC.

        Args:
          request_iterator: An iterator that yields request values for
            the RPC.
          timeout: An optional duration of time in seconds to allow for
            the RPC. If None, the timeout is considered infinite.
          metadata: Optional :term:`metadata` to be transmitted to the
            service-side of the RPC.
          credentials: An optional CallCredentials for the RPC. Only valid for
            secure Channel.
          wait_for_ready: An optional flag to enable :term:`wait_for_ready` mechanism.
          compression: An element of grpc.compression, e.g.
            grpc.compression.Gzip.

        Returns:
          The response value for the RPC.

        Raises:
          RpcError: Indicating that the RPC terminated with non-OK status. The
            raised RpcError will also implement grpc.Call, affording methods
            such as metadata, code, and details.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def with_call(self,
                  request_iterator,
                  timeout=None,
                  metadata=None,
                  credentials=None,
                  wait_for_ready=None,
                  compression=None):
        """Synchronously invokes the underlying RPC on the client.

        Args:
          request_iterator: An iterator that yields request values for
            the RPC.
          timeout: An optional duration of time in seconds to allow for
            the RPC. If None, the timeout is considered infinite.
          metadata: Optional :term:`metadata` to be transmitted to the
            service-side of the RPC.
          credentials: An optional CallCredentials for the RPC. Only valid for
            secure Channel.
          wait_for_ready: An optional flag to enable :term:`wait_for_ready` mechanism.
          compression: An element of grpc.compression, e.g.
            grpc.compression.Gzip.

        Returns:
          The response value for the RPC and a Call object for the RPC.

        Raises:
          RpcError: Indicating that the RPC terminated with non-OK status. The
            raised RpcError will also be a Call for the RPC affording the RPC's
            metadata, status code, and details.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def future(self,
               request_iterator,
               timeout=None,
               metadata=None,
               credentials=None,
               wait_for_ready=None,
               compression=None):
        """Asynchronously invokes the underlying RPC on the client.

        Args:
          request_iterator: An iterator that yields request values for the RPC.
          timeout: An optional duration of time in seconds to allow for
            the RPC. If None, the timeout is considered infinite.
          metadata: Optional :term:`metadata` to be transmitted to the
            service-side of the RPC.
          credentials: An optional CallCredentials for the RPC. Only valid for
            secure Channel.
          wait_for_ready: An optional flag to enable :term:`wait_for_ready` mechanism.
          compression: An element of grpc.compression, e.g.
            grpc.compression.Gzip.

        Returns:
          An object that is both a Call for the RPC and a Future.
          In the event of RPC completion, the returned Call-Future's result
          value will be the response message of the RPC. Should the event
          terminate with non-OK status, the returned Call-Future's exception
          value will be an RpcError.
        """
        raise NotImplementedError()
+
+
class StreamStreamMultiCallable(abc.ABC):
    """Enables client-side invocation of a stream-stream RPC."""

    @abc.abstractmethod
    def __call__(self, request_iterator, timeout=None, metadata=None,
                 credentials=None, wait_for_ready=None, compression=None):
        """Invokes the underlying RPC on the client.

        Args:
          request_iterator: An iterator yielding request values for the RPC.
          timeout: Optional duration, in seconds, allowed for the RPC. If
            not specified, the timeout is considered infinite.
          metadata: Optional :term:`metadata` sent to the service-side of
            the RPC.
          credentials: Optional CallCredentials for the RPC; valid only on
            a secure Channel.
          wait_for_ready: Optional flag enabling the
            :term:`wait_for_ready` mechanism.
          compression: An element of grpc.compression, e.g.
            grpc.compression.Gzip.

        Returns:
          An object that is simultaneously a Call for the RPC, an iterator
          over response values, and a Future for the RPC. Drawing response
          values from the returned Call-iterator may raise RpcError,
          indicating that the RPC terminated with non-OK status.
        """
        raise NotImplementedError()
+
+
+############################# Channel Interface ##############################
+
+
class Channel(abc.ABC):
    """Affords RPC invocation via generic methods on client-side.

    Channel objects implement the Context Manager type, although they need not
    support being entered and exited multiple times.
    """

    @abc.abstractmethod
    def subscribe(self, callback, try_to_connect=False):
        """Subscribe to this Channel's connectivity state machine.

        A Channel may be in any of the states described by ChannelConnectivity.
        This method allows application to monitor the state transitions.
        The typical use case is to debug or gain better visibility into gRPC
        runtime's state.

        Args:
          callback: A callable to be invoked with ChannelConnectivity argument.
            ChannelConnectivity describes current state of the channel.
            The callable will be invoked immediately upon subscription
            and again for every change to ChannelConnectivity until it
            is unsubscribed or this Channel object goes out of scope.
          try_to_connect: A boolean indicating whether or not this Channel
            should attempt to connect immediately. If set to False, gRPC
            runtime decides when to connect.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def unsubscribe(self, callback):
        """Unsubscribes a subscribed callback from this Channel's connectivity.

        Args:
          callback: A callable previously registered with this Channel from
            having been passed to its "subscribe" method.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def unary_unary(self,
                    method,
                    request_serializer=None,
                    response_deserializer=None):
        """Creates a UnaryUnaryMultiCallable for a unary-unary method.

        Args:
          method: The name of the RPC method.
          request_serializer: Optional :term:`serializer` for serializing the request
            message. Request goes unserialized in case None is passed.
          response_deserializer: Optional :term:`deserializer` for deserializing the
            response message. Response goes undeserialized in case None
            is passed.

        Returns:
          A UnaryUnaryMultiCallable value for the named unary-unary method.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def unary_stream(self,
                     method,
                     request_serializer=None,
                     response_deserializer=None):
        """Creates a UnaryStreamMultiCallable for a unary-stream method.

        Args:
          method: The name of the RPC method.
          request_serializer: Optional :term:`serializer` for serializing the request
            message. Request goes unserialized in case None is passed.
          response_deserializer: Optional :term:`deserializer` for deserializing the
            response message. Response goes undeserialized in case None is
            passed.

        Returns:
          A UnaryStreamMultiCallable value for the named unary-stream method.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def stream_unary(self,
                     method,
                     request_serializer=None,
                     response_deserializer=None):
        """Creates a StreamUnaryMultiCallable for a stream-unary method.

        Args:
          method: The name of the RPC method.
          request_serializer: Optional :term:`serializer` for serializing the request
            message. Request goes unserialized in case None is passed.
          response_deserializer: Optional :term:`deserializer` for deserializing the
            response message. Response goes undeserialized in case None is
            passed.

        Returns:
          A StreamUnaryMultiCallable value for the named stream-unary method.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def stream_stream(self,
                      method,
                      request_serializer=None,
                      response_deserializer=None):
        """Creates a StreamStreamMultiCallable for a stream-stream method.

        Args:
          method: The name of the RPC method.
          request_serializer: Optional :term:`serializer` for serializing the request
            message. Request goes unserialized in case None is passed.
          response_deserializer: Optional :term:`deserializer` for deserializing the
            response message. Response goes undeserialized in case None
            is passed.

        Returns:
          A StreamStreamMultiCallable value for the named stream-stream method.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def close(self):
        """Closes this Channel and releases all resources held by it.

        Closing the Channel will immediately terminate all RPCs active with the
        Channel and it is not valid to invoke new RPCs with the Channel.

        This method is idempotent.
        """
        raise NotImplementedError()

    def __enter__(self):
        """Enters the runtime context related to the channel object."""
        raise NotImplementedError()

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Exits the runtime context related to the channel object."""
        raise NotImplementedError()
+
+
+########################## Service-Side Context ##############################
+
+
class ServicerContext(RpcContext, metaclass=abc.ABCMeta):
    """A context object passed to method implementations."""

    @abc.abstractmethod
    def invocation_metadata(self):
        """Accesses the metadata sent by the client.

        Returns:
          The invocation :term:`metadata`.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def peer(self):
        """Identifies the peer that invoked the RPC being serviced.

        Returns:
          A string identifying the peer that invoked the RPC being serviced.
          The string format is determined by gRPC runtime.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def peer_identities(self):
        """Gets one or more peer identity(s).

        Equivalent to
        servicer_context.auth_context().get(servicer_context.peer_identity_key())

        Returns:
          An iterable of the identities, or None if the call is not
          authenticated. Each identity is returned as a raw bytes type.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def peer_identity_key(self):
        """The auth property used to identify the peer.

        For example, "x509_common_name" or "x509_subject_alternative_name" are
        used to identify an SSL peer.

        Returns:
          The auth property (string) that indicates the
          peer identity, or None if the call is not authenticated.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def auth_context(self):
        """Gets the auth context for the call.

        Returns:
          A map of strings to an iterable of bytes for each auth property.
        """
        raise NotImplementedError()

    def set_compression(self, compression):
        """Set the compression algorithm to be used for the entire call.

        Args:
          compression: An element of grpc.compression, e.g.
            grpc.compression.Gzip.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def send_initial_metadata(self, initial_metadata):
        """Sends the initial metadata value to the client.

        This method need not be called by implementations if they have no
        metadata to add to what the gRPC runtime will transmit.

        Args:
          initial_metadata: The initial :term:`metadata`.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def set_trailing_metadata(self, trailing_metadata):
        """Sets the trailing metadata for the RPC.

        Sets the trailing metadata to be sent upon completion of the RPC.

        If this method is invoked multiple times throughout the lifetime of an
        RPC, the value supplied in the final invocation will be the value sent
        over the wire.

        This method need not be called by implementations if they have no
        metadata to add to what the gRPC runtime will transmit.

        Args:
          trailing_metadata: The trailing :term:`metadata`.
        """
        raise NotImplementedError()

    def trailing_metadata(self):
        """Access value to be used as trailing metadata upon RPC completion.

        This is an EXPERIMENTAL API.

        Returns:
          The trailing :term:`metadata` for the RPC.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def abort(self, code, details):
        """Raises an exception to terminate the RPC with a non-OK status.

        The code and details passed as arguments will supersede any existing
        ones.

        Args:
          code: A StatusCode object to be sent to the client.
            It must not be StatusCode.OK.
          details: A UTF-8-encodable string to be sent to the client upon
            termination of the RPC.

        Raises:
          Exception: An exception is always raised to signal the abortion of
            the RPC to the gRPC runtime.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def abort_with_status(self, status):
        """Raises an exception to terminate the RPC with a non-OK status.

        The status passed as argument will supersede any existing status code,
        status message and trailing metadata.

        This is an EXPERIMENTAL API.

        Args:
          status: A grpc.Status object. The status code in it must not be
            StatusCode.OK.

        Raises:
          Exception: An exception is always raised to signal the abortion of
            the RPC to the gRPC runtime.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def set_code(self, code):
        """Sets the value to be used as status code upon RPC completion.

        This method need not be called by method implementations if they wish
        the gRPC runtime to determine the status code of the RPC.

        Args:
          code: A StatusCode object to be sent to the client.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def set_details(self, details):
        """Sets the value to be used as detail string upon RPC completion.

        This method need not be called by method implementations if they have
        no details to transmit.

        Args:
          details: A UTF-8-encodable string to be sent to the client upon
            termination of the RPC.
        """
        raise NotImplementedError()

    def code(self):
        """Accesses the value to be used as status code upon RPC completion.

        This is an EXPERIMENTAL API.

        Returns:
          The StatusCode value for the RPC.
        """
        raise NotImplementedError()

    def details(self):
        """Accesses the value to be used as detail string upon RPC completion.

        This is an EXPERIMENTAL API.

        Returns:
          The details string of the RPC.
        """
        raise NotImplementedError()

    def disable_next_message_compression(self):
        """Disables compression for the next response message.

        This method will override any compression configuration set during
        server creation or set on the call.
        """
        raise NotImplementedError()
+
+
+##################### Service-Side Handler Interfaces ########################
+
+
class RpcMethodHandler(abc.ABC):
    """An implementation of a single RPC method.

    Attributes:
      request_streaming: True if the RPC accepts an arbitrary number of
        request messages; False if it accepts exactly one.
      response_streaming: True if the RPC produces an arbitrary number of
        response messages; False if it produces exactly one.
      request_deserializer: A callable :term:`deserializer` taking a byte
        string and returning an object suitable to be passed to this object's
        business logic, or None if the business logic should receive the raw
        request bytes.
      response_serializer: A callable :term:`serializer` taking an object
        produced by this object's business logic and returning a byte string,
        or None if the byte strings produced by the business logic should be
        transmitted on the wire as they are.
      unary_unary: This object's application-specific business logic: a
        callable taking a request value and a ServicerContext object and
        returning a response value. Non-None only when request_streaming and
        response_streaming are both False.
      unary_stream: This object's application-specific business logic: a
        callable taking a request value and a ServicerContext object and
        returning an iterator of response values. Non-None only when
        request_streaming is False and response_streaming is True.
      stream_unary: This object's application-specific business logic: a
        callable taking an iterator of request values and a ServicerContext
        object and returning a response value. Non-None only when
        request_streaming is True and response_streaming is False.
      stream_stream: This object's application-specific business logic: a
        callable taking an iterator of request values and a ServicerContext
        object and returning an iterator of response values. Non-None only
        when request_streaming and response_streaming are both True.
    """
+
+
class HandlerCallDetails(abc.ABC):
    """Describes an RPC that has just arrived for service.

    Attributes:
      method: The method name of the RPC.
      invocation_metadata: The :term:`metadata` that the client sent.
    """
+
+
class GenericRpcHandler(abc.ABC):
    """Implements an arbitrary collection of RPC methods."""

    @abc.abstractmethod
    def service(self, handler_call_details):
        """Looks up the handler, if any, for an incoming RPC.

        Args:
          handler_call_details: A HandlerCallDetails describing the
            incoming RPC.

        Returns:
          An RpcMethodHandler with which to service the RPC if this
          implementation chooses to handle it, or None otherwise.
        """
        raise NotImplementedError()
+
+
class ServiceRpcHandler(GenericRpcHandler, metaclass=abc.ABCMeta):
    """An implementation of RPC methods belonging to one service.

    A service handles RPC methods whose structured names take the form
    '/Service.Name/Service.Method', where 'Service.Name' is the value
    returned by service_name() and 'Service.Method' is the method name.
    A service may expose many method names, but exactly one service name.
    """

    @abc.abstractmethod
    def service_name(self):
        """Returns the name of this service.

        Returns:
          The service name.
        """
        raise NotImplementedError()
+
+
+#################### Service-Side Interceptor Interfaces #####################
+
+
class ServerInterceptor(abc.ABC):
    """Enables interception of incoming RPCs on the service-side."""

    @abc.abstractmethod
    def intercept_service(self, continuation, handler_call_details):
        """Intercepts an incoming RPC before it reaches a handler.

        Args:
          continuation: A function taking a HandlerCallDetails that invokes
            the next interceptor in the chain, if any, or the RPC handler
            lookup logic, with the call details passed as its argument; it
            returns an RpcMethodHandler instance if the RPC is considered
            serviced, or None otherwise.
          handler_call_details: A HandlerCallDetails describing the RPC.

        Returns:
          An RpcMethodHandler with which to service the RPC if the
          interceptor chooses to service it, or None otherwise.
        """
        raise NotImplementedError()
+
+
+############################# Server Interface ###############################
+
+
class Server(abc.ABC):
    """Services RPCs."""

    @abc.abstractmethod
    def add_generic_rpc_handlers(self, generic_rpc_handlers):
        """Registers GenericRpcHandlers with this Server.

        This method is only safe to call before the server is started.

        Args:
          generic_rpc_handlers: An iterable of GenericRpcHandlers that will be
            used to service RPCs.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def add_insecure_port(self, address):
        """Opens an insecure port for accepting RPCs.

        This method may only be called before starting the server.

        Args:
          address: The address for which to open a port. If the port is 0,
            or not specified in the address, then gRPC runtime will choose a port.

        Returns:
          An integer port on which server will accept RPC requests.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def add_secure_port(self, address, server_credentials):
        """Opens a secure port for accepting RPCs.

        This method may only be called before starting the server.

        Args:
          address: The address for which to open a port.
            if the port is 0, or not specified in the address, then gRPC
            runtime will choose a port.
          server_credentials: A ServerCredentials object.

        Returns:
          An integer port on which server will accept RPC requests.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def start(self):
        """Starts this Server.

        This method may only be called once. (i.e. it is not idempotent).
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def stop(self, grace):
        """Stops this Server.

        This method immediately stops service of new RPCs in all cases.

        If a grace period is specified, this method returns immediately
        and all RPCs active at the end of the grace period are aborted.
        If a grace period is not specified (by passing None for `grace`),
        all existing RPCs are aborted immediately and this method
        blocks until the last RPC handler terminates.

        This method is idempotent and may be called at any time.
        Passing a smaller grace value in a subsequent call will have
        the effect of stopping the Server sooner (passing None will
        have the effect of stopping the server immediately). Passing
        a larger grace value in a subsequent call *will not* have the
        effect of stopping the server later (i.e. the most restrictive
        grace value is used).

        Args:
          grace: A duration of time in seconds or None.

        Returns:
          A threading.Event that will be set when this Server has completely
          stopped, i.e. when running RPCs either complete or are aborted and
          all handlers have terminated.
        """
        raise NotImplementedError()

    def wait_for_termination(self, timeout=None):
        """Block current thread until the server stops.

        This is an EXPERIMENTAL API.

        The wait will not consume computational resources during blocking, and
        it will block until one of the two following conditions are met:

        1) The server is stopped or terminated;
        2) A timeout occurs if timeout is not `None`.

        The timeout argument works in the same way as `threading.Event.wait()`.
        https://docs.python.org/3/library/threading.html#threading.Event.wait

        Args:
          timeout: A floating point number specifying a timeout for the
            operation in seconds.

        Returns:
          A bool indicating whether the operation timed out.
        """
        raise NotImplementedError()
+
+
+################################# Functions ################################
+
+
def unary_unary_rpc_method_handler(behavior,
                                   request_deserializer=None,
                                   response_serializer=None):
    """Builds an RpcMethodHandler for a unary-unary RPC method.

    Args:
      behavior: The implementation of an RPC accepting one request and
        returning one response.
      request_deserializer: An optional :term:`deserializer` for request deserialization.
      response_serializer: An optional :term:`serializer` for response serialization.

    Returns:
      An RpcMethodHandler object that is typically used by grpc.Server.
    """
    from grpc import _utilities  # pylint: disable=cyclic-import
    # Positional layout: (request_streaming, response_streaming,
    # request_deserializer, response_serializer,
    # unary_unary, unary_stream, stream_unary, stream_stream).
    handler = _utilities.RpcMethodHandler(False, False, request_deserializer,
                                          response_serializer, behavior, None,
                                          None, None)
    return handler
+
+
def unary_stream_rpc_method_handler(behavior,
                                    request_deserializer=None,
                                    response_serializer=None):
    """Builds an RpcMethodHandler for a unary-stream RPC method.

    Args:
      behavior: The implementation of an RPC accepting one request and
        returning an iterator of response values.
      request_deserializer: An optional :term:`deserializer` for request deserialization.
      response_serializer: An optional :term:`serializer` for response serialization.

    Returns:
      An RpcMethodHandler object that is typically used by grpc.Server.
    """
    from grpc import _utilities  # pylint: disable=cyclic-import
    # Positional layout: (request_streaming, response_streaming,
    # request_deserializer, response_serializer,
    # unary_unary, unary_stream, stream_unary, stream_stream).
    handler = _utilities.RpcMethodHandler(False, True, request_deserializer,
                                          response_serializer, None, behavior,
                                          None, None)
    return handler
+
+
def stream_unary_rpc_method_handler(behavior,
                                    request_deserializer=None,
                                    response_serializer=None):
    """Builds an RpcMethodHandler for a stream-unary RPC method.

    Args:
      behavior: The implementation of an RPC accepting an iterator of
        request values and returning a single response value.
      request_deserializer: An optional :term:`deserializer` for request deserialization.
      response_serializer: An optional :term:`serializer` for response serialization.

    Returns:
      An RpcMethodHandler object that is typically used by grpc.Server.
    """
    from grpc import _utilities  # pylint: disable=cyclic-import
    # Positional layout: (request_streaming, response_streaming,
    # request_deserializer, response_serializer,
    # unary_unary, unary_stream, stream_unary, stream_stream).
    handler = _utilities.RpcMethodHandler(True, False, request_deserializer,
                                          response_serializer, None, None,
                                          behavior, None)
    return handler
+
+
def stream_stream_rpc_method_handler(behavior,
                                     request_deserializer=None,
                                     response_serializer=None):
    """Builds an RpcMethodHandler for a stream-stream RPC method.

    Args:
      behavior: The implementation of an RPC accepting an iterator of
        request values and returning an iterator of response values.
      request_deserializer: An optional :term:`deserializer` for request deserialization.
      response_serializer: An optional :term:`serializer` for response serialization.

    Returns:
      An RpcMethodHandler object that is typically used by grpc.Server.
    """
    from grpc import _utilities  # pylint: disable=cyclic-import
    # Positional layout: (request_streaming, response_streaming,
    # request_deserializer, response_serializer,
    # unary_unary, unary_stream, stream_unary, stream_stream).
    handler = _utilities.RpcMethodHandler(True, True, request_deserializer,
                                          response_serializer, None, None,
                                          None, behavior)
    return handler
+
+
def method_handlers_generic_handler(service, method_handlers):
    """Builds a GenericRpcHandler out of a mapping of RpcMethodHandlers.

    Args:
      service: The name of the service implemented by the method_handlers.
      method_handlers: A dictionary mapping method names to their
        corresponding RpcMethodHandler.

    Returns:
      A GenericRpcHandler. This is typically added to the grpc.Server object
      with add_generic_rpc_handlers() before starting the server.
    """
    from grpc import _utilities  # pylint: disable=cyclic-import
    generic_handler = _utilities.DictionaryGenericHandler(
        service, method_handlers)
    return generic_handler
+
+
def ssl_channel_credentials(root_certificates=None,
                            private_key=None,
                            certificate_chain=None):
    """Builds a ChannelCredentials for use with an SSL-enabled Channel.

    Args:
      root_certificates: The PEM-encoded root certificates as a byte string,
        or None to retrieve them from a default location chosen by gRPC
        runtime.
      private_key: The PEM-encoded private key as a byte string, or None if
        no private key should be used.
      certificate_chain: The PEM-encoded certificate chain as a byte string
        to use, or None if no certificate chain should be used.

    Returns:
      A ChannelCredentials for use with an SSL-enabled Channel.
    """
    cygrpc_credentials = _cygrpc.SSLChannelCredentials(root_certificates,
                                                       private_key,
                                                       certificate_chain)
    return ChannelCredentials(cygrpc_credentials)
+
+
def xds_channel_credentials(fallback_credentials=None):
    """Builds a ChannelCredentials for use with xDS. This is an EXPERIMENTAL
    API.

    Args:
      fallback_credentials: Credentials to use in case it is not possible to
        establish a secure connection via xDS. If no fallback_credentials
        argument is supplied, a default SSLChannelCredentials is used.
    """
    if fallback_credentials is None:
        fallback_credentials = ssl_channel_credentials()
    return ChannelCredentials(
        _cygrpc.XDSChannelCredentials(fallback_credentials._credentials))
+
+
def metadata_call_credentials(metadata_plugin, name=None):
    """Builds CallCredentials from an AuthMetadataPlugin.

    Args:
      metadata_plugin: An AuthMetadataPlugin to use for authentication.
      name: An optional name for the plugin.

    Returns:
      A CallCredentials.
    """
    from grpc import _plugin_wrapping  # pylint: disable=cyclic-import
    credentials = _plugin_wrapping.metadata_plugin_call_credentials(
        metadata_plugin, name)
    return credentials
+
+
def access_token_call_credentials(access_token):
    """Builds CallCredentials from an access token.

    Args:
      access_token: A string to place directly in the http request
        authorization header, for example
        "authorization: Bearer <access_token>".

    Returns:
      A CallCredentials.
    """
    from grpc import _auth  # pylint: disable=cyclic-import
    from grpc import _plugin_wrapping  # pylint: disable=cyclic-import
    plugin = _auth.AccessTokenAuthMetadataPlugin(access_token)
    return _plugin_wrapping.metadata_plugin_call_credentials(plugin, None)
+
+
def composite_call_credentials(*call_credentials):
    """Composes multiple CallCredentials into a new CallCredentials.

    Args:
      *call_credentials: At least two CallCredentials objects.

    Returns:
      A CallCredentials object composed of the given CallCredentials objects.
    """
    cygrpc_credentials = tuple(
        credentials._credentials for credentials in call_credentials)
    return CallCredentials(
        _cygrpc.CompositeCallCredentials(cygrpc_credentials))
+
+
def composite_channel_credentials(channel_credentials, *call_credentials):
    """Composes a ChannelCredentials with one or more CallCredentials.

    Args:
      channel_credentials: A ChannelCredentials object.
      *call_credentials: One or more CallCredentials objects.

    Returns:
      A ChannelCredentials composed of the given ChannelCredentials and
      CallCredentials objects.
    """
    cygrpc_call_credentials = tuple(
        credentials._credentials for credentials in call_credentials)
    return ChannelCredentials(
        _cygrpc.CompositeChannelCredentials(cygrpc_call_credentials,
                                            channel_credentials._credentials))
+
+
def ssl_server_credentials(private_key_certificate_chain_pairs,
                           root_certificates=None,
                           require_client_auth=False):
    """Builds a ServerCredentials for use with an SSL-enabled Server.

    Args:
      private_key_certificate_chain_pairs: A list of pairs of the form
        [PEM-encoded private key, PEM-encoded certificate chain].
      root_certificates: An optional byte string of PEM-encoded client root
        certificates that the server will use to verify client
        authentication. If omitted, require_client_auth must also be False.
      require_client_auth: A boolean indicating whether or not to require
        clients to be authenticated. May only be True if root_certificates
        is not None.

    Returns:
      A ServerCredentials for use with an SSL-enabled Server. Typically, this
      object is an argument to add_secure_port() method during server setup.
    """
    # Guard clauses replace the original if/elif/else ladder; the error
    # messages are preserved verbatim.
    if not private_key_certificate_chain_pairs:
        raise ValueError(
            'At least one private key-certificate chain pair is required!')
    if require_client_auth and root_certificates is None:
        raise ValueError(
            'Illegal to require client auth without providing root certificates!'
        )
    pem_key_cert_pairs = [
        _cygrpc.SslPemKeyCertPair(key, pem)
        for key, pem in private_key_certificate_chain_pairs
    ]
    return ServerCredentials(
        _cygrpc.server_credentials_ssl(root_certificates, pem_key_cert_pairs,
                                       require_client_auth))
+
+
def xds_server_credentials(fallback_credentials):
    """Creates a ServerCredentials for use with xDS. This is an EXPERIMENTAL
    API.

    Args:
      fallback_credentials: Credentials to use in case it is not possible to
        establish a secure connection via xDS. No default value is provided.

    Returns:
      A ServerCredentials object usable with an xDS-enabled Server.
    """
    fallback_cygrpc_credentials = fallback_credentials._credentials
    return ServerCredentials(
        _cygrpc.xds_server_credentials(fallback_cygrpc_credentials))
+
+
def insecure_server_credentials():
    """Creates a credentials object directing the server to use no credentials.
    This is an EXPERIMENTAL API.

    This object cannot be used directly in a call to `add_secure_port`.
    Instead, it should be used to construct other credentials objects, e.g.
    with xds_server_credentials.
    """
    cygrpc_credentials = _cygrpc.insecure_server_credentials()
    return ServerCredentials(cygrpc_credentials)
+
+
def ssl_server_certificate_configuration(private_key_certificate_chain_pairs,
                                         root_certificates=None):
    """Creates a ServerCertificateConfiguration for use with a Server.

    Args:
      private_key_certificate_chain_pairs: A collection of pairs of
        the form [PEM-encoded private key, PEM-encoded certificate
        chain].
      root_certificates: An optional byte string of PEM-encoded client root
        certificates that the server will use to verify client authentication.

    Returns:
      A ServerCertificateConfiguration that can be returned in the certificate
      configuration fetching callback.
    """
    # Guard clause: reject an empty collection before building anything.
    if not private_key_certificate_chain_pairs:
        raise ValueError(
            'At least one private key-certificate chain pair is required!')
    pem_key_cert_pairs = [
        _cygrpc.SslPemKeyCertPair(key, pem)
        for key, pem in private_key_certificate_chain_pairs
    ]
    return ServerCertificateConfiguration(
        _cygrpc.server_certificate_config_ssl(root_certificates,
                                              pem_key_cert_pairs))
+
+
def dynamic_ssl_server_credentials(initial_certificate_configuration,
                                   certificate_configuration_fetcher,
                                   require_client_authentication=False):
    """Creates a ServerCredentials for use with an SSL-enabled Server.

    Args:
      initial_certificate_configuration (ServerCertificateConfiguration): The
        certificate configuration with which the server will be initialized.
      certificate_configuration_fetcher (callable): A callable that takes no
        arguments and should return a ServerCertificateConfiguration to
        replace the server's current certificate, or None for no change
        (i.e., the server will continue its current certificate
        config). The library will call this callback on *every* new
        client connection before starting the TLS handshake with the
        client, thus allowing the user application to optionally
        return a new ServerCertificateConfiguration that the server will then
        use for the handshake.
      require_client_authentication: A boolean indicating whether or not to
        require clients to be authenticated.

    Returns:
      A ServerCredentials.
    """
    cygrpc_credentials = _cygrpc.server_credentials_ssl_dynamic_cert_config(
        initial_certificate_configuration, certificate_configuration_fetcher,
        require_client_authentication)
    return ServerCredentials(cygrpc_credentials)
+
+
@enum.unique
class LocalConnectionType(enum.Enum):
    """Types of local connection for local credential creation.

    Attributes:
      UDS: Unix domain socket connections
      LOCAL_TCP: Local TCP connections.
    """
    # Values mirror the Cython-level enum so `.value` can be passed straight
    # through to the _cygrpc local-credential constructors.
    UDS = _cygrpc.LocalConnectionType.uds
    LOCAL_TCP = _cygrpc.LocalConnectionType.local_tcp
+
+
def local_channel_credentials(local_connect_type=LocalConnectionType.LOCAL_TCP):
    """Creates a local ChannelCredentials used for local connections.

    This is an EXPERIMENTAL API.

    Local credentials are used by local TCP endpoints (e.g. localhost:10000)
    also UDS connections.

    The connections created by local channel credentials are not
    encrypted, but will be checked if they are local or not.
    The UDS connections are considered secure by providing peer authentication
    and data confidentiality while TCP connections are considered insecure.

    It is allowed to transmit call credentials over connections created by
    local channel credentials.

    Local channel credentials are useful for 1) eliminating insecure_channel usage;
    2) enable unit testing for call credentials without setting up secrets.

    Args:
      local_connect_type: Local connection type (either
        grpc.LocalConnectionType.UDS or grpc.LocalConnectionType.LOCAL_TCP)

    Returns:
      A ChannelCredentials for use with a local Channel
    """
    connection_type_value = local_connect_type.value
    return ChannelCredentials(
        _cygrpc.channel_credentials_local(connection_type_value))
+
+
def local_server_credentials(local_connect_type=LocalConnectionType.LOCAL_TCP):
    """Creates a local ServerCredentials used for local connections.

    This is an EXPERIMENTAL API.

    Local credentials are used by local TCP endpoints (e.g. localhost:10000)
    also UDS connections.

    The connections created by local server credentials are not
    encrypted, but will be checked if they are local or not.
    The UDS connections are considered secure by providing peer authentication
    and data confidentiality while TCP connections are considered insecure.

    It is allowed to transmit call credentials over connections created by local
    server credentials.

    Local server credentials are useful for 1) eliminating insecure_channel usage;
    2) enable unit testing for call credentials without setting up secrets.

    Args:
      local_connect_type: Local connection type (either
        grpc.LocalConnectionType.UDS or grpc.LocalConnectionType.LOCAL_TCP)

    Returns:
      A ServerCredentials for use with a local Server
    """
    connection_type_value = local_connect_type.value
    return ServerCredentials(
        _cygrpc.server_credentials_local(connection_type_value))
+
+
def alts_channel_credentials(service_accounts=None):
    """Creates a ChannelCredentials for use with an ALTS-enabled Channel.

    This is an EXPERIMENTAL API.
    ALTS credentials API can only be used in GCP environment as it relies on
    handshaker service being available. For more info about ALTS see
    https://cloud.google.com/security/encryption-in-transit/application-layer-transport-security

    Args:
      service_accounts: A list of server identities accepted by the client.
        If target service accounts are provided and none of them matches the
        peer identity of the server, handshake will fail. The arg can be empty
        if the client does not have any information about trusted server
        identity.
    Returns:
      A ChannelCredentials for use with an ALTS-enabled Channel
    """
    # Normalize any falsy value (None, empty sequence) to an empty list.
    accepted_service_accounts = service_accounts or []
    return ChannelCredentials(
        _cygrpc.channel_credentials_alts(accepted_service_accounts))
+
+
def alts_server_credentials():
    """Creates a ServerCredentials for use with an ALTS-enabled connection.

    This is an EXPERIMENTAL API.
    ALTS credentials API can only be used in GCP environment as it relies on
    handshaker service being available. For more info about ALTS see
    https://cloud.google.com/security/encryption-in-transit/application-layer-transport-security

    Returns:
      A ServerCredentials for use with an ALTS-enabled Server
    """
    cygrpc_credentials = _cygrpc.server_credentials_alts()
    return ServerCredentials(cygrpc_credentials)
+
+
def compute_engine_channel_credentials(call_credentials):
    """Creates a compute engine channel credential.

    This credential can only be used in a GCP environment as it relies on
    a handshaker service. For more info about ALTS, see
    https://cloud.google.com/security/encryption-in-transit/application-layer-transport-security

    This channel credential is expected to be used as part of a composite
    credential in conjunction with a call credentials that authenticates the
    VM's default service account. If used with any other sort of call
    credential, the connection may suddenly and unexpectedly begin failing RPCs.

    Args:
      call_credentials: A CallCredentials authenticating the VM's default
        service account.

    Returns:
      A ChannelCredentials for use with a compute-engine Channel.
    """
    cygrpc_call_credentials = call_credentials._credentials
    return ChannelCredentials(
        _cygrpc.channel_credentials_compute_engine(cygrpc_call_credentials))
+
+
def channel_ready_future(channel):
    """Creates a Future that tracks when a Channel is ready.

    Cancelling the Future does not affect the channel's state machine.
    It merely decouples the Future from channel state machine.

    Args:
      channel: A Channel object.

    Returns:
      A Future object that matures when the channel connectivity is
      ChannelConnectivity.READY.
    """
    # Deferred import: _utilities imports this module at load time.
    from grpc import _utilities  # pylint: disable=cyclic-import
    ready_future = _utilities.channel_ready_future(channel)
    return ready_future
+
+
def insecure_channel(target, options=None, compression=None):
    """Creates an insecure Channel to a server.

    The returned Channel is thread-safe.

    Args:
      target: The server address
      options: An optional list of key-value pairs (:term:`channel_arguments`
        in gRPC Core runtime) to configure the channel.
      compression: An optional value indicating the compression method to be
        used over the lifetime of the channel.

    Returns:
      A Channel.
    """
    # Deferred import: _channel imports this module at load time.
    from grpc import _channel  # pylint: disable=cyclic-import
    channel_options = () if options is None else options
    # A None credentials argument selects the insecure transport.
    return _channel.Channel(target, channel_options, None, compression)
+
+
def secure_channel(target, credentials, options=None, compression=None):
    """Creates a secure Channel to a server.

    The returned Channel is thread-safe.

    Args:
      target: The server address.
      credentials: A ChannelCredentials instance.
      options: An optional list of key-value pairs (:term:`channel_arguments`
        in gRPC Core runtime) to configure the channel.
      compression: An optional value indicating the compression method to be
        used over the lifetime of the channel.

    Returns:
      A Channel.

    Raises:
      ValueError: If `credentials` is the insecure-credentials sentinel.
    """
    from grpc import _channel  # pylint: disable=cyclic-import
    from grpc.experimental import _insecure_channel_credentials
    # Reject the insecure sentinel early: callers wanting an insecure channel
    # must use insecure_channel() explicitly.
    if credentials._credentials is _insecure_channel_credentials:
        raise ValueError(
            "secure_channel cannot be called with insecure credentials."
            " Call insecure_channel instead.")
    channel_options = () if options is None else options
    return _channel.Channel(target, channel_options, credentials._credentials,
                            compression)
+
+
def intercept_channel(channel, *interceptors):
    """Intercepts a channel through a set of interceptors.

    Args:
      channel: A Channel.
      interceptors: Zero or more objects of type
        UnaryUnaryClientInterceptor,
        UnaryStreamClientInterceptor,
        StreamUnaryClientInterceptor, or
        StreamStreamClientInterceptor.
        Interceptors are given control in the order they are listed.

    Returns:
      A Channel that intercepts each invocation via the provided interceptors.

    Raises:
      TypeError: If interceptor does not derive from any of
        UnaryUnaryClientInterceptor,
        UnaryStreamClientInterceptor,
        StreamUnaryClientInterceptor, or
        StreamStreamClientInterceptor.
    """
    # Deferred import: _interceptor imports this module at load time.
    from grpc import _interceptor  # pylint: disable=cyclic-import
    intercepted_channel = _interceptor.intercept_channel(
        channel, *interceptors)
    return intercepted_channel
+
+
def server(thread_pool,
           handlers=None,
           interceptors=None,
           options=None,
           maximum_concurrent_rpcs=None,
           compression=None,
           xds=False):
    """Creates a Server with which RPCs can be serviced.

    Args:
      thread_pool: A futures.ThreadPoolExecutor to be used by the Server
        to execute RPC handlers.
      handlers: An optional list of GenericRpcHandlers used for executing RPCs.
        More handlers may be added by calling add_generic_rpc_handlers any time
        before the server is started.
      interceptors: An optional list of ServerInterceptor objects that observe
        and optionally manipulate the incoming RPCs before handing them over to
        handlers. The interceptors are given control in the order they are
        specified. This is an EXPERIMENTAL API.
      options: An optional list of key-value pairs (:term:`channel_arguments` in gRPC runtime)
        to configure the channel.
      maximum_concurrent_rpcs: The maximum number of concurrent RPCs this server
        will service before returning RESOURCE_EXHAUSTED status, or None to
        indicate no limit.
      compression: An element of grpc.compression, e.g.
        grpc.compression.Gzip. This compression algorithm will be used for the
        lifetime of the server unless overridden.
      xds: If set to true, retrieves server configuration via xDS. This is an
        EXPERIMENTAL option.

    Returns:
      A Server object.
    """
    # Deferred import: _server imports this module at load time.
    from grpc import _server  # pylint: disable=cyclic-import
    # Preserve caller-supplied (possibly empty) sequences; only None becomes ().
    effective_handlers = () if handlers is None else handlers
    effective_interceptors = () if interceptors is None else interceptors
    effective_options = () if options is None else options
    return _server.create_server(thread_pool, effective_handlers,
                                 effective_interceptors, effective_options,
                                 maximum_concurrent_rpcs, compression, xds)
+
+
@contextlib.contextmanager
def _create_servicer_context(rpc_event, state, request_deserializer):
    """Yields a server-side _Context for one RPC, finalizing its state on exit.

    NOTE(review): there is no try/finally here, so `_finalize_state` is
    skipped if the `with` body raises — presumably intentional upstream
    behavior; confirm before changing.
    """
    from grpc import _server  # pylint: disable=cyclic-import
    context = _server._Context(rpc_event, state, request_deserializer)
    yield context
    context._finalize_state()  # pylint: disable=protected-access
+
+
@enum.unique
class Compression(enum.IntEnum):
    """Indicates the compression method to be used for an RPC.

    Attributes:
      NoCompression: Do not use compression algorithm.
      Deflate: Use "Deflate" compression algorithm.
      Gzip: Use "Gzip" compression algorithm.
    """
    # Integer values come from the _compression module so they can be passed
    # directly to the gRPC Core runtime.
    NoCompression = _compression.NoCompression
    Deflate = _compression.Deflate
    Gzip = _compression.Gzip
+
+
################################### __all__ #################################

# Public API surface of the `grpc` package; keep in sync with the definitions
# above when new public names are added.
__all__ = (
    'FutureTimeoutError',
    'FutureCancelledError',
    'Future',
    'ChannelConnectivity',
    'StatusCode',
    'Status',
    'RpcError',
    'RpcContext',
    'Call',
    'ChannelCredentials',
    'CallCredentials',
    'AuthMetadataContext',
    'AuthMetadataPluginCallback',
    'AuthMetadataPlugin',
    'Compression',
    'ClientCallDetails',
    'ServerCertificateConfiguration',
    'ServerCredentials',
    'LocalConnectionType',
    'UnaryUnaryMultiCallable',
    'UnaryStreamMultiCallable',
    'StreamUnaryMultiCallable',
    'StreamStreamMultiCallable',
    'UnaryUnaryClientInterceptor',
    'UnaryStreamClientInterceptor',
    'StreamUnaryClientInterceptor',
    'StreamStreamClientInterceptor',
    'Channel',
    'ServicerContext',
    'RpcMethodHandler',
    'HandlerCallDetails',
    'GenericRpcHandler',
    'ServiceRpcHandler',
    'Server',
    'ServerInterceptor',
    'unary_unary_rpc_method_handler',
    'unary_stream_rpc_method_handler',
    'stream_unary_rpc_method_handler',
    'stream_stream_rpc_method_handler',
    'method_handlers_generic_handler',
    'ssl_channel_credentials',
    'metadata_call_credentials',
    'access_token_call_credentials',
    'composite_call_credentials',
    'composite_channel_credentials',
    'compute_engine_channel_credentials',
    'local_channel_credentials',
    'local_server_credentials',
    'alts_channel_credentials',
    'alts_server_credentials',
    'ssl_server_credentials',
    'ssl_server_certificate_configuration',
    'dynamic_ssl_server_credentials',
    'channel_ready_future',
    'insecure_channel',
    'secure_channel',
    'intercept_channel',
    'server',
    'protos',
    'services',
    'protos_and_services',
    'xds_channel_credentials',
    'xds_server_credentials',
    'insecure_server_credentials',
)
+
############################### Extension Shims ################################

# Here to maintain backwards compatibility; avoid using these in new code!
# Each optional companion package is aliased under its historical
# `grpc.<name>` module path if (and only if) it is installed.
try:
    import grpc_tools
    sys.modules.update({'grpc.tools': grpc_tools})
except ImportError:
    pass
try:
    import grpc_health
    sys.modules.update({'grpc.health': grpc_health})
except ImportError:
    pass
try:
    import grpc_reflection
    sys.modules.update({'grpc.reflection': grpc_reflection})
except ImportError:
    pass

# Prevents import order issue in the case of renamed path.
# The aio API requires Python 3.6+; register it under `grpc.aio` only when
# this module is actually being imported as the `grpc` package.
if sys.version_info >= (3, 6) and __name__ == "grpc":
    from grpc import aio  # pylint: disable=ungrouped-imports
    sys.modules.update({'grpc.aio': aio})
diff --git a/contrib/python/grpcio/py3/grpc/_auth.py b/contrib/python/grpcio/py3/grpc/_auth.py
new file mode 100644
index 0000000000..2095957072
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/_auth.py
@@ -0,0 +1,68 @@
+# Copyright 2016 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""GRPCAuthMetadataPlugins for standard authentication."""
+
+import inspect
+from typing import Any, Optional
+
+import grpc
+
+
def _sign_request(callback: grpc.AuthMetadataPluginCallback,
                  token: Optional[str], error: Optional[Exception]):
    """Deliver a Bearer-token authorization header (and any error) to callback."""
    auth_metadata = (('authorization', 'Bearer {}'.format(token)),)
    callback(auth_metadata, error)
+
+
class GoogleCallCredentials(grpc.AuthMetadataPlugin):
    """Metadata wrapper for GoogleCredentials from the oauth2client library."""
    _is_jwt: bool
    _credentials: Any

    # TODO(xuanwn): Give credentials an actual type.
    def __init__(self, credentials: Any):
        self._credentials = credentials
        # JWT-style credentials accept `additional_claims` in
        # get_access_token; detect that so the audience can be set per call.
        accepted_args = inspect.getfullargspec(
            credentials.get_access_token).args
        self._is_jwt = 'additional_claims' in accepted_args

    def __call__(self, context: grpc.AuthMetadataContext,
                 callback: grpc.AuthMetadataPluginCallback):
        """Fetch an access token and sign the request, reporting any failure."""
        try:
            if self._is_jwt:
                claims = {
                    'aud':
                        context.service_url  # pytype: disable=attribute-error
                }
                token = self._credentials.get_access_token(
                    additional_claims=claims).access_token
            else:
                token = self._credentials.get_access_token().access_token
        except Exception as exception:  # pylint: disable=broad-except
            # Surface the failure to gRPC instead of raising into the runtime.
            _sign_request(callback, None, exception)
        else:
            _sign_request(callback, token, None)
+
+
class AccessTokenAuthMetadataPlugin(grpc.AuthMetadataPlugin):
    """Metadata wrapper for raw access token credentials."""
    _access_token: str

    def __init__(self, access_token: str):
        """Stores the raw token attached to every outgoing request."""
        self._access_token = access_token

    def __call__(self, context: grpc.AuthMetadataContext,
                 callback: grpc.AuthMetadataPluginCallback):
        """Signs the request with the stored token; never reports an error."""
        _sign_request(callback, self._access_token, None)
diff --git a/contrib/python/grpcio/py3/grpc/_channel.py b/contrib/python/grpcio/py3/grpc/_channel.py
new file mode 100644
index 0000000000..d31344fd0e
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/_channel.py
@@ -0,0 +1,1767 @@
+# Copyright 2016 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Invocation-side implementation of gRPC Python."""
+
+import copy
+import functools
+import logging
+import os
+import sys
+import threading
+import time
+import types
+from typing import (Any, Callable, Iterator, List, Optional, Sequence, Set,
+ Tuple, Union)
+
+import grpc # pytype: disable=pyi-error
+from grpc import _common # pytype: disable=pyi-error
+from grpc import _compression # pytype: disable=pyi-error
+from grpc import _grpcio_metadata # pytype: disable=pyi-error
+from grpc._cython import cygrpc
+from grpc._typing import ChannelArgumentType
+from grpc._typing import DeserializingFunction
+from grpc._typing import IntegratedCallFactory
+from grpc._typing import MetadataType
+from grpc._typing import NullaryCallbackType
+from grpc._typing import ResponseType
+from grpc._typing import SerializingFunction
+from grpc._typing import UserTag
+import grpc.experimental # pytype: disable=pyi-error
+
_LOGGER = logging.getLogger(__name__)

# Sent as the user-agent channel argument, identifying this client library.
_USER_AGENT = 'grpc-python/{}'.format(_grpcio_metadata.__version__)

_EMPTY_FLAGS = 0

# NOTE(rbellevi): No guarantees are given about the maintenance of this
# environment variable.
_DEFAULT_SINGLE_THREADED_UNARY_STREAM = os.getenv(
    "GRPC_SINGLE_THREADED_UNARY_STREAM") is not None

# Operation types initially outstanding ("due") for each RPC cardinality; an
# RPC is complete once events for all of its due operations have been seen.
_UNARY_UNARY_INITIAL_DUE = (
    cygrpc.OperationType.send_initial_metadata,
    cygrpc.OperationType.send_message,
    cygrpc.OperationType.send_close_from_client,
    cygrpc.OperationType.receive_initial_metadata,
    cygrpc.OperationType.receive_message,
    cygrpc.OperationType.receive_status_on_client,
)
_UNARY_STREAM_INITIAL_DUE = (
    cygrpc.OperationType.send_initial_metadata,
    cygrpc.OperationType.send_message,
    cygrpc.OperationType.send_close_from_client,
    cygrpc.OperationType.receive_initial_metadata,
    cygrpc.OperationType.receive_status_on_client,
)
_STREAM_UNARY_INITIAL_DUE = (
    cygrpc.OperationType.send_initial_metadata,
    cygrpc.OperationType.receive_initial_metadata,
    cygrpc.OperationType.receive_message,
    cygrpc.OperationType.receive_status_on_client,
)
_STREAM_STREAM_INITIAL_DUE = (
    cygrpc.OperationType.send_initial_metadata,
    cygrpc.OperationType.receive_initial_metadata,
    cygrpc.OperationType.receive_status_on_client,
)

_CHANNEL_SUBSCRIPTION_CALLBACK_ERROR_LOG_MESSAGE = (
    'Exception calling channel subscription callback!')

# repr/str templates for terminated RPCs (see _rpc_state_string below).
_OK_RENDEZVOUS_REPR_FORMAT = ('<{} of RPC that terminated with:\n'
                              '\tstatus = {}\n'
                              '\tdetails = "{}"\n'
                              '>')

_NON_OK_RENDEZVOUS_REPR_FORMAT = ('<{} of RPC that terminated with:\n'
                                  '\tstatus = {}\n'
                                  '\tdetails = "{}"\n'
                                  '\tdebug_error_string = "{}"\n'
                                  '>')
+
+
+def _deadline(timeout: Optional[float]) -> Optional[float]:
+ return None if timeout is None else time.time() + timeout
+
+
def _unknown_code_details(unknown_cygrpc_code: Optional[grpc.StatusCode],
                          details: Optional[str]) -> str:
    """Format the details string used when the server sends an unmapped code."""
    message_template = 'Server sent unknown code {} and details "{}"'
    return message_template.format(unknown_cygrpc_code, details)
+
+
class _RPCState(object):
    """Mutable, condition-guarded record of a single RPC's progress."""
    condition: threading.Condition
    due: Set[cygrpc.OperationType]
    initial_metadata: Optional[MetadataType]
    response: Any
    trailing_metadata: Optional[MetadataType]
    code: Optional[grpc.StatusCode]
    details: Optional[str]
    debug_error_string: Optional[str]
    cancelled: bool
    callbacks: List[NullaryCallbackType]
    fork_epoch: Optional[int]

    def __init__(self, due: Sequence[cygrpc.OperationType],
                 initial_metadata: Optional[MetadataType],
                 trailing_metadata: Optional[MetadataType],
                 code: Optional[grpc.StatusCode], details: Optional[str]):
        # `condition` guards all members of _RPCState. `notify_all` is called on
        # `condition` when the state of the RPC has changed.
        self.condition = threading.Condition()

        # The cygrpc.OperationType objects representing events due from the RPC's
        # completion queue. If an operation is in `due`, it is guaranteed that
        # `operate()` has been called on a corresponding operation. But the
        # converse is not true. That is, in the case of failed `operate()`
        # calls, there may briefly be events in `due` that do not correspond to
        # operations submitted to Core.
        self.due = set(due)
        self.initial_metadata = initial_metadata
        self.response = None
        self.trailing_metadata = trailing_metadata
        # A non-None `code` marks the RPC as terminated (see _abort).
        self.code = code
        self.details = details
        self.debug_error_string = None

        # The semantics of grpc.Future.cancel and grpc.Future.cancelled are
        # slightly wonky, so they have to be tracked separately from the rest of the
        # result of the RPC. This field tracks whether cancellation was requested
        # prior to termination of the RPC.
        self.cancelled = False
        self.callbacks = []
        # Fork epoch snapshot at creation time; compared against the current
        # epoch to detect state that predates a fork.
        self.fork_epoch = cygrpc.get_fork_epoch()

    def reset_postfork_child(self):
        # A fresh Condition is needed in the forked child: the parent's may be
        # held by a thread that no longer exists after fork.
        self.condition = threading.Condition()
+
+
def _abort(state: _RPCState, code: grpc.StatusCode, details: str) -> None:
    """Mark `state` terminated with `code`/`details` if still in progress.

    Caller must hold `state.condition`. A no-op when the RPC has already
    terminated (state.code is set).
    """
    if state.code is not None:
        return
    state.code = code
    state.details = details
    if state.initial_metadata is None:
        state.initial_metadata = ()
    state.trailing_metadata = ()
+
+
def _handle_event(
        event: cygrpc.BaseEvent, state: _RPCState,
        response_deserializer: Optional[DeserializingFunction]
) -> List[NullaryCallbackType]:
    """Fold a completion-queue event into `state`.

    Caller must hold `state.condition`. Returns the callbacks that became
    runnable (and must be invoked outside the lock by the caller); returns an
    empty list if the RPC has not yet terminated.
    """
    callbacks = []
    for batch_operation in event.batch_operations:
        operation_type = batch_operation.type()
        # Each completed operation is no longer outstanding.
        state.due.remove(operation_type)
        if operation_type == cygrpc.OperationType.receive_initial_metadata:
            state.initial_metadata = batch_operation.initial_metadata()
        elif operation_type == cygrpc.OperationType.receive_message:
            serialized_response = batch_operation.message()
            if serialized_response is not None:
                response = _common.deserialize(serialized_response,
                                               response_deserializer)
                if response is None:
                    # Deserialization failure terminates the RPC locally.
                    details = 'Exception deserializing response!'
                    _abort(state, grpc.StatusCode.INTERNAL, details)
                else:
                    state.response = response
        elif operation_type == cygrpc.OperationType.receive_status_on_client:
            state.trailing_metadata = batch_operation.trailing_metadata()
            if state.code is None:
                # Map the Cython status code; unmapped codes become UNKNOWN
                # with the original code embedded in the details string.
                code = _common.CYGRPC_STATUS_CODE_TO_STATUS_CODE.get(
                    batch_operation.code())
                if code is None:
                    state.code = grpc.StatusCode.UNKNOWN
                    state.details = _unknown_code_details(
                        code, batch_operation.details())
                else:
                    state.code = code
                    state.details = batch_operation.details()
                    state.debug_error_string = batch_operation.error_string()
            # Terminal event: hand the callbacks to the caller and mark the
            # callback list closed (add_callback will now return False).
            callbacks.extend(state.callbacks)
            state.callbacks = None
    return callbacks
+
+
def _event_handler(
        state: _RPCState,
        response_deserializer: Optional[DeserializingFunction]) -> UserTag:
    """Create the completion-queue tag that folds events into `state`.

    Args:
      state: The _RPCState to update as events arrive.
      response_deserializer: Optional deserializer applied to received
        messages.

    Returns:
      A callable invoked by the channel spin thread with each event; it
      returns True when the RPC is fully done (and not stale across a fork).
    """

    def handle_event(event):
        with state.condition:
            callbacks = _handle_event(event, state, response_deserializer)
            state.condition.notify_all()
            done = not state.due
        for callback in callbacks:
            try:
                callback()
            except Exception as e:  # pylint: disable=broad-except
                # NOTE(rbellevi): We suppress but log errors here so as not to
                # kill the channel spin thread.
                # Not every callback is a functools.partial: fall back to the
                # callback itself so this handler cannot raise AttributeError
                # and mask the original exception. Use the module logger for
                # consistency with the rest of this file.
                _LOGGER.error('Exception in callback %s: %s',
                              repr(getattr(callback, 'func', callback)),
                              repr(e))
        return done and state.fork_epoch >= cygrpc.get_fork_epoch()

    return handle_event
+
+
+# TODO(xuanwn): Create a base class for IntegratedCall and SegregatedCall.
+#pylint: disable=too-many-statements
def _consume_request_iterator(request_iterator: Iterator, state: _RPCState,
                              call: Union[cygrpc.IntegratedCall,
                                          cygrpc.SegregatedCall],
                              request_serializer: SerializingFunction,
                              event_handler: Optional[UserTag]) -> None:
    """Consume a request supplied by the user.

    Spawns a daemon thread that drains `request_iterator`, serializing and
    sending each request on `call`, then sends close-from-client. Any
    iteration or serialization failure cancels the call and aborts `state`.
    """

    def consume_request_iterator():  # pylint: disable=too-many-branches
        # Iterate over the request iterator until it is exhausted or an error
        # condition is encountered.
        while True:
            return_from_user_request_generator_invoked = False
            try:
                # The thread may die in user-code. Do not block fork for this.
                cygrpc.enter_user_request_generator()
                request = next(request_iterator)
            except StopIteration:
                break
            except Exception:  # pylint: disable=broad-except
                cygrpc.return_from_user_request_generator()
                return_from_user_request_generator_invoked = True
                code = grpc.StatusCode.UNKNOWN
                details = 'Exception iterating requests!'
                _LOGGER.exception(details)
                call.cancel(_common.STATUS_CODE_TO_CYGRPC_STATUS_CODE[code],
                            details)
                _abort(state, code, details)
                return
            finally:
                # Re-enter fork-managed code exactly once per iteration.
                if not return_from_user_request_generator_invoked:
                    cygrpc.return_from_user_request_generator()
            serialized_request = _common.serialize(request, request_serializer)
            with state.condition:
                if state.code is None and not state.cancelled:
                    if serialized_request is None:
                        code = grpc.StatusCode.INTERNAL
                        details = 'Exception serializing request!'
                        call.cancel(
                            _common.STATUS_CODE_TO_CYGRPC_STATUS_CODE[code],
                            details)
                        _abort(state, code, details)
                        return
                    else:
                        # Mark send_message outstanding before operate() so a
                        # racing event handler sees a consistent `due` set.
                        state.due.add(cygrpc.OperationType.send_message)
                        operations = (cygrpc.SendMessageOperation(
                            serialized_request, _EMPTY_FLAGS),)
                        operating = call.operate(operations, event_handler)
                        if not operating:
                            state.due.remove(cygrpc.OperationType.send_message)
                            return

                        def _done():
                            return (state.code is not None or
                                    cygrpc.OperationType.send_message
                                    not in state.due)

                        # Block until the previous send completes (flow
                        # control), pausing during an in-progress fork.
                        _common.wait(state.condition.wait,
                                     _done,
                                     spin_cb=functools.partial(
                                         cygrpc.block_if_fork_in_progress,
                                         state))
                        if state.code is not None:
                            return
                else:
                    return
        # Iterator exhausted: half-close the client side of the stream.
        with state.condition:
            if state.code is None:
                state.due.add(cygrpc.OperationType.send_close_from_client)
                operations = (
                    cygrpc.SendCloseFromClientOperation(_EMPTY_FLAGS),)
                operating = call.operate(operations, event_handler)
                if not operating:
                    state.due.remove(
                        cygrpc.OperationType.send_close_from_client)

    consumption_thread = cygrpc.ForkManagedThread(
        target=consume_request_iterator)
    # NOTE: ForkManagedThread exposes setDaemon(); it is a wrapper, not a
    # threading.Thread, so do not switch this to a `.daemon` assignment.
    consumption_thread.setDaemon(True)
    consumption_thread.start()
+
+
def _rpc_state_string(class_name: str, rpc_state: _RPCState) -> str:
    """Calculates error string for RPC."""
    with rpc_state.condition:
        code = rpc_state.code
        if code is None:
            # Still in progress: nothing terminal to report.
            return '<{} object>'.format(class_name)
        if code is grpc.StatusCode.OK:
            return _OK_RENDEZVOUS_REPR_FORMAT.format(class_name, code,
                                                     rpc_state.details)
        return _NON_OK_RENDEZVOUS_REPR_FORMAT.format(
            class_name, code, rpc_state.details, rpc_state.debug_error_string)
+
+
class _InactiveRpcError(grpc.RpcError, grpc.Call, grpc.Future):
    """An RPC error not tied to the execution of a particular RPC.

    The RPC represented by the state object must not be in-progress or
    cancelled.

    Attributes:
      _state: An instance of _RPCState.
    """
    _state: _RPCState

    def __init__(self, state: _RPCState):
        # Deep-copy the terminal state under its lock so this object is an
        # immutable snapshot, safe to share after the lock is released.
        with state.condition:
            self._state = _RPCState((), copy.deepcopy(state.initial_metadata),
                                    copy.deepcopy(state.trailing_metadata),
                                    state.code, copy.deepcopy(state.details))
            self._state.response = copy.copy(state.response)
            self._state.debug_error_string = copy.copy(state.debug_error_string)

    def initial_metadata(self) -> Optional[MetadataType]:
        """See grpc.Call.initial_metadata."""
        return self._state.initial_metadata

    def trailing_metadata(self) -> Optional[MetadataType]:
        """See grpc.Call.trailing_metadata."""
        return self._state.trailing_metadata

    def code(self) -> Optional[grpc.StatusCode]:
        """See grpc.Call.code."""
        return self._state.code

    def details(self) -> Optional[str]:
        """See grpc.Call.details."""
        return _common.decode(self._state.details)

    def debug_error_string(self) -> Optional[str]:
        """Returns the debug string from gRPC Core, decoded to text."""
        return _common.decode(self._state.debug_error_string)

    def _repr(self) -> str:
        return _rpc_state_string(self.__class__.__name__, self._state)

    def __repr__(self) -> str:
        return self._repr()

    def __str__(self) -> str:
        return self._repr()

    def cancel(self) -> bool:
        """See grpc.Future.cancel."""
        # The RPC is already terminated; cancellation can never succeed.
        return False

    def cancelled(self) -> bool:
        """See grpc.Future.cancelled."""
        return False

    def running(self) -> bool:
        """See grpc.Future.running."""
        return False

    def done(self) -> bool:
        """See grpc.Future.done."""
        # Always done: this object only ever represents a terminated RPC.
        return True

    def result(self, timeout: Optional[float] = None) -> Any:  # pylint: disable=unused-argument
        """See grpc.Future.result."""
        # The RPC failed, so the "result" is the error itself.
        raise self

    def exception(self, timeout: Optional[float] = None) -> Optional[Exception]:  # pylint: disable=unused-argument
        """See grpc.Future.exception."""
        return self

    def traceback(
            self,
            timeout: Optional[float] = None  # pylint: disable=unused-argument
    ) -> Optional[types.TracebackType]:
        """See grpc.Future.traceback."""
        # Raise and immediately catch self to materialize a traceback object.
        try:
            raise self
        except grpc.RpcError:
            return sys.exc_info()[2]

    def add_done_callback(
            self,
            fn: Callable[[grpc.Future], None],
            timeout: Optional[float] = None) -> None:  # pylint: disable=unused-argument
        """See grpc.Future.add_done_callback."""
        # Already done, so the callback runs synchronously right now.
        fn(self)
+
+
class _Rendezvous(grpc.RpcError, grpc.RpcContext):
    """An RPC iterator.

    Attributes:
      _state: An instance of _RPCState.
      _call: An instance of SegregatedCall or IntegratedCall.
        In either case, the _call object is expected to have operate, cancel,
        and next_event methods.
      _response_deserializer: A callable taking bytes and return a Python
        object.
      _deadline: A float representing the deadline of the RPC in seconds. Or
        possibly None, to represent an RPC with no deadline at all.
    """
    _state: _RPCState
    _call: Union[cygrpc.SegregatedCall, cygrpc.IntegratedCall]
    _response_deserializer: Optional[DeserializingFunction]
    _deadline: Optional[float]

    def __init__(self, state: _RPCState, call: Union[cygrpc.SegregatedCall,
                                                     cygrpc.IntegratedCall],
                 response_deserializer: Optional[DeserializingFunction],
                 deadline: Optional[float]):
        """Stores the RPC collaborators; performs no I/O itself."""
        super(_Rendezvous, self).__init__()
        self._state = state
        self._call = call
        self._response_deserializer = response_deserializer
        self._deadline = deadline
+
+ def is_active(self) -> bool:
+ """See grpc.RpcContext.is_active"""
+ with self._state.condition:
+ return self._state.code is None
+
+ def time_remaining(self) -> Optional[float]:
+ """See grpc.RpcContext.time_remaining"""
+ with self._state.condition:
+ if self._deadline is None:
+ return None
+ else:
+ return max(self._deadline - time.time(), 0)
+
+ def cancel(self) -> bool:
+ """See grpc.RpcContext.cancel"""
+ with self._state.condition:
+ if self._state.code is None:
+ code = grpc.StatusCode.CANCELLED
+ details = 'Locally cancelled by application!'
+ self._call.cancel(
+ _common.STATUS_CODE_TO_CYGRPC_STATUS_CODE[code], details)
+ self._state.cancelled = True
+ _abort(self._state, code, details)
+ self._state.condition.notify_all()
+ return True
+ else:
+ return False
+
+ def add_callback(self, callback: NullaryCallbackType) -> bool:
+ """See grpc.RpcContext.add_callback"""
+ with self._state.condition:
+ if self._state.callbacks is None:
+ return False
+ else:
+ self._state.callbacks.append(callback)
+ return True
+
+ def __iter__(self):
+ return self
+
+ def next(self):
+ return self._next()
+
+ def __next__(self):
+ return self._next()
+
+ def _next(self):
+ raise NotImplementedError()
+
+ def debug_error_string(self) -> Optional[str]:
+ raise NotImplementedError()
+
+ def _repr(self) -> str:
+ return _rpc_state_string(self.__class__.__name__, self._state)
+
+ def __repr__(self) -> str:
+ return self._repr()
+
+ def __str__(self) -> str:
+ return self._repr()
+
+ def __del__(self) -> None:
+ with self._state.condition:
+ if self._state.code is None:
+ self._state.code = grpc.StatusCode.CANCELLED
+ self._state.details = 'Cancelled upon garbage collection!'
+ self._state.cancelled = True
+ self._call.cancel(
+ _common.STATUS_CODE_TO_CYGRPC_STATUS_CODE[self._state.code],
+ self._state.details)
+ self._state.condition.notify_all()
+
+
class _SingleThreadedRendezvous(_Rendezvous, grpc.Call, grpc.Future):  # pylint: disable=too-many-ancestors
    """An RPC iterator operating entirely on a single thread.

    The __next__ method of _SingleThreadedRendezvous does not depend on the
    existence of any other thread, including the "channel spin thread".
    However, this means that its interface is entirely synchronous. So this
    class cannot completely fulfill the grpc.Future interface. The result,
    exception, and traceback methods will never block and will instead raise
    an exception if calling the method would result in blocking.

    This means that these methods are safe to call from add_done_callback
    handlers.
    """
    _state: _RPCState

    def _is_complete(self) -> bool:
        # Caller must hold self._state.condition.
        return self._state.code is not None

    def cancelled(self) -> bool:
        """See grpc.Future.cancelled."""
        with self._state.condition:
            return self._state.cancelled

    def running(self) -> bool:
        """See grpc.Future.running."""
        with self._state.condition:
            return self._state.code is None

    def done(self) -> bool:
        """See grpc.Future.done."""
        with self._state.condition:
            return self._state.code is not None

    def result(self, timeout: Optional[float] = None) -> Any:
        """Returns the result of the computation or raises its exception.

        This method will never block. Instead, it will raise an exception
        if calling this method would otherwise result in blocking.

        Since this method will never block, any `timeout` argument passed will
        be ignored.
        """
        del timeout
        with self._state.condition:
            if not self._is_complete():
                raise grpc.experimental.UsageError(
                    "_SingleThreadedRendezvous only supports result() when the RPC is complete."
                )
            if self._state.code is grpc.StatusCode.OK:
                return self._state.response
            elif self._state.cancelled:
                raise grpc.FutureCancelledError()
            else:
                # Non-OK, non-cancelled: this object is the RpcError.
                raise self

    def exception(self, timeout: Optional[float] = None) -> Optional[Exception]:
        """Return the exception raised by the computation.

        This method will never block. Instead, it will raise an exception
        if calling this method would otherwise result in blocking.

        Since this method will never block, any `timeout` argument passed will
        be ignored.
        """
        del timeout
        with self._state.condition:
            if not self._is_complete():
                raise grpc.experimental.UsageError(
                    "_SingleThreadedRendezvous only supports exception() when the RPC is complete."
                )
            if self._state.code is grpc.StatusCode.OK:
                return None
            elif self._state.cancelled:
                raise grpc.FutureCancelledError()
            else:
                return self

    def traceback(
            self,
            timeout: Optional[float] = None) -> Optional[types.TracebackType]:
        """Access the traceback of the exception raised by the computation.

        This method will never block. Instead, it will raise an exception
        if calling this method would otherwise result in blocking.

        Since this method will never block, any `timeout` argument passed will
        be ignored.
        """
        del timeout
        with self._state.condition:
            if not self._is_complete():
                raise grpc.experimental.UsageError(
                    "_SingleThreadedRendezvous only supports traceback() when the RPC is complete."
                )
            if self._state.code is grpc.StatusCode.OK:
                return None
            elif self._state.cancelled:
                raise grpc.FutureCancelledError()
            else:
                # Raise-and-catch to synthesize a traceback object.
                try:
                    raise self
                except grpc.RpcError:
                    return sys.exc_info()[2]

    def add_done_callback(self, fn: Callable[[grpc.Future], None]) -> None:
        """See grpc.Future.add_done_callback."""
        with self._state.condition:
            if self._state.code is None:
                self._state.callbacks.append(functools.partial(fn, self))
                return

        # Already complete: invoke immediately, outside the lock.
        fn(self)

    def initial_metadata(self) -> Optional[MetadataType]:
        """See grpc.Call.initial_metadata"""
        with self._state.condition:
            # NOTE(gnossen): Based on our initial call batch, we are guaranteed
            # to receive initial metadata before any messages.
            while self._state.initial_metadata is None:
                self._consume_next_event()
            return self._state.initial_metadata

    def trailing_metadata(self) -> Optional[MetadataType]:
        """See grpc.Call.trailing_metadata"""
        with self._state.condition:
            if self._state.trailing_metadata is None:
                raise grpc.experimental.UsageError(
                    "Cannot get trailing metadata until RPC is completed.")
            return self._state.trailing_metadata

    def code(self) -> Optional[grpc.StatusCode]:
        """See grpc.Call.code"""
        with self._state.condition:
            if self._state.code is None:
                raise grpc.experimental.UsageError(
                    "Cannot get code until RPC is completed.")
            return self._state.code

    def details(self) -> Optional[str]:
        """See grpc.Call.details"""
        with self._state.condition:
            if self._state.details is None:
                raise grpc.experimental.UsageError(
                    "Cannot get details until RPC is completed.")
            return _common.decode(self._state.details)

    def _consume_next_event(self) -> Optional[cygrpc.BaseEvent]:
        """Block for the call's next completion event and apply it to state."""
        event = self._call.next_event()
        with self._state.condition:
            callbacks = _handle_event(event, self._state,
                                      self._response_deserializer)
            for callback in callbacks:
                # NOTE(gnossen): We intentionally allow exceptions to bubble up
                # to the user when running on a single thread.
                callback()
        return event

    def _next_response(self) -> Any:
        # Pump events until a response message arrives or the RPC terminates.
        while True:
            self._consume_next_event()
            with self._state.condition:
                if self._state.response is not None:
                    response = self._state.response
                    self._state.response = None
                    return response
                elif cygrpc.OperationType.receive_message not in self._state.due:
                    if self._state.code is grpc.StatusCode.OK:
                        raise StopIteration()
                    elif self._state.code is not None:
                        raise self

    def _next(self) -> Any:
        with self._state.condition:
            if self._state.code is None:
                # We tentatively add the operation as expected and remove
                # it if the enqueue operation fails. This allows us to guarantee that
                # if an event has been submitted to the core completion queue,
                # it is in `due`. If we waited until after a successful
                # enqueue operation then a signal could interrupt this
                # thread between the enqueue operation and the addition of the
                # operation to `due`. This would cause an exception on the
                # channel spin thread when the operation completes and no
                # corresponding operation would be present in state.due.
                # Note that, since `condition` is held through this block, there is
                # no data race on `due`.
                self._state.due.add(cygrpc.OperationType.receive_message)
                operating = self._call.operate(
                    (cygrpc.ReceiveMessageOperation(_EMPTY_FLAGS),), None)
                if not operating:
                    self._state.due.remove(cygrpc.OperationType.receive_message)
            elif self._state.code is grpc.StatusCode.OK:
                raise StopIteration()
            else:
                raise self
        return self._next_response()

    def debug_error_string(self) -> Optional[str]:
        """Return the debug string, raising if the RPC has not completed."""
        with self._state.condition:
            if self._state.debug_error_string is None:
                raise grpc.experimental.UsageError(
                    "Cannot get debug error string until RPC is completed.")
            return _common.decode(self._state.debug_error_string)
+
+
class _MultiThreadedRendezvous(_Rendezvous, grpc.Call, grpc.Future):  # pylint: disable=too-many-ancestors
    """An RPC iterator that depends on a channel spin thread.

    This iterator relies upon a per-channel thread running in the background,
    dequeueing events from the completion queue, and notifying threads waiting
    on the threading.Condition object in the _RPCState object.

    This extra thread allows _MultiThreadedRendezvous to fulfill the
    grpc.Future interface and to mediate a bidirectional streaming RPC.
    """
    _state: _RPCState

    def initial_metadata(self) -> Optional[MetadataType]:
        """See grpc.Call.initial_metadata"""
        with self._state.condition:

            def _done():
                return self._state.initial_metadata is not None

            # Block until the spin thread delivers initial metadata.
            _common.wait(self._state.condition.wait, _done)
            return self._state.initial_metadata

    def trailing_metadata(self) -> Optional[MetadataType]:
        """See grpc.Call.trailing_metadata"""
        with self._state.condition:

            def _done():
                return self._state.trailing_metadata is not None

            _common.wait(self._state.condition.wait, _done)
            return self._state.trailing_metadata

    def code(self) -> Optional[grpc.StatusCode]:
        """See grpc.Call.code"""
        with self._state.condition:

            def _done():
                return self._state.code is not None

            _common.wait(self._state.condition.wait, _done)
            return self._state.code

    def details(self) -> Optional[str]:
        """See grpc.Call.details"""
        with self._state.condition:

            def _done():
                return self._state.details is not None

            _common.wait(self._state.condition.wait, _done)
            return _common.decode(self._state.details)

    def debug_error_string(self) -> Optional[str]:
        """Block until termination and return the call's debug string."""
        with self._state.condition:

            def _done():
                return self._state.debug_error_string is not None

            _common.wait(self._state.condition.wait, _done)
            return _common.decode(self._state.debug_error_string)

    def cancelled(self) -> bool:
        """See grpc.Future.cancelled."""
        with self._state.condition:
            return self._state.cancelled

    def running(self) -> bool:
        """See grpc.Future.running."""
        with self._state.condition:
            return self._state.code is None

    def done(self) -> bool:
        """See grpc.Future.done."""
        with self._state.condition:
            return self._state.code is not None

    def _is_complete(self) -> bool:
        # Caller must hold self._state.condition.
        return self._state.code is not None

    def result(self, timeout: Optional[float] = None) -> Any:
        """Returns the result of the computation or raises its exception.

        See grpc.Future.result for the full API contract.
        """
        with self._state.condition:
            timed_out = _common.wait(self._state.condition.wait,
                                     self._is_complete,
                                     timeout=timeout)
            if timed_out:
                raise grpc.FutureTimeoutError()
            else:
                if self._state.code is grpc.StatusCode.OK:
                    return self._state.response
                elif self._state.cancelled:
                    raise grpc.FutureCancelledError()
                else:
                    # Non-OK, non-cancelled: this object is the RpcError.
                    raise self

    def exception(self, timeout: Optional[float] = None) -> Optional[Exception]:
        """Return the exception raised by the computation.

        See grpc.Future.exception for the full API contract.
        """
        with self._state.condition:
            timed_out = _common.wait(self._state.condition.wait,
                                     self._is_complete,
                                     timeout=timeout)
            if timed_out:
                raise grpc.FutureTimeoutError()
            else:
                if self._state.code is grpc.StatusCode.OK:
                    return None
                elif self._state.cancelled:
                    raise grpc.FutureCancelledError()
                else:
                    return self

    def traceback(
            self,
            timeout: Optional[float] = None) -> Optional[types.TracebackType]:
        """Access the traceback of the exception raised by the computation.

        See grpc.Future.traceback for the full API contract.
        """
        with self._state.condition:
            timed_out = _common.wait(self._state.condition.wait,
                                     self._is_complete,
                                     timeout=timeout)
            if timed_out:
                raise grpc.FutureTimeoutError()
            else:
                if self._state.code is grpc.StatusCode.OK:
                    return None
                elif self._state.cancelled:
                    raise grpc.FutureCancelledError()
                else:
                    # Raise-and-catch to synthesize a traceback object.
                    try:
                        raise self
                    except grpc.RpcError:
                        return sys.exc_info()[2]

    def add_done_callback(self, fn: Callable[[grpc.Future], None]) -> None:
        """See grpc.Future.add_done_callback."""
        with self._state.condition:
            if self._state.code is None:
                self._state.callbacks.append(functools.partial(fn, self))
                return

        # Already complete: invoke immediately, outside the lock.
        fn(self)

    def _next(self) -> Any:
        with self._state.condition:
            if self._state.code is None:
                event_handler = _event_handler(self._state,
                                               self._response_deserializer)
                # Tentatively record the receive as due before enqueueing; see
                # the analogous comment in _SingleThreadedRendezvous._next.
                self._state.due.add(cygrpc.OperationType.receive_message)
                operating = self._call.operate(
                    (cygrpc.ReceiveMessageOperation(_EMPTY_FLAGS),),
                    event_handler)
                if not operating:
                    self._state.due.remove(cygrpc.OperationType.receive_message)
            elif self._state.code is grpc.StatusCode.OK:
                raise StopIteration()
            else:
                raise self

            def _response_ready():
                return (self._state.response is not None or
                        (cygrpc.OperationType.receive_message
                         not in self._state.due and
                         self._state.code is not None))

            # Wait for the spin thread to deliver a message or termination.
            _common.wait(self._state.condition.wait, _response_ready)
            if self._state.response is not None:
                response = self._state.response
                self._state.response = None
                return response
            elif cygrpc.OperationType.receive_message not in self._state.due:
                if self._state.code is grpc.StatusCode.OK:
                    raise StopIteration()
                elif self._state.code is not None:
                    raise self
+
+
def _start_unary_request(
        request: Any, timeout: Optional[float],
        request_serializer: SerializingFunction
) -> Tuple[Optional[float], Optional[bytes], Optional[grpc.RpcError]]:
    """Serialize a unary request and compute its deadline.

    Returns a (deadline, serialized_request, error) triple. On serialization
    failure, serialized_request is None and error is an _InactiveRpcError
    carrying an INTERNAL status; otherwise error is None.
    """
    deadline = _deadline(timeout)
    serialized_request = _common.serialize(request, request_serializer)
    if serialized_request is not None:
        return deadline, serialized_request, None
    failure_state = _RPCState((), (), (), grpc.StatusCode.INTERNAL,
                              'Exception serializing request!')
    return deadline, None, _InactiveRpcError(failure_state)
+
+
def _end_unary_response_blocking(
        state: _RPCState, call: cygrpc.SegregatedCall, with_call: bool,
        deadline: Optional[float]
) -> Union[ResponseType, Tuple[ResponseType, grpc.Call]]:
    """Produce the terminal value of a blocking unary-response invocation.

    Raises an _InactiveRpcError for any non-OK status. On OK, returns the
    response alone, or a (response, call) pair when with_call is True.
    """
    if state.code is not grpc.StatusCode.OK:
        raise _InactiveRpcError(state)  # pytype: disable=not-instantiable
    if not with_call:
        return state.response
    rendezvous = _MultiThreadedRendezvous(state, call, None, deadline)
    return state.response, rendezvous
+
+
def _stream_unary_invocation_operations(
        metadata: Optional[MetadataType],
        initial_metadata_flags: int) -> Sequence[Sequence[cygrpc.Operation]]:
    """Build the two operation batches for a stream-unary invocation."""
    # First batch: send metadata, then receive the single response message
    # and the terminal status.
    main_batch = (
        cygrpc.SendInitialMetadataOperation(metadata, initial_metadata_flags),
        cygrpc.ReceiveMessageOperation(_EMPTY_FLAGS),
        cygrpc.ReceiveStatusOnClientOperation(_EMPTY_FLAGS),
    )
    # Second batch: receive the server's initial metadata independently.
    initial_metadata_batch = (
        cygrpc.ReceiveInitialMetadataOperation(_EMPTY_FLAGS),)
    return (main_batch, initial_metadata_batch)
+
+
def _stream_unary_invocation_operations_and_tags(
        metadata: Optional[MetadataType], initial_metadata_flags: int
) -> Sequence[Tuple[Sequence[cygrpc.Operation], Optional[UserTag]]]:
    """Pair each stream-unary operation batch with a None user tag."""
    batches = _stream_unary_invocation_operations(metadata,
                                                  initial_metadata_flags)
    tagged = []
    for operations in batches:
        tagged.append((operations, None))
    return tuple(tagged)
+
+
def _determine_deadline(user_deadline: Optional[float]) -> Optional[float]:
    """Combine the user's deadline with any deadline inherited from context.

    Returns the earlier of the two deadlines, whichever single one is set,
    or None when neither is set.
    """
    parent_deadline = cygrpc.get_deadline_from_context()
    candidates = [
        deadline for deadline in (parent_deadline, user_deadline)
        if deadline is not None
    ]
    return min(candidates) if candidates else None
+
+
class _UnaryUnaryMultiCallable(grpc.UnaryUnaryMultiCallable):
    """Client-side invoker for a unary-request, unary-response method."""
    _channel: cygrpc.Channel
    _managed_call: IntegratedCallFactory
    _method: bytes
    _request_serializer: Optional[SerializingFunction]
    _response_deserializer: Optional[DeserializingFunction]
    _context: Any

    # pylint: disable=too-many-arguments
    def __init__(self, channel: cygrpc.Channel,
                 managed_call: IntegratedCallFactory, method: bytes,
                 request_serializer: Optional[SerializingFunction],
                 response_deserializer: Optional[DeserializingFunction]):
        self._channel = channel
        self._managed_call = managed_call
        self._method = method
        self._request_serializer = request_serializer
        self._response_deserializer = response_deserializer
        self._context = cygrpc.build_census_context()

    def _prepare(
        self, request: Any, timeout: Optional[float],
        metadata: Optional[MetadataType], wait_for_ready: Optional[bool],
        compression: Optional[grpc.Compression]
    ) -> Tuple[Optional[_RPCState], Optional[Sequence[cygrpc.Operation]],
               Optional[float], Optional[grpc.RpcError]]:
        """Serialize the request and assemble the single operation batch.

        Returns (state, operations, deadline, error); on serialization
        failure state/operations/deadline are None and error is set.
        """
        deadline, serialized_request, rendezvous = _start_unary_request(
            request, timeout, self._request_serializer)
        initial_metadata_flags = _InitialMetadataFlags().with_wait_for_ready(
            wait_for_ready)
        augmented_metadata = _compression.augment_metadata(
            metadata, compression)
        if serialized_request is None:
            return None, None, None, rendezvous
        else:
            state = _RPCState(_UNARY_UNARY_INITIAL_DUE, None, None, None, None)
            # All six operations go in one batch for a unary-unary call.
            operations = (
                cygrpc.SendInitialMetadataOperation(augmented_metadata,
                                                    initial_metadata_flags),
                cygrpc.SendMessageOperation(serialized_request, _EMPTY_FLAGS),
                cygrpc.SendCloseFromClientOperation(_EMPTY_FLAGS),
                cygrpc.ReceiveInitialMetadataOperation(_EMPTY_FLAGS),
                cygrpc.ReceiveMessageOperation(_EMPTY_FLAGS),
                cygrpc.ReceiveStatusOnClientOperation(_EMPTY_FLAGS),
            )
            return state, operations, deadline, None

    def _blocking(
        self,
        request: Any,
        timeout: Optional[float] = None,
        metadata: Optional[MetadataType] = None,
        credentials: Optional[grpc.CallCredentials] = None,
        wait_for_ready: Optional[bool] = None,
        compression: Optional[grpc.Compression] = None
    ) -> Tuple[_RPCState, cygrpc.SegregatedCall]:
        """Run the RPC to completion on the calling thread."""
        state, operations, deadline, rendezvous = self._prepare(
            request, timeout, metadata, wait_for_ready, compression)
        if state is None:
            raise rendezvous  # pylint: disable-msg=raising-bad-type
        else:
            call = self._channel.segregated_call(
                cygrpc.PropagationConstants.GRPC_PROPAGATE_DEFAULTS,
                self._method, None, _determine_deadline(deadline), metadata,
                None if credentials is None else credentials._credentials, ((
                    operations,
                    None,
                ),), self._context)
            # A single batch yields a single terminal event.
            event = call.next_event()
            _handle_event(event, state, self._response_deserializer)
            return state, call

    def __call__(self,
                 request: Any,
                 timeout: Optional[float] = None,
                 metadata: Optional[MetadataType] = None,
                 credentials: Optional[grpc.CallCredentials] = None,
                 wait_for_ready: Optional[bool] = None,
                 compression: Optional[grpc.Compression] = None) -> Any:
        """See grpc.UnaryUnaryMultiCallable.__call__."""
        state, call, = self._blocking(request, timeout, metadata, credentials,
                                      wait_for_ready, compression)
        return _end_unary_response_blocking(state, call, False, None)

    def with_call(
        self,
        request: Any,
        timeout: Optional[float] = None,
        metadata: Optional[MetadataType] = None,
        credentials: Optional[grpc.CallCredentials] = None,
        wait_for_ready: Optional[bool] = None,
        compression: Optional[grpc.Compression] = None
    ) -> Tuple[Any, grpc.Call]:
        """See grpc.UnaryUnaryMultiCallable.with_call."""
        state, call, = self._blocking(request, timeout, metadata, credentials,
                                      wait_for_ready, compression)
        return _end_unary_response_blocking(state, call, True, None)

    def future(
        self,
        request: Any,
        timeout: Optional[float] = None,
        metadata: Optional[MetadataType] = None,
        credentials: Optional[grpc.CallCredentials] = None,
        wait_for_ready: Optional[bool] = None,
        compression: Optional[grpc.Compression] = None
    ) -> _MultiThreadedRendezvous:
        """See grpc.UnaryUnaryMultiCallable.future."""
        state, operations, deadline, rendezvous = self._prepare(
            request, timeout, metadata, wait_for_ready, compression)
        if state is None:
            raise rendezvous  # pylint: disable-msg=raising-bad-type
        else:
            # Completion is delivered asynchronously via the channel spin
            # thread, so hand the batch an event handler.
            event_handler = _event_handler(state, self._response_deserializer)
            call = self._managed_call(
                cygrpc.PropagationConstants.GRPC_PROPAGATE_DEFAULTS,
                self._method, None, deadline, metadata,
                None if credentials is None else credentials._credentials,
                (operations,), event_handler, self._context)
            return _MultiThreadedRendezvous(state, call,
                                            self._response_deserializer,
                                            deadline)
+
+
class _SingleThreadedUnaryStreamMultiCallable(grpc.UnaryStreamMultiCallable):
    """Unary-stream invoker whose iterator runs without a spin thread.

    Returns a _SingleThreadedRendezvous, so all event processing happens on
    the thread that iterates the response stream.
    """
    _channel: cygrpc.Channel
    _method: bytes
    _request_serializer: Optional[SerializingFunction]
    _response_deserializer: Optional[DeserializingFunction]
    _context: Any

    # pylint: disable=too-many-arguments
    def __init__(self, channel: cygrpc.Channel, method: bytes,
                 request_serializer: SerializingFunction,
                 response_deserializer: DeserializingFunction):
        self._channel = channel
        self._method = method
        self._request_serializer = request_serializer
        self._response_deserializer = response_deserializer
        self._context = cygrpc.build_census_context()

    def __call__(  # pylint: disable=too-many-locals
        self,
        request: Any,
        timeout: Optional[float] = None,
        metadata: Optional[MetadataType] = None,
        credentials: Optional[grpc.CallCredentials] = None,
        wait_for_ready: Optional[bool] = None,
        compression: Optional[grpc.Compression] = None
    ) -> _SingleThreadedRendezvous:
        """See grpc.UnaryStreamMultiCallable.__call__."""
        deadline = _deadline(timeout)
        serialized_request = _common.serialize(request,
                                               self._request_serializer)
        if serialized_request is None:
            state = _RPCState((), (), (), grpc.StatusCode.INTERNAL,
                              'Exception serializing request!')
            raise _InactiveRpcError(state)

        state = _RPCState(_UNARY_STREAM_INITIAL_DUE, None, None, None, None)
        call_credentials = None if credentials is None else credentials._credentials
        initial_metadata_flags = _InitialMetadataFlags().with_wait_for_ready(
            wait_for_ready)
        augmented_metadata = _compression.augment_metadata(
            metadata, compression)
        # Three batches: send side, terminal status, and initial metadata.
        operations = (
            (cygrpc.SendInitialMetadataOperation(augmented_metadata,
                                                 initial_metadata_flags),
             cygrpc.SendMessageOperation(serialized_request, _EMPTY_FLAGS),
             cygrpc.SendCloseFromClientOperation(_EMPTY_FLAGS)),
            (cygrpc.ReceiveStatusOnClientOperation(_EMPTY_FLAGS),),
            (cygrpc.ReceiveInitialMetadataOperation(_EMPTY_FLAGS),),
        )
        # None tags: the single-threaded rendezvous drains events itself.
        operations_and_tags = tuple((ops, None) for ops in operations)
        call = self._channel.segregated_call(
            cygrpc.PropagationConstants.GRPC_PROPAGATE_DEFAULTS, self._method,
            None, _determine_deadline(deadline), metadata, call_credentials,
            operations_and_tags, self._context)
        return _SingleThreadedRendezvous(state, call,
                                         self._response_deserializer, deadline)
+
+
class _UnaryStreamMultiCallable(grpc.UnaryStreamMultiCallable):
    """Client-side invoker for a unary-request, stream-response method."""
    _channel: cygrpc.Channel
    _managed_call: IntegratedCallFactory
    _method: bytes
    _request_serializer: Optional[SerializingFunction]
    _response_deserializer: Optional[DeserializingFunction]
    _context: Any

    # pylint: disable=too-many-arguments
    def __init__(self, channel: cygrpc.Channel,
                 managed_call: IntegratedCallFactory, method: bytes,
                 request_serializer: SerializingFunction,
                 response_deserializer: DeserializingFunction):
        self._channel = channel
        self._managed_call = managed_call
        self._method = method
        self._request_serializer = request_serializer
        self._response_deserializer = response_deserializer
        self._context = cygrpc.build_census_context()

    def __call__(  # pylint: disable=too-many-locals
            self,
            request: Any,
            timeout: Optional[float] = None,
            metadata: Optional[MetadataType] = None,
            credentials: Optional[grpc.CallCredentials] = None,
            wait_for_ready: Optional[bool] = None,
            compression: Optional[
                grpc.Compression] = None) -> _MultiThreadedRendezvous:
        """See grpc.UnaryStreamMultiCallable.__call__."""
        deadline, serialized_request, rendezvous = _start_unary_request(
            request, timeout, self._request_serializer)
        initial_metadata_flags = _InitialMetadataFlags().with_wait_for_ready(
            wait_for_ready)
        if serialized_request is None:
            raise rendezvous  # pylint: disable-msg=raising-bad-type
        else:
            augmented_metadata = _compression.augment_metadata(
                metadata, compression)
            state = _RPCState(_UNARY_STREAM_INITIAL_DUE, None, None, None, None)
            # Two batches: send side plus status, and initial metadata.
            operations = (
                (
                    cygrpc.SendInitialMetadataOperation(augmented_metadata,
                                                        initial_metadata_flags),
                    cygrpc.SendMessageOperation(serialized_request,
                                                _EMPTY_FLAGS),
                    cygrpc.SendCloseFromClientOperation(_EMPTY_FLAGS),
                    cygrpc.ReceiveStatusOnClientOperation(_EMPTY_FLAGS),
                ),
                (cygrpc.ReceiveInitialMetadataOperation(_EMPTY_FLAGS),),
            )
            call = self._managed_call(
                cygrpc.PropagationConstants.GRPC_PROPAGATE_DEFAULTS,
                self._method, None, _determine_deadline(deadline), metadata,
                None if credentials is None else credentials._credentials,
                operations, _event_handler(state, self._response_deserializer),
                self._context)
            return _MultiThreadedRendezvous(state, call,
                                            self._response_deserializer,
                                            deadline)
+
+
class _StreamUnaryMultiCallable(grpc.StreamUnaryMultiCallable):
    """Client-side invoker for a stream-request, unary-response method."""
    _channel: cygrpc.Channel
    _managed_call: IntegratedCallFactory
    _method: bytes
    _request_serializer: Optional[SerializingFunction]
    _response_deserializer: Optional[DeserializingFunction]
    _context: Any

    # pylint: disable=too-many-arguments
    def __init__(self, channel: cygrpc.Channel,
                 managed_call: IntegratedCallFactory, method: bytes,
                 request_serializer: Optional[SerializingFunction],
                 response_deserializer: Optional[DeserializingFunction]):
        self._channel = channel
        self._managed_call = managed_call
        self._method = method
        self._request_serializer = request_serializer
        self._response_deserializer = response_deserializer
        self._context = cygrpc.build_census_context()

    def _blocking(
        self, request_iterator: Iterator, timeout: Optional[float],
        metadata: Optional[MetadataType],
        credentials: Optional[grpc.CallCredentials],
        wait_for_ready: Optional[bool], compression: Optional[grpc.Compression]
    ) -> Tuple[_RPCState, cygrpc.SegregatedCall]:
        """Drive the RPC on the calling thread until every batch completes."""
        deadline = _deadline(timeout)
        state = _RPCState(_STREAM_UNARY_INITIAL_DUE, None, None, None, None)
        initial_metadata_flags = _InitialMetadataFlags().with_wait_for_ready(
            wait_for_ready)
        augmented_metadata = _compression.augment_metadata(
            metadata, compression)
        call = self._channel.segregated_call(
            cygrpc.PropagationConstants.GRPC_PROPAGATE_DEFAULTS, self._method,
            None, _determine_deadline(deadline), augmented_metadata,
            None if credentials is None else credentials._credentials,
            _stream_unary_invocation_operations_and_tags(
                augmented_metadata, initial_metadata_flags), self._context)
        # Requests are consumed and sent on a separate thread.
        _consume_request_iterator(request_iterator, state, call,
                                  self._request_serializer, None)
        while True:
            event = call.next_event()
            with state.condition:
                _handle_event(event, state, self._response_deserializer)
                state.condition.notify_all()
                if not state.due:
                    # All enqueued operations have completed.
                    break
        return state, call

    def __call__(self,
                 request_iterator: Iterator,
                 timeout: Optional[float] = None,
                 metadata: Optional[MetadataType] = None,
                 credentials: Optional[grpc.CallCredentials] = None,
                 wait_for_ready: Optional[bool] = None,
                 compression: Optional[grpc.Compression] = None) -> Any:
        """See grpc.StreamUnaryMultiCallable.__call__."""
        state, call, = self._blocking(request_iterator, timeout, metadata,
                                      credentials, wait_for_ready, compression)
        return _end_unary_response_blocking(state, call, False, None)

    def with_call(
        self,
        request_iterator: Iterator,
        timeout: Optional[float] = None,
        metadata: Optional[MetadataType] = None,
        credentials: Optional[grpc.CallCredentials] = None,
        wait_for_ready: Optional[bool] = None,
        compression: Optional[grpc.Compression] = None
    ) -> Tuple[Any, grpc.Call]:
        """See grpc.StreamUnaryMultiCallable.with_call."""
        state, call, = self._blocking(request_iterator, timeout, metadata,
                                      credentials, wait_for_ready, compression)
        return _end_unary_response_blocking(state, call, True, None)

    def future(
        self,
        request_iterator: Iterator,
        timeout: Optional[float] = None,
        metadata: Optional[MetadataType] = None,
        credentials: Optional[grpc.CallCredentials] = None,
        wait_for_ready: Optional[bool] = None,
        compression: Optional[grpc.Compression] = None
    ) -> _MultiThreadedRendezvous:
        """See grpc.StreamUnaryMultiCallable.future."""
        deadline = _deadline(timeout)
        state = _RPCState(_STREAM_UNARY_INITIAL_DUE, None, None, None, None)
        event_handler = _event_handler(state, self._response_deserializer)
        initial_metadata_flags = _InitialMetadataFlags().with_wait_for_ready(
            wait_for_ready)
        augmented_metadata = _compression.augment_metadata(
            metadata, compression)
        call = self._managed_call(
            cygrpc.PropagationConstants.GRPC_PROPAGATE_DEFAULTS, self._method,
            None, deadline, augmented_metadata,
            None if credentials is None else credentials._credentials,
            _stream_unary_invocation_operations(metadata,
                                                initial_metadata_flags),
            event_handler, self._context)
        _consume_request_iterator(request_iterator, state, call,
                                  self._request_serializer, event_handler)
        return _MultiThreadedRendezvous(state, call,
                                        self._response_deserializer, deadline)
+
+
class _StreamStreamMultiCallable(grpc.StreamStreamMultiCallable):
    """Client-side invoker for a stream-request, stream-response method."""
    _channel: cygrpc.Channel
    _managed_call: IntegratedCallFactory
    _method: bytes
    _request_serializer: Optional[SerializingFunction]
    _response_deserializer: Optional[DeserializingFunction]
    _context: Any

    # pylint: disable=too-many-arguments
    def __init__(self,
                 channel: cygrpc.Channel,
                 managed_call: IntegratedCallFactory,
                 method: bytes,
                 request_serializer: Optional[SerializingFunction] = None,
                 response_deserializer: Optional[DeserializingFunction] = None):
        self._channel = channel
        self._managed_call = managed_call
        self._method = method
        self._request_serializer = request_serializer
        self._response_deserializer = response_deserializer
        self._context = cygrpc.build_census_context()

    def __call__(
        self,
        request_iterator: Iterator,
        timeout: Optional[float] = None,
        metadata: Optional[MetadataType] = None,
        credentials: Optional[grpc.CallCredentials] = None,
        wait_for_ready: Optional[bool] = None,
        compression: Optional[grpc.Compression] = None
    ) -> _MultiThreadedRendezvous:
        """See grpc.StreamStreamMultiCallable.__call__."""
        deadline = _deadline(timeout)
        state = _RPCState(_STREAM_STREAM_INITIAL_DUE, None, None, None, None)
        initial_metadata_flags = _InitialMetadataFlags().with_wait_for_ready(
            wait_for_ready)
        augmented_metadata = _compression.augment_metadata(
            metadata, compression)
        # Two batches: metadata-plus-status, and initial metadata; message
        # sends/receives are driven separately by the consumer thread and
        # the rendezvous iterator.
        operations = (
            (
                cygrpc.SendInitialMetadataOperation(augmented_metadata,
                                                    initial_metadata_flags),
                cygrpc.ReceiveStatusOnClientOperation(_EMPTY_FLAGS),
            ),
            (cygrpc.ReceiveInitialMetadataOperation(_EMPTY_FLAGS),),
        )
        event_handler = _event_handler(state, self._response_deserializer)
        call = self._managed_call(
            cygrpc.PropagationConstants.GRPC_PROPAGATE_DEFAULTS, self._method,
            None, _determine_deadline(deadline), augmented_metadata,
            None if credentials is None else credentials._credentials,
            operations, event_handler, self._context)
        _consume_request_iterator(request_iterator, state, call,
                                  self._request_serializer, event_handler)
        return _MultiThreadedRendezvous(state, call,
                                        self._response_deserializer, deadline)
+
+
class _InitialMetadataFlags(int):
    """Stores immutable initial metadata flags.

    Instances are plain ints masked to the bits Core recognizes
    (cygrpc.InitialMetadataFlags.used_mask).
    """

    def __new__(cls, value: int = _EMPTY_FLAGS):
        # Drop any bits Core does not recognize as initial-metadata flags.
        value &= cygrpc.InitialMetadataFlags.used_mask
        return super(_InitialMetadataFlags, cls).__new__(cls, value)

    def with_wait_for_ready(self, wait_for_ready: Optional[bool]) -> int:
        """Returns flags with the wait-for-ready preference encoded.

        Args:
          wait_for_ready: True sets the wait_for_ready bit, False clears it
            (both set wait_for_ready_explicitly_set); None returns the flags
            unchanged so Core applies its default.
        """
        if wait_for_ready is not None:
            if wait_for_ready:
                return self.__class__(
                    self | cygrpc.InitialMetadataFlags.wait_for_ready |
                    cygrpc.InitialMetadataFlags.wait_for_ready_explicitly_set)
            else:
                # The original `elif not wait_for_ready:` was always true when
                # reached; a plain else expresses the same logic directly.
                return self.__class__(
                    self & ~cygrpc.InitialMetadataFlags.wait_for_ready |
                    cygrpc.InitialMetadataFlags.wait_for_ready_explicitly_set)
        return self
+
+
class _ChannelCallState(object):
    """Tracks the managed calls in flight on a channel.

    NOTE(review): the attribute named `threading` shadows the stdlib module
    name at instance level; `self.lock` still uses the module because the
    assignment happens before the attribute exists.
    """
    # The Cython-layer channel whose calls are being tracked.
    channel: cygrpc.Channel
    # Number of managed calls currently in flight; guarded by `lock`.
    managed_calls: int
    # Whether a spin thread is active (name kept for upstream compatibility).
    threading: bool

    def __init__(self, channel: cygrpc.Channel):
        self.lock = threading.Lock()
        self.channel = channel
        self.managed_calls = 0
        self.threading = False

    def reset_postfork_child(self) -> None:
        # After fork, the child owns no calls from the parent's channel.
        self.managed_calls = 0

    def __del__(self):
        try:
            self.channel.close(cygrpc.StatusCode.cancelled,
                               'Channel deallocated!')
        except (TypeError, AttributeError):
            # Interpreter shutdown may have torn down names already; ignore.
            pass
+
+
def _run_channel_spin_thread(state: _ChannelCallState) -> None:
    """Starts the daemon thread that drains call events for *state*.

    The thread exits once the number of managed calls drops to zero.
    """

    def _spin():
        while True:
            cygrpc.block_if_fork_in_progress(state)
            event = state.channel.next_call_event()
            if event.completion_type == cygrpc.CompletionType.queue_timeout:
                # Nothing to dispatch this round.
                continue
            if event.tag(event):
                # The tag reported that its RPC completed.
                with state.lock:
                    state.managed_calls -= 1
                    if state.managed_calls == 0:
                        return

    spin_thread = cygrpc.ForkManagedThread(target=_spin)
    spin_thread.setDaemon(True)
    spin_thread.start()
+
+
def _channel_managed_call_management(state: _ChannelCallState):
    """Returns a factory of integrated calls whose events *state* tracks."""

    # pylint: disable=too-many-arguments
    def create(flags: int, method: bytes, host: Optional[str],
               deadline: Optional[float], metadata: Optional[MetadataType],
               credentials: Optional[cygrpc.CallCredentials],
               operations: Sequence[Sequence[cygrpc.Operation]],
               event_handler: UserTag, context) -> cygrpc.IntegratedCall:
        """Creates a cygrpc.IntegratedCall.

        Args:
          flags: An integer bitfield of call flags.
          method: The RPC method.
          host: A host string for the created call.
          deadline: A float to be the deadline of the created call or None if
            the call is to have an infinite deadline.
          metadata: The metadata for the call or None.
          credentials: A cygrpc.CallCredentials or None.
          operations: A sequence of sequences of cygrpc.Operations to be
            started on the call.
          event_handler: A behavior to call to handle the events resultant from
            the operations on the call.
          context: Context object for distributed tracing.
        Returns:
          A cygrpc.IntegratedCall with which to conduct an RPC.
        """
        # Every operation batch shares the same event handler as its tag.
        operations_and_tags = tuple((
            operation,
            event_handler,
        ) for operation in operations)
        with state.lock:
            call = state.channel.integrated_call(flags, method, host, deadline,
                                                 metadata, credentials,
                                                 operations_and_tags, context)
            if state.managed_calls == 0:
                state.managed_calls = 1
                # First in-flight call: start the event-draining spin thread.
                _run_channel_spin_thread(state)
            else:
                state.managed_calls += 1
        return call

    return create
+
+
class _ChannelConnectivityState(object):
    """Tracks connectivity subscriptions and polling for a channel."""
    # Guards all mutable fields below; reentrant because delivery callbacks
    # may subscribe/unsubscribe while the lock is held by the poller.
    lock: threading.RLock
    channel: grpc.Channel
    # Whether a polling thread is currently watching connectivity.
    polling: bool
    # Last observed connectivity, or None before the first poll result.
    connectivity: Optional[grpc.ChannelConnectivity]
    try_to_connect: bool
    # TODO(xuanwn): Refactor this: https://github.com/grpc/grpc/issues/31704
    # Each entry is a mutable [callback, last-delivered-connectivity] pair.
    callbacks_and_connectivities: List[Sequence[Union[Callable[
        [grpc.ChannelConnectivity], None], Optional[grpc.ChannelConnectivity]]]]
    # Whether a delivery thread is currently running callbacks.
    delivering: bool

    def __init__(self, channel: grpc.Channel):
        self.lock = threading.RLock()
        self.channel = channel
        self.polling = False
        self.connectivity = None
        self.try_to_connect = False
        self.callbacks_and_connectivities = []
        self.delivering = False

    def reset_postfork_child(self) -> None:
        # The child process starts with no pollers, deliveries or subscribers.
        self.polling = False
        self.connectivity = None
        self.try_to_connect = False
        self.callbacks_and_connectivities = []
        self.delivering = False
+
+
+def _deliveries(
+ state: _ChannelConnectivityState
+) -> List[Callable[[grpc.ChannelConnectivity], None]]:
+ callbacks_needing_update = []
+ for callback_and_connectivity in state.callbacks_and_connectivities:
+ callback, callback_connectivity, = callback_and_connectivity
+ if callback_connectivity is not state.connectivity:
+ callbacks_needing_update.append(callback)
+ callback_and_connectivity[1] = state.connectivity
+ return callbacks_needing_update
+
+
def _deliver(
        state: _ChannelConnectivityState,
        initial_connectivity: grpc.ChannelConnectivity,
        initial_callbacks: Sequence[Callable[[grpc.ChannelConnectivity], None]]
) -> None:
    """Runs subscription callbacks until no undelivered updates remain.

    Clears *state.delivering* before returning so a new delivery thread can
    be spawned for later updates.
    """
    connectivity = initial_connectivity
    callbacks = initial_callbacks
    while True:
        for callback in callbacks:
            cygrpc.block_if_fork_in_progress(state)
            try:
                callback(connectivity)
            except Exception:  # pylint: disable=broad-except
                _LOGGER.exception(
                    _CHANNEL_SUBSCRIPTION_CALLBACK_ERROR_LOG_MESSAGE)
        with state.lock:
            callbacks = _deliveries(state)
            if not callbacks:
                state.delivering = False
                return
            connectivity = state.connectivity
+
+
def _spawn_delivery(
        state: _ChannelConnectivityState,
        callbacks: Sequence[Callable[[grpc.ChannelConnectivity],
                                     None]]) -> None:
    """Starts a daemon thread delivering the current connectivity to *callbacks*."""
    delivery_thread = cygrpc.ForkManagedThread(
        target=_deliver, args=(state, state.connectivity, callbacks))
    delivery_thread.setDaemon(True)
    delivery_thread.start()
    state.delivering = True
+
+
# NOTE(https://github.com/grpc/grpc/issues/3064): We'd rather not poll.
def _poll_connectivity(state: _ChannelConnectivityState, channel: grpc.Channel,
                       initial_try_to_connect: bool) -> None:
    """Polls channel connectivity, delivering updates to subscribers.

    Runs until there are no subscribers left and no pending connect request.
    """
    try_to_connect = initial_try_to_connect
    connectivity = channel.check_connectivity_state(try_to_connect)
    with state.lock:
        state.connectivity = (
            _common.
            CYGRPC_CONNECTIVITY_STATE_TO_CHANNEL_CONNECTIVITY[connectivity])
        # Subscribers registered before the first poll have connectivity None.
        callbacks = tuple(
            callback for callback, unused_but_known_to_be_none_connectivity in
            state.callbacks_and_connectivities)
        for callback_and_connectivity in state.callbacks_and_connectivities:
            callback_and_connectivity[1] = state.connectivity
        if callbacks:
            _spawn_delivery(state, callbacks)
    while True:
        # Short watch window so fork checks and shutdown happen promptly.
        event = channel.watch_connectivity_state(connectivity,
                                                 time.time() + 0.2)
        cygrpc.block_if_fork_in_progress(state)
        with state.lock:
            if not state.callbacks_and_connectivities and not state.try_to_connect:
                # No one is listening and no connect requested: stop polling.
                state.polling = False
                state.connectivity = None
                break
            try_to_connect = state.try_to_connect
            state.try_to_connect = False
        if event.success or try_to_connect:
            connectivity = channel.check_connectivity_state(try_to_connect)
            with state.lock:
                state.connectivity = (
                    _common.CYGRPC_CONNECTIVITY_STATE_TO_CHANNEL_CONNECTIVITY[
                        connectivity])
                # Only spawn a new delivery thread if none is running.
                if not state.delivering:
                    callbacks = _deliveries(state)
                    if callbacks:
                        _spawn_delivery(state, callbacks)
+
+
def _subscribe(state: _ChannelConnectivityState,
               callback: Callable[[grpc.ChannelConnectivity],
                                  None], try_to_connect: bool) -> None:
    """Registers *callback* for connectivity updates, starting polling if needed."""
    with state.lock:
        if not state.callbacks_and_connectivities and not state.polling:
            # First subscriber: start the polling thread; it reads
            # try_to_connect from its argument rather than from state.
            polling_thread = cygrpc.ForkManagedThread(
                target=_poll_connectivity,
                args=(state, state.channel, bool(try_to_connect)))
            polling_thread.setDaemon(True)
            polling_thread.start()
            state.polling = True
            state.callbacks_and_connectivities.append([callback, None])
        elif not state.delivering and state.connectivity is not None:
            # Connectivity already known and no delivery running: deliver the
            # current value immediately, then record it as already delivered.
            _spawn_delivery(state, (callback,))
            state.try_to_connect |= bool(try_to_connect)
            state.callbacks_and_connectivities.append(
                [callback, state.connectivity])
        else:
            # A running delivery (or pending first poll) will pick this up.
            state.try_to_connect |= bool(try_to_connect)
            state.callbacks_and_connectivities.append([callback, None])
+
+
+def _unsubscribe(state: _ChannelConnectivityState,
+ callback: Callable[[grpc.ChannelConnectivity], None]) -> None:
+ with state.lock:
+ for index, (subscribed_callback, unused_connectivity) in enumerate(
+ state.callbacks_and_connectivities):
+ if callback == subscribed_callback:
+ state.callbacks_and_connectivities.pop(index)
+ break
+
+
def _augment_options(
        base_options: Sequence[ChannelArgumentType],
        compression: Optional[grpc.Compression]
) -> Sequence[ChannelArgumentType]:
    """Appends the compression and user-agent channel args to *base_options*."""
    user_agent_option = ((
        cygrpc.ChannelArgKey.primary_user_agent_string,
        _USER_AGENT,
    ),)
    compression_option = _compression.create_channel_option(compression)
    return tuple(base_options) + compression_option + user_agent_option
+
+
def _separate_channel_options(
    options: Sequence[ChannelArgumentType]
) -> Tuple[Sequence[ChannelArgumentType], Sequence[ChannelArgumentType]]:
    """Separates core channel options from Python channel options."""
    core_options = []
    python_options = []
    for option in options:
        is_python_only = option[
            0] == grpc.experimental.ChannelOptions.SingleThreadedUnaryStream
        (python_options if is_python_only else core_options).append(option)
    return python_options, core_options
+
+
class Channel(grpc.Channel):
    """A cygrpc.Channel-backed implementation of grpc.Channel."""
    # Whether unary-stream RPCs use the single-threaded code path.
    _single_threaded_unary_stream: bool
    # The underlying Cython-layer channel.
    _channel: cygrpc.Channel
    # Tracks managed calls and the event-draining spin thread.
    _call_state: _ChannelCallState
    # Tracks connectivity subscriptions and polling.
    _connectivity_state: _ChannelConnectivityState

    def __init__(self, target: str, options: Sequence[ChannelArgumentType],
                 credentials: Optional[grpc.ChannelCredentials],
                 compression: Optional[grpc.Compression]):
        """Constructor.

        Args:
          target: The target to which to connect.
          options: Configuration options for the channel.
          credentials: A cygrpc.ChannelCredentials or None.
          compression: An optional value indicating the compression method to be
            used over the lifetime of the channel.
        """
        python_options, core_options = _separate_channel_options(options)
        self._single_threaded_unary_stream = _DEFAULT_SINGLE_THREADED_UNARY_STREAM
        self._process_python_options(python_options)
        self._channel = cygrpc.Channel(
            _common.encode(target), _augment_options(core_options, compression),
            credentials)
        self._call_state = _ChannelCallState(self._channel)
        self._connectivity_state = _ChannelConnectivityState(self._channel)
        # Registration lets fork handlers close this channel in the child.
        cygrpc.fork_register_channel(self)
        if cygrpc.g_gevent_activated:
            cygrpc.gevent_increment_channel_count()

    def _process_python_options(
            self, python_options: Sequence[ChannelArgumentType]) -> None:
        """Sets channel attributes according to python-only channel options."""
        for pair in python_options:
            if pair[0] == grpc.experimental.ChannelOptions.SingleThreadedUnaryStream:
                self._single_threaded_unary_stream = True

    def subscribe(self,
                  callback: Callable[[grpc.ChannelConnectivity], None],
                  try_to_connect: Optional[bool] = None) -> None:
        """Subscribes *callback* to connectivity updates (see grpc.Channel)."""
        _subscribe(self._connectivity_state, callback, try_to_connect)

    def unsubscribe(
            self, callback: Callable[[grpc.ChannelConnectivity], None]) -> None:
        """Removes a previously registered connectivity callback."""
        _unsubscribe(self._connectivity_state, callback)

    def unary_unary(
            self,
            method: str,
            request_serializer: Optional[SerializingFunction] = None,
            response_deserializer: Optional[DeserializingFunction] = None
    ) -> grpc.UnaryUnaryMultiCallable:
        """Creates a callable for a unary-unary RPC method."""
        return _UnaryUnaryMultiCallable(
            self._channel, _channel_managed_call_management(self._call_state),
            _common.encode(method), request_serializer, response_deserializer)

    def unary_stream(
            self,
            method: str,
            request_serializer: Optional[SerializingFunction] = None,
            response_deserializer: Optional[DeserializingFunction] = None
    ) -> grpc.UnaryStreamMultiCallable:
        """Creates a callable for a unary-stream RPC method."""
        # NOTE(rbellevi): Benchmarks have shown that running a unary-stream RPC
        # on a single Python thread results in an appreciable speed-up. However,
        # due to slight differences in capability, the multi-threaded variant
        # remains the default.
        if self._single_threaded_unary_stream:
            return _SingleThreadedUnaryStreamMultiCallable(
                self._channel, _common.encode(method), request_serializer,
                response_deserializer)
        else:
            return _UnaryStreamMultiCallable(
                self._channel,
                _channel_managed_call_management(self._call_state),
                _common.encode(method), request_serializer,
                response_deserializer)

    def stream_unary(
            self,
            method: str,
            request_serializer: Optional[SerializingFunction] = None,
            response_deserializer: Optional[DeserializingFunction] = None
    ) -> grpc.StreamUnaryMultiCallable:
        """Creates a callable for a stream-unary RPC method."""
        return _StreamUnaryMultiCallable(
            self._channel, _channel_managed_call_management(self._call_state),
            _common.encode(method), request_serializer, response_deserializer)

    def stream_stream(
            self,
            method: str,
            request_serializer: Optional[SerializingFunction] = None,
            response_deserializer: Optional[DeserializingFunction] = None
    ) -> grpc.StreamStreamMultiCallable:
        """Creates a callable for a stream-stream RPC method."""
        return _StreamStreamMultiCallable(
            self._channel, _channel_managed_call_management(self._call_state),
            _common.encode(method), request_serializer, response_deserializer)

    def _unsubscribe_all(self) -> None:
        # Drops every connectivity subscription; the polling thread will then
        # shut itself down on its next pass.
        state = self._connectivity_state
        if state:
            with state.lock:
                del state.callbacks_and_connectivities[:]

    def _close(self) -> None:
        self._unsubscribe_all()
        self._channel.close(cygrpc.StatusCode.cancelled, 'Channel closed!')
        cygrpc.fork_unregister_channel(self)
        if cygrpc.g_gevent_activated:
            cygrpc.gevent_decrement_channel_count()

    def _close_on_fork(self) -> None:
        # Invoked from fork handlers; uses the fork-specific close variant.
        self._unsubscribe_all()
        self._channel.close_on_fork(cygrpc.StatusCode.cancelled,
                                    'Channel closed due to fork')

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self._close()
        return False

    def close(self) -> None:
        """Closes the channel and releases its resources."""
        self._close()

    def __del__(self):
        # TODO(https://github.com/grpc/grpc/issues/12531): Several releases
        # after 1.12 (1.16 or thereabouts?) add a "self._channel.close" call
        # here (or more likely, call self._close() here). We don't do this today
        # because many valid use cases today allow the channel to be deleted
        # immediately after stubs are created. After a sufficient period of time
        # has passed for all users to be trusted to freeze out to their channels
        # for as long as they are in use and to close them after using them,
        # then deletion of this grpc._channel.Channel instance can be made to
        # effect closure of the underlying cygrpc.Channel instance.
        try:
            self._unsubscribe_all()
        except:  # pylint: disable=bare-except
            # Exceptions in __del__ are ignored by Python anyway, but they can
            # keep spamming logs. Just silence them.
            pass
diff --git a/contrib/python/grpcio/py3/grpc/_common.py b/contrib/python/grpcio/py3/grpc/_common.py
new file mode 100644
index 0000000000..3b8fd0ff97
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/_common.py
@@ -0,0 +1,177 @@
+# Copyright 2016 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Shared implementation."""
+
+import logging
+import time
+from typing import Any, AnyStr, Callable, Optional, Union
+
+import grpc
+from grpc._cython import cygrpc
+from grpc._typing import DeserializingFunction
+from grpc._typing import SerializingFunction
+
+_LOGGER = logging.getLogger(__name__)
+
# Maps Cython-layer connectivity states to the public grpc enum.
CYGRPC_CONNECTIVITY_STATE_TO_CHANNEL_CONNECTIVITY = {
    cygrpc.ConnectivityState.idle:
        grpc.ChannelConnectivity.IDLE,
    cygrpc.ConnectivityState.connecting:
        grpc.ChannelConnectivity.CONNECTING,
    cygrpc.ConnectivityState.ready:
        grpc.ChannelConnectivity.READY,
    cygrpc.ConnectivityState.transient_failure:
        grpc.ChannelConnectivity.TRANSIENT_FAILURE,
    cygrpc.ConnectivityState.shutdown:
        grpc.ChannelConnectivity.SHUTDOWN,
}

# Maps Cython-layer status codes to the public grpc.StatusCode enum.
CYGRPC_STATUS_CODE_TO_STATUS_CODE = {
    cygrpc.StatusCode.ok: grpc.StatusCode.OK,
    cygrpc.StatusCode.cancelled: grpc.StatusCode.CANCELLED,
    cygrpc.StatusCode.unknown: grpc.StatusCode.UNKNOWN,
    cygrpc.StatusCode.invalid_argument: grpc.StatusCode.INVALID_ARGUMENT,
    cygrpc.StatusCode.deadline_exceeded: grpc.StatusCode.DEADLINE_EXCEEDED,
    cygrpc.StatusCode.not_found: grpc.StatusCode.NOT_FOUND,
    cygrpc.StatusCode.already_exists: grpc.StatusCode.ALREADY_EXISTS,
    cygrpc.StatusCode.permission_denied: grpc.StatusCode.PERMISSION_DENIED,
    cygrpc.StatusCode.unauthenticated: grpc.StatusCode.UNAUTHENTICATED,
    cygrpc.StatusCode.resource_exhausted: grpc.StatusCode.RESOURCE_EXHAUSTED,
    cygrpc.StatusCode.failed_precondition: grpc.StatusCode.FAILED_PRECONDITION,
    cygrpc.StatusCode.aborted: grpc.StatusCode.ABORTED,
    cygrpc.StatusCode.out_of_range: grpc.StatusCode.OUT_OF_RANGE,
    cygrpc.StatusCode.unimplemented: grpc.StatusCode.UNIMPLEMENTED,
    cygrpc.StatusCode.internal: grpc.StatusCode.INTERNAL,
    cygrpc.StatusCode.unavailable: grpc.StatusCode.UNAVAILABLE,
    cygrpc.StatusCode.data_loss: grpc.StatusCode.DATA_LOSS,
}
# Inverse of the mapping above.
STATUS_CODE_TO_CYGRPC_STATUS_CODE = {
    grpc_code: cygrpc_code
    for cygrpc_code, grpc_code in CYGRPC_STATUS_CODE_TO_STATUS_CODE.items()
}

# Upper bound (seconds) for any single blocking wait; keeps signal handlers
# responsive (see `wait` below).
MAXIMUM_WAIT_TIMEOUT = 0.1

_ERROR_MESSAGE_PORT_BINDING_FAILED = 'Failed to bind to address %s; set ' \
    'GRPC_VERBOSITY=debug environment variable to see detailed error message.'
+
+
def encode(s: AnyStr) -> bytes:
    """Returns *s* as UTF-8 bytes; bytes input passes through unchanged."""
    return s if isinstance(s, bytes) else s.encode('utf8')
+
+
def decode(b: AnyStr) -> str:
    """Returns *b* as text, replacing undecodable UTF-8 bytes.

    String input passes through unchanged.
    """
    if not isinstance(b, bytes):
        return b
    return b.decode('utf-8', 'replace')
+
+
+def _transform(message: Any, transformer: Union[SerializingFunction,
+ DeserializingFunction, None],
+ exception_message: str) -> Any:
+ if transformer is None:
+ return message
+ else:
+ try:
+ return transformer(message)
+ except Exception: # pylint: disable=broad-except
+ _LOGGER.exception(exception_message)
+ return None
+
+
def serialize(message: Any, serializer: Optional[SerializingFunction]) -> bytes:
    """Serializes *message* with *serializer*; None on failure or pass-through if no serializer."""
    return _transform(message, serializer, 'Exception serializing message!')
+
+
def deserialize(serialized_message: bytes,
                deserializer: Optional[DeserializingFunction]) -> Any:
    """Deserializes bytes with *deserializer*; None on failure or pass-through if no deserializer."""
    return _transform(serialized_message, deserializer,
                      'Exception deserializing message!')
+
+
def fully_qualified_method(group: str, method: str) -> str:
    """Returns the '/<group>/<method>' path used on the wire."""
    return f'/{group}/{method}'
+
+
def _wait_once(wait_fn: Callable[..., bool], timeout: float,
               spin_cb: Optional[Callable[[], None]]):
    """Performs one bounded wait, then invokes the optional spin callback."""
    wait_fn(timeout=timeout)
    if spin_cb is not None:
        spin_cb()
+
+
def wait(wait_fn: Callable[..., bool],
         wait_complete_fn: Callable[[], bool],
         timeout: Optional[float] = None,
         spin_cb: Optional[Callable[[], None]] = None) -> bool:
    """Blocks waiting for an event without blocking the thread indefinitely.

    See https://github.com/grpc/grpc/issues/19464 for full context. CPython's
    `threading.Event.wait` and `threading.Condition.wait` methods, if invoked
    without a timeout kwarg, may block the calling thread indefinitely. If the
    call is made from the main thread, this means that signal handlers may not
    run for an arbitrarily long period of time.

    This wrapper calls the supplied wait function with an arbitrary short
    timeout to ensure that no signal handler has to wait longer than
    MAXIMUM_WAIT_TIMEOUT before executing.

    Args:
      wait_fn: A callable accepting a single float-valued kwarg named
        `timeout`, expected to be `threading.Event.wait` or
        `threading.Condition.wait`.
      wait_complete_fn: A callable taking no arguments and returning a bool.
        When it returns true, waiting ceases.
      timeout: An optional float-valued number of seconds after which the wait
        should cease.
      spin_cb: An optional callable invoked on each spin iteration (e.g. for
        fork-related bookkeeping).

    Returns:
      True if a timeout was supplied and it was reached. False otherwise.
    """
    if timeout is None:
        # No deadline: spin in short increments until completion.
        while not wait_complete_fn():
            _wait_once(wait_fn, MAXIMUM_WAIT_TIMEOUT, spin_cb)
        return False
    end = time.time() + timeout
    while not wait_complete_fn():
        remaining = min(end - time.time(), MAXIMUM_WAIT_TIMEOUT)
        if remaining < 0:
            return True
        _wait_once(wait_fn, remaining, spin_cb)
    return False
+
+
def validate_port_binding_result(address: str, port: int) -> int:
    """Validates that binding *address* succeeded.

    Core reports a failed bind by returning port 0 without a detailed reason,
    so the best we can do is raise to prevent further confusion.

    Args:
      address: The address string to be bound.
      port: An int returned by core.

    Returns:
      The bound port, when non-zero.

    Raises:
      RuntimeError: If the bind failed (port == 0).
    """
    if port != 0:
        return port
    raise RuntimeError(_ERROR_MESSAGE_PORT_BINDING_FAILED % address)
diff --git a/contrib/python/grpcio/py3/grpc/_compression.py b/contrib/python/grpcio/py3/grpc/_compression.py
new file mode 100644
index 0000000000..5eb6f2ac6d
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/_compression.py
@@ -0,0 +1,63 @@
+# Copyright 2019 The gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import annotations
+
+from typing import Optional
+
+import grpc
+from grpc._cython import cygrpc
+from grpc._typing import MetadataType
+
# Public aliases for the Cython-layer compression algorithm constants.
NoCompression = cygrpc.CompressionAlgorithm.none
Deflate = cygrpc.CompressionAlgorithm.deflate
Gzip = cygrpc.CompressionAlgorithm.gzip

# Maps an algorithm to the value advertised in request metadata.
_METADATA_STRING_MAPPING = {
    NoCompression: 'identity',
    Deflate: 'deflate',
    Gzip: 'gzip',
}
+
+
def _compression_algorithm_to_metadata_value(
        compression: grpc.Compression) -> str:
    """Returns the metadata string for *compression*; raises KeyError if unknown."""
    return _METADATA_STRING_MAPPING[compression]
+
+
def compression_algorithm_to_metadata(compression: grpc.Compression):
    """Returns the (key, value) metadata pair requesting *compression*."""
    return (cygrpc.GRPC_COMPRESSION_REQUEST_ALGORITHM_MD_KEY,
            _compression_algorithm_to_metadata_value(compression))
+
+
def create_channel_option(compression: Optional[grpc.Compression]):
    """Returns the channel-argument tuple selecting *compression*, or () if unset."""
    if not compression:
        return ()
    return ((cygrpc.GRPC_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM,
             int(compression)),)
+
+
def augment_metadata(metadata: Optional[MetadataType],
                     compression: Optional[grpc.Compression]):
    """Returns *metadata* extended with a compression entry, or None if both empty."""
    if not metadata and not compression:
        return None
    augmented = list(metadata) if metadata else []
    if compression:
        augmented.append(compression_algorithm_to_metadata(compression))
    return tuple(augmented)
+
+
+__all__ = (
+ "NoCompression",
+ "Deflate",
+ "Gzip",
+)
diff --git a/contrib/python/grpcio/py3/grpc/_cython/__init__.py b/contrib/python/grpcio/py3/grpc/_cython/__init__.py
new file mode 100644
index 0000000000..5fb4f3c3cf
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/_cython/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/__init__.py b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/__init__.py
new file mode 100644
index 0000000000..5fb4f3c3cf
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/_hooks.pxd.pxi b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/_hooks.pxd.pxi
new file mode 100644
index 0000000000..3eb10f5275
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/_hooks.pxd.pxi
@@ -0,0 +1,16 @@
+# Copyright 2018 The gRPC Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+cdef object _custom_op_on_c_call(int op, grpc_call *call)
diff --git a/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/_hooks.pyx.pxi b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/_hooks.pyx.pxi
new file mode 100644
index 0000000000..de4d71b819
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/_hooks.pyx.pxi
@@ -0,0 +1,35 @@
+# Copyright 2018 The gRPC Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
cdef object _custom_op_on_c_call(int op, grpc_call *call):
    # Default hook implementation: custom per-call ops are not supported here.
    raise NotImplementedError("No custom hooks are implemented")
+
# The functions below are no-op stand-ins for the optional census/tracing
# hooks; builds with tracing support substitute real implementations.

def install_context_from_request_call_event(RequestCallEvent event):
    # No-op: no tracing context to install in this build.
    pass

def uninstall_context():
    # No-op counterpart to install_context_from_request_call_event.
    pass

def build_census_context():
    # No-op: returns None in place of a real census context.
    pass

cdef class CensusContext:
    # Placeholder type; carries no state in this build.
    pass

def set_census_context_on_call(_CallState call_state, CensusContext census_ctx):
    # No-op: census contexts are not propagated onto calls here.
    pass

def get_deadline_from_context():
    # No context-derived deadline in this build.
    return None
diff --git a/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/aio/call.pxd.pxi b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/aio/call.pxd.pxi
new file mode 100644
index 0000000000..867245a694
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/aio/call.pxd.pxi
@@ -0,0 +1,47 @@
+# Copyright 2019 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
cdef class _AioCall(GrpcCallWrapper):
    # Client-side aio call wrapper: owns the core call plus the bookkeeping
    # needed to bridge Core completion events to asyncio tasks.
    cdef:
        readonly AioChannel _channel
        # Python objects kept alive for the duration of the core call.
        list _references
        object _deadline
        list _done_callbacks

        # Caches the picked event loop, so we can avoid the 30ns overhead each
        # time we need access to the event loop.
        object _loop

        # Flag indicates whether cancel being called or not. Cancellation from
        # Core or peer works perfectly fine with normal procedure. However, we
        # need this flag to clean up resources for cancellation from the
        # application layer. Directly cancelling tasks might cause segfault
        # because Core is holding a pointer for the callback handler.
        bint _is_locally_cancelled

        # Following attributes are used for storing the status of the call and
        # the initial metadata. Waiters are used for pausing the execution of
        # tasks that are asking for one of the field when they are not yet
        # available.
        readonly AioRpcStatus _status
        readonly tuple _initial_metadata
        list _waiters_status
        list _waiters_initial_metadata

        int _send_initial_metadata_flags

    cdef void _create_grpc_call(self, object timeout, bytes method, CallCredentials credentials) except *
    cdef void _set_status(self, AioRpcStatus status) except *
    cdef void _set_initial_metadata(self, tuple initial_metadata) except *
diff --git a/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/aio/call.pyx.pxi b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/aio/call.pyx.pxi
new file mode 100644
index 0000000000..7bce1850dc
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/aio/call.pyx.pxi
@@ -0,0 +1,508 @@
+# Copyright 2019 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
# Default flag/mask values handed to Core batch operations and call creation.
_EMPTY_FLAGS = 0
_EMPTY_MASK = 0
# Shared immutable placeholder used when an RPC finishes before any initial
# metadata arrived.
_IMMUTABLE_EMPTY_METADATA = tuple()

_UNKNOWN_CANCELLATION_DETAILS = 'RPC cancelled for unknown reason.'
# Templates used by _AioCall._repr(); the non-OK variant additionally shows
# Core's debug_error_string.
_OK_CALL_REPRESENTATION = ('<{} of RPC that terminated with:\n'
                           '\tstatus = {}\n'
                           '\tdetails = "{}"\n'
                           '>')

_NON_OK_CALL_REPRESENTATION = ('<{} of RPC that terminated with:\n'
                               '\tstatus = {}\n'
                               '\tdetails = "{}"\n'
                               '\tdebug_error_string = "{}"\n'
                               '>')
+
+
cdef int _get_send_initial_metadata_flags(object wait_for_ready) except *:
    """Maps the tri-state wait_for_ready option onto Core metadata flags.

    None means "use Core's default"; True/False explicitly set the
    wait-for-ready behavior and mark it as explicitly configured.
    """
    cdef int flags = 0
    # Wait-for-ready can be None, which means using default value in Core.
    if wait_for_ready is not None:
        flags |= InitialMetadataFlags.wait_for_ready_explicitly_set
        if wait_for_ready:
            flags |= InitialMetadataFlags.wait_for_ready

    # Mask off any bits Core does not recognize.
    flags &= InitialMetadataFlags.used_mask
    return flags
+
+
cdef class _AioCall(GrpcCallWrapper):
    """Cython-level wrapper of one asyncio RPC over a Core grpc_call."""

    def __cinit__(self, AioChannel channel, object deadline,
                  bytes method, CallCredentials call_credentials, object wait_for_ready):
        # Bumps the module-wide aio refcount; paired with shutdown_grpc_aio()
        # in __dealloc__.
        init_grpc_aio()
        self.call = NULL
        self._channel = channel
        self._loop = channel.loop
        self._references = []
        self._status = None
        self._initial_metadata = None
        self._waiters_status = []
        self._waiters_initial_metadata = []
        self._done_callbacks = []
        self._is_locally_cancelled = False
        self._deadline = deadline
        self._send_initial_metadata_flags = _get_send_initial_metadata_flags(wait_for_ready)
        self._create_grpc_call(deadline, method, call_credentials)

    def __dealloc__(self):
        # Releases the Core call (if creation succeeded) and the aio module
        # reference taken in __cinit__.
        if self.call:
            grpc_call_unref(self.call)
        shutdown_grpc_aio()

    def _repr(self) -> str:
        """Assembles the RPC representation string."""
        # This needs to be loaded at run time once everything
        # has been loaded.
        from grpc import _common

        if not self.done():
            return '<{} object>'.format(self.__class__.__name__)

        if self._status.code() is StatusCode.ok:
            return _OK_CALL_REPRESENTATION.format(
                self.__class__.__name__,
                _common.CYGRPC_STATUS_CODE_TO_STATUS_CODE[self._status.code()],
                self._status.details())
        else:
            # NOTE(review): details() and the mapped status code appear to be
            # passed in swapped order relative to the "status = / details ="
            # placeholders of _NON_OK_CALL_REPRESENTATION (compare the OK
            # branch above) -- confirm against upstream before relying on
            # this text.
            return _NON_OK_CALL_REPRESENTATION.format(
                self.__class__.__name__,
                self._status.details(),
                _common.CYGRPC_STATUS_CODE_TO_STATUS_CODE[self._status.code()],
                self._status.debug_error_string())

    def __repr__(self) -> str:
        return self._repr()

    def __str__(self) -> str:
        return self._repr()

    cdef void _create_grpc_call(self,
                                object deadline,
                                bytes method,
                                CallCredentials credentials) except *:
        """Creates the corresponding Core object for this RPC.

        For unary calls, the grpc_call lives shortly and can be destroyed after
        invoke start_batch. However, if either side is streaming, the grpc_call
        life span will be longer than one function. So, it would better save it
        as an instance variable than a stack variable, which reflects its
        nature in Core.
        """
        cdef grpc_slice method_slice
        cdef gpr_timespec c_deadline = _timespec_from_time(deadline)
        cdef grpc_call_error set_credentials_error

        method_slice = grpc_slice_from_copied_buffer(
            <const char *> method,
            <size_t> len(method)
        )
        self.call = grpc_channel_create_call(
            self._channel.channel,
            NULL,
            _EMPTY_MASK,
            global_completion_queue(),
            method_slice,
            NULL,
            c_deadline,
            NULL
        )

        if credentials is not None:
            set_credentials_error = grpc_call_set_credentials(self.call, credentials.c())
            if set_credentials_error != GRPC_CALL_OK:
                raise InternalError("Credentials couldn't have been set: {0}".format(set_credentials_error))

        # The slice was copied by Core; release our reference.
        grpc_slice_unref(method_slice)

    cdef void _set_status(self, AioRpcStatus status) except *:
        """Stores the final status, wakes waiters and fires done callbacks."""
        # NOTE(review): `waiters` is declared but never used here -- kept for
        # token fidelity with upstream.
        cdef list waiters

        # No more waiters should be expected since status has been set.
        self._status = status

        # Ensure initial-metadata waiters are released even if the RPC ended
        # before any initial metadata arrived.
        if self._initial_metadata is None:
            self._set_initial_metadata(_IMMUTABLE_EMPTY_METADATA)

        for waiter in self._waiters_status:
            if not waiter.done():
                waiter.set_result(None)
        self._waiters_status = []

        for callback in self._done_callbacks:
            callback()

    cdef void _set_initial_metadata(self, tuple initial_metadata) except *:
        """Stores initial metadata (first writer wins) and wakes its waiters."""
        if self._initial_metadata is not None:
            # Some gRPC calls might end before the initial metadata arrived in
            # the Call object. That causes this method to be invoked twice: 1.
            # filled with an empty metadata; 2. updated with the actual user
            # provided metadata.
            return

        # NOTE(review): `waiters` is declared but never used here -- kept for
        # token fidelity with upstream.
        cdef list waiters

        # No more waiters should be expected since initial metadata has been
        # set.
        self._initial_metadata = initial_metadata

        for waiter in self._waiters_initial_metadata:
            if not waiter.done():
                waiter.set_result(None)
        self._waiters_initial_metadata = []

    def add_done_callback(self, callback):
        # Runs immediately if the RPC already finished; otherwise defers to
        # _set_status().
        if self.done():
            callback()
        else:
            self._done_callbacks.append(callback)

    def time_remaining(self):
        # Returns None for no deadline, otherwise seconds left (clamped at 0).
        if self._deadline is None:
            return None
        else:
            return max(0, self._deadline - time.time())

    def cancel(self, str details):
        """Cancels the RPC in Core with given RPC status.

        Above abstractions must invoke this method to set Core objects into
        proper state.
        """
        self._is_locally_cancelled = True

        cdef object details_bytes
        cdef char *c_details
        cdef grpc_call_error error

        self._set_status(AioRpcStatus(
            StatusCode.cancelled,
            details,
            None,
            None,
        ))

        details_bytes = str_to_bytes(details)
        # Keep the bytes object alive while Core holds the char* below.
        self._references.append(details_bytes)
        c_details = <char *>details_bytes
        # By implementation, grpc_call_cancel_with_status always return OK
        error = grpc_call_cancel_with_status(
            self.call,
            StatusCode.cancelled,
            c_details,
            NULL,
        )
        assert error == GRPC_CALL_OK

    def done(self):
        """Returns if the RPC call has finished.

        Checks if the status has been provided, either
        because the RPC finished or because it was cancelled.

        Returns:
          True if the RPC can be considered finished.
        """
        return self._status is not None

    def cancelled(self):
        """Returns if the RPC was cancelled.

        Returns:
          True if the RPC was cancelled.
        """
        if not self.done():
            return False

        return self._status.code() == StatusCode.cancelled

    async def status(self):
        """Returns the status of the RPC call.

        It returns the finished status of the RPC. If the RPC
        has not finished yet this function will wait until the RPC
        gets finished.

        Returns:
          Finished status of the RPC as an AioRpcStatus object.
        """
        if self._status is not None:
            return self._status

        future = self._loop.create_future()
        self._waiters_status.append(future)
        await future

        return self._status

    def is_ok(self):
        """Returns if the RPC is ended with ok."""
        return self.done() and self._status.code() == StatusCode.ok

    async def initial_metadata(self):
        """Returns the initial metadata of the RPC call.

        If the initial metadata has not been received yet this function will
        wait until the RPC gets finished.

        Returns:
          The tuple object with the initial metadata.
        """
        if self._initial_metadata is not None:
            return self._initial_metadata

        future = self._loop.create_future()
        self._waiters_initial_metadata.append(future)
        await future

        return self._initial_metadata

    def is_locally_cancelled(self):
        """Returns if the RPC was cancelled locally.

        Returns:
          True when was cancelled locally, False when was cancelled remotely or
          is still ongoing.
        """
        if self._is_locally_cancelled:
            return True

        return False

    async def unary_unary(self,
                          bytes request,
                          tuple outbound_initial_metadata):
        """Performs a unary unary RPC.

        Args:
          request: the serialized requests in bytes.
          outbound_initial_metadata: optional outbound metadata.
        """
        cdef tuple ops

        cdef SendInitialMetadataOperation initial_metadata_op = SendInitialMetadataOperation(
            outbound_initial_metadata,
            self._send_initial_metadata_flags)
        cdef SendMessageOperation send_message_op = SendMessageOperation(request, _EMPTY_FLAGS)
        cdef SendCloseFromClientOperation send_close_op = SendCloseFromClientOperation(_EMPTY_FLAGS)
        cdef ReceiveInitialMetadataOperation receive_initial_metadata_op = ReceiveInitialMetadataOperation(_EMPTY_FLAGS)
        cdef ReceiveMessageOperation receive_message_op = ReceiveMessageOperation(_EMPTY_FLAGS)
        cdef ReceiveStatusOnClientOperation receive_status_on_client_op = ReceiveStatusOnClientOperation(_EMPTY_FLAGS)

        ops = (initial_metadata_op, send_message_op, send_close_op,
               receive_initial_metadata_op, receive_message_op,
               receive_status_on_client_op)

        # Executes all operations in one batch.
        # Might raise CancelledError, handling it in Python UnaryUnaryCall.
        await execute_batch(self,
                            ops,
                            self._loop)

        self._set_initial_metadata(receive_initial_metadata_op.initial_metadata())

        cdef grpc_status_code code
        code = receive_status_on_client_op.code()

        self._set_status(AioRpcStatus(
            code,
            receive_status_on_client_op.details(),
            receive_status_on_client_op.trailing_metadata(),
            receive_status_on_client_op.error_string(),
        ))

        # Only a successful RPC yields a response message.
        if code == StatusCode.ok:
            return receive_message_op.message()
        else:
            return None

    async def _handle_status_once_received(self):
        """Handles the status sent by peer once received."""
        cdef ReceiveStatusOnClientOperation op = ReceiveStatusOnClientOperation(_EMPTY_FLAGS)
        cdef tuple ops = (op,)
        await execute_batch(self, ops, self._loop)

        # Halts if the RPC is locally cancelled; cancel() already set the
        # status in that case.
        if self._is_locally_cancelled:
            return

        self._set_status(AioRpcStatus(
            op.code(),
            op.details(),
            op.trailing_metadata(),
            op.error_string(),
        ))

    async def receive_serialized_message(self):
        """Receives one single raw message in bytes."""
        cdef bytes received_message

        # Receives a message. Returns None when failed:
        # * EOF, no more messages to read;
        # * The client application cancels;
        # * The server sends final status.
        received_message = await _receive_message(
            self,
            self._loop
        )
        if received_message is not None:
            return received_message
        else:
            return EOF

    async def send_serialized_message(self, bytes message):
        """Sends one single raw message in bytes."""
        await _send_message(self,
                            message,
                            None,
                            False,
                            self._loop)

    async def send_receive_close(self):
        """Half close the RPC on the client-side."""
        cdef SendCloseFromClientOperation op = SendCloseFromClientOperation(_EMPTY_FLAGS)
        cdef tuple ops = (op,)
        await execute_batch(self, ops, self._loop)

    async def initiate_unary_stream(self,
                                    bytes request,
                                    tuple outbound_initial_metadata):
        """Implementation of the start of a unary-stream call."""
        # Peer may prematurely end this RPC at any point. We need a coroutine
        # that watches if the server sends the final status.
        status_task = self._loop.create_task(self._handle_status_once_received())

        cdef tuple outbound_ops
        cdef Operation initial_metadata_op = SendInitialMetadataOperation(
            outbound_initial_metadata,
            self._send_initial_metadata_flags)
        cdef Operation send_message_op = SendMessageOperation(
            request,
            _EMPTY_FLAGS)
        cdef Operation send_close_op = SendCloseFromClientOperation(
            _EMPTY_FLAGS)

        outbound_ops = (
            initial_metadata_op,
            send_message_op,
            send_close_op,
        )

        try:
            # Sends out the request message.
            await execute_batch(self,
                                outbound_ops,
                                self._loop)

            # Receives initial metadata.
            self._set_initial_metadata(
                await _receive_initial_metadata(self,
                                                self._loop),
            )
        except ExecuteBatchError as batch_error:
            # Core should explain why this batch failed
            await status_task

    async def stream_unary(self,
                           tuple outbound_initial_metadata,
                           object metadata_sent_observer):
        """Actual implementation of the complete stream-unary call.

        Needs to pay extra attention to the raise mechanism. If we want to
        propagate the final status exception, then we have to raise it.
        Otherwise, it would end normally and raise `StopAsyncIteration()`.
        """
        try:
            # Sends out initial_metadata ASAP.
            await _send_initial_metadata(self,
                                         outbound_initial_metadata,
                                         self._send_initial_metadata_flags,
                                         self._loop)
            # Notify upper level that sending messages are allowed now.
            metadata_sent_observer()

            # Receives initial metadata.
            self._set_initial_metadata(
                await _receive_initial_metadata(self, self._loop)
            )
        except ExecuteBatchError:
            # Core should explain why this batch failed
            await self._handle_status_once_received()

            # Allow upper layer to proceed only if the status is set
            metadata_sent_observer()
            return None

        cdef tuple inbound_ops
        cdef ReceiveMessageOperation receive_message_op = ReceiveMessageOperation(_EMPTY_FLAGS)
        cdef ReceiveStatusOnClientOperation receive_status_on_client_op = ReceiveStatusOnClientOperation(_EMPTY_FLAGS)
        inbound_ops = (receive_message_op, receive_status_on_client_op)

        # Executes all operations in one batch.
        await execute_batch(self,
                            inbound_ops,
                            self._loop)

        cdef grpc_status_code code
        code = receive_status_on_client_op.code()

        self._set_status(AioRpcStatus(
            code,
            receive_status_on_client_op.details(),
            receive_status_on_client_op.trailing_metadata(),
            receive_status_on_client_op.error_string(),
        ))

        if code == StatusCode.ok:
            return receive_message_op.message()
        else:
            return None

    async def initiate_stream_stream(self,
                                     tuple outbound_initial_metadata,
                                     object metadata_sent_observer):
        """Implementation of the start of a stream-stream call.

        Needs to pay extra attention to the raise mechanism. If we want to
        propagate the final status exception, then we have to raise it.
        Otherwise, it would end normally and raise `StopAsyncIteration()`.
        """
        # Peer may prematurely end this RPC at any point. We need a coroutine
        # that watches if the server sends the final status.
        status_task = self._loop.create_task(self._handle_status_once_received())

        try:
            # Sends out initial_metadata ASAP.
            await _send_initial_metadata(self,
                                         outbound_initial_metadata,
                                         self._send_initial_metadata_flags,
                                         self._loop)
            # Notify upper level that sending messages are allowed now.
            metadata_sent_observer()

            # Receives initial metadata.
            self._set_initial_metadata(
                await _receive_initial_metadata(self, self._loop)
            )
        except ExecuteBatchError as batch_error:
            # Core should explain why this batch failed
            await status_task

            # Allow upper layer to proceed only if the status is set
            metadata_sent_observer()
diff --git a/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/aio/callback_common.pxd.pxi b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/aio/callback_common.pxd.pxi
new file mode 100644
index 0000000000..e54e510754
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/aio/callback_common.pxd.pxi
@@ -0,0 +1,57 @@
+# Copyright 2019 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
cdef class CallbackFailureHandler:
    """Declares the error-reporting strategy for a failed Core callback."""
    # Name of the Core function whose callback failed (for the message).
    cdef str _core_function_name
    # Arbitrary object describing the failure context.
    cdef object _error_details
    # Exception class instantiated and set on the waiting future.
    cdef object _exception_type

    cdef handle(self, object future)
+
+
cdef struct CallbackContext:
    # C struct to store callback context in the form of pointers.
    #
    # Attributes:
    #   functor: A grpc_completion_queue_functor represents the
    #     callback function in the only way Core understands.
    #   waiter: An asyncio.Future object that fulfills when the callback is
    #     invoked by Core.
    #   loop: The asyncio event loop the waiter belongs to.
    #   failure_handler: A CallbackFailureHandler object that called when Core
    #     returns 'success == 0' state.
    #   callback_wrapper: A self-reference to the CallbackWrapper to help life
    #     cycle management.
    #
    # NOTE: plain PyObject* pointers carry no refcount; the CallbackWrapper
    # keeps the referenced Python objects alive for the struct's lifetime.
    grpc_completion_queue_functor functor
    cpython.PyObject *waiter
    cpython.PyObject *loop
    cpython.PyObject *failure_handler
    cpython.PyObject *callback_wrapper
+
+
cdef class CallbackWrapper:
    """Declares the bridge between a Core functor callback and asyncio."""
    cdef CallbackContext context
    # Strong references matching the borrowed pointers in `context`.
    cdef object _reference_of_future
    cdef object _reference_of_failure_handler

    # NOTE(review): parameter is named `succeed` here but `success` in the
    # implementation -- Cython only matches types, but aligning the names
    # upstream would aid grepping.
    @staticmethod
    cdef void functor_run(
        grpc_completion_queue_functor* functor,
        int succeed)

    cdef grpc_completion_queue_functor *c_functor(self)
+
+
cdef class GrpcCallWrapper:
    """Minimal base holding the raw Core call pointer for aio call classes."""
    cdef grpc_call* call
diff --git a/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/aio/callback_common.pyx.pxi b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/aio/callback_common.pyx.pxi
new file mode 100644
index 0000000000..14a0098fc2
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/aio/callback_common.pyx.pxi
@@ -0,0 +1,185 @@
+# Copyright 2019 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
cdef class CallbackFailureHandler:
    """Turns a failed Core callback into an exception on a waiting future."""

    def __cinit__(self,
                  str core_function_name,
                  object error_details,
                  object exception_type):
        """Handles failure by raising exception."""
        self._core_function_name = core_function_name
        self._error_details = error_details
        self._exception_type = exception_type

    cdef handle(self, object future):
        # Fails the waiter with a message naming the Core function involved.
        future.set_exception(self._exception_type(
            'Failed "%s": %s' % (self._core_function_name, self._error_details)
        ))
+
+
cdef class CallbackWrapper:
    """Adapts a Core completion-queue functor to resolve an asyncio future."""

    def __cinit__(self, object future, object loop, CallbackFailureHandler failure_handler):
        self.context.functor.functor_run = self.functor_run
        self.context.waiter = <cpython.PyObject*>future
        self.context.loop = <cpython.PyObject*>loop
        self.context.failure_handler = <cpython.PyObject*>failure_handler
        self.context.callback_wrapper = <cpython.PyObject*>self
        # NOTE(lidiz) Not using a list here, because this class is critical in
        # data path. We should make it as efficient as possible.
        self._reference_of_future = future
        self._reference_of_failure_handler = failure_handler
        # NOTE(lidiz) We need to ensure when Core invokes our callback, the
        # callback function itself is not deallocated. Otherwise, we will get
        # a segfault. We can view this as Core holding a ref.
        cpython.Py_INCREF(self)

    @staticmethod
    cdef void functor_run(
            grpc_completion_queue_functor* functor,
            int success):
        # The functor is the first field of CallbackContext, so the pointer
        # can be reinterpreted to recover the whole context.
        cdef CallbackContext *context = <CallbackContext *>functor
        cdef object waiter = <object>context.waiter
        if not waiter.cancelled():
            if success == 0:
                (<CallbackFailureHandler>context.failure_handler).handle(waiter)
            else:
                waiter.set_result(None)
        # Drop the self-reference taken in __cinit__; Core is done with us.
        cpython.Py_DECREF(<object>context.callback_wrapper)

    cdef grpc_completion_queue_functor *c_functor(self):
        # Exposes the embedded functor so it can be handed to Core APIs.
        return &self.context.functor
+
+
# Shared handler for completion-queue shutdown callbacks that report failure.
cdef CallbackFailureHandler CQ_SHUTDOWN_FAILURE_HANDLER = CallbackFailureHandler(
    'grpc_completion_queue_shutdown',
    'Unknown',
    InternalError)
+
+
class ExecuteBatchError(InternalError):
    """Raised when execute batch returns a failure from Core."""
+
+
async def execute_batch(GrpcCallWrapper grpc_call_wrapper,
                        tuple operations,
                        object loop):
    """The callback version of start batch operations.

    Starts `operations` as one grpc_call_start_batch and awaits its
    completion; raises ExecuteBatchError if Core rejects the batch or the
    callback reports failure.
    """
    cdef _BatchOperationTag batch_operation_tag = _BatchOperationTag(None, operations, None)
    batch_operation_tag.prepare()

    cdef object future = loop.create_future()
    cdef CallbackWrapper wrapper = CallbackWrapper(
        future,
        loop,
        CallbackFailureHandler('execute_batch', operations, ExecuteBatchError))
    cdef grpc_call_error error = grpc_call_start_batch(
        grpc_call_wrapper.call,
        batch_operation_tag.c_ops,
        batch_operation_tag.c_nops,
        wrapper.c_functor(), NULL)

    if error != GRPC_CALL_OK:
        grpc_call_error_string = grpc_call_error_to_string(error).decode()
        raise ExecuteBatchError("Failed grpc_call_start_batch: {} with grpc_call_error value: '{}'".format(error, grpc_call_error_string))

    await future

    cdef grpc_event c_event
    # Tag.event must be called, otherwise messages won't be parsed from C
    batch_operation_tag.event(c_event)
+
+
cdef prepend_send_initial_metadata_op(tuple ops, tuple metadata):
    # Eventually, this function should be the only function that produces
    # SendInitialMetadataOperation. So we have more control over the flag.
    #
    # NOTE(review): `_EMPTY_FLAG` (singular) is not defined in this .pxi --
    # presumably it comes from another .pxi concatenated into cygrpc;
    # confirm it is not a typo for `_EMPTY_FLAGS`.
    return (SendInitialMetadataOperation(
        metadata,
        _EMPTY_FLAG
    ),) + ops
+
+
async def _receive_message(GrpcCallWrapper grpc_call_wrapper,
                           object loop):
    """Retrieves parsed messages from Core.

    The messages maybe already in Core's buffer, so there isn't a 1-to-1
    mapping between this and the underlying "socket.read()". Also, eventually,
    this function will end with an EOF, which reads empty message.
    """
    cdef ReceiveMessageOperation receive_op = ReceiveMessageOperation(_EMPTY_FLAG)
    cdef tuple ops = (receive_op,)
    try:
        await execute_batch(grpc_call_wrapper, ops, loop)
    except ExecuteBatchError as e:
        # NOTE(lidiz) The receive message operation has two ways to indicate
        # finish state : 1) returns empty message due to EOF; 2) fails inside
        # the callback (e.g. cancelled).
        #
        # Since they all indicate finish, they had better be merged, so the
        # failure is deliberately swallowed here and only logged.
        _LOGGER.debug('Failed to receive any message from Core')
    # NOTE(lidiz) The returned message might be an empty bytes (aka. b'').
    # Please explicitly check if it is None or falsey string object!
    return receive_op.message()
+
+
async def _send_message(GrpcCallWrapper grpc_call_wrapper,
                        bytes message,
                        Operation send_initial_metadata_op,
                        int write_flag,
                        object loop):
    """Sends one message, optionally bundling the initial-metadata op first."""
    cdef SendMessageOperation op = SendMessageOperation(message, write_flag)
    cdef tuple ops = (op,)
    if send_initial_metadata_op is not None:
        ops = (send_initial_metadata_op,) + ops
    await execute_batch(grpc_call_wrapper, ops, loop)
+
+
async def _send_initial_metadata(GrpcCallWrapper grpc_call_wrapper,
                                 tuple metadata,
                                 int flags,
                                 object loop):
    """Sends the initial metadata for the call as a single batch."""
    cdef SendInitialMetadataOperation op = SendInitialMetadataOperation(
        metadata,
        flags)
    cdef tuple ops = (op,)
    await execute_batch(grpc_call_wrapper, ops, loop)
+
+
async def _receive_initial_metadata(GrpcCallWrapper grpc_call_wrapper,
                                    object loop):
    """Receives and returns the call's initial metadata tuple."""
    cdef ReceiveInitialMetadataOperation op = ReceiveInitialMetadataOperation(_EMPTY_FLAGS)
    cdef tuple ops = (op,)
    await execute_batch(grpc_call_wrapper, ops, loop)
    return op.initial_metadata()
+
async def _send_error_status_from_server(GrpcCallWrapper grpc_call_wrapper,
                                         grpc_status_code code,
                                         str details,
                                         tuple trailing_metadata,
                                         Operation send_initial_metadata_op,
                                         object loop):
    """Sends a non-OK final status (server side), optionally with metadata."""
    # Programming-error guard: an OK status must not go through this path.
    assert code != StatusCode.ok, 'Expecting non-ok status code.'
    cdef SendStatusFromServerOperation op = SendStatusFromServerOperation(
        trailing_metadata,
        code,
        details,
        _EMPTY_FLAGS,
    )
    cdef tuple ops = (op,)
    if send_initial_metadata_op is not None:
        ops = (send_initial_metadata_op,) + ops
    await execute_batch(grpc_call_wrapper, ops, loop)
diff --git a/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/aio/channel.pxd.pxi b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/aio/channel.pxd.pxi
new file mode 100644
index 0000000000..03b4990e48
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/aio/channel.pxd.pxi
@@ -0,0 +1,27 @@
+# Copyright 2019 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
# Lifecycle states of an AioChannel; transitions are READY -> CLOSING
# (graceful close started) -> DESTROYED (Core channel released).
cdef enum AioChannelStatus:
    AIO_CHANNEL_STATUS_UNKNOWN
    AIO_CHANNEL_STATUS_READY
    AIO_CHANNEL_STATUS_CLOSING
    AIO_CHANNEL_STATUS_DESTROYED
+
cdef class AioChannel:
    """Declaration of the Cython wrapper for one aio Core channel."""
    cdef:
        # Raw Core channel pointer.
        grpc_channel * channel
        # The asyncio event loop calls on this channel run on.
        object loop
        bytes _target
        AioChannelStatus _status
        # True when the channel was built with credentials (TLS etc.);
        # call credentials are only allowed on secure channels.
        bint _is_secure
diff --git a/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/aio/channel.pyx.pxi b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/aio/channel.pyx.pxi
new file mode 100644
index 0000000000..4286ab1d27
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/aio/channel.pyx.pxi
@@ -0,0 +1,135 @@
+# Copyright 2019 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
class _WatchConnectivityFailed(Exception):
    """Dedicated exception class for watch connectivity failed.

    It might be failed due to deadline exceeded.
    """
# Shared failure handler: converts a failed watch_connectivity_state callback
# (e.g. deadline exceeded) into _WatchConnectivityFailed.
cdef CallbackFailureHandler _WATCH_CONNECTIVITY_FAILURE_HANDLER = CallbackFailureHandler(
    'watch_connectivity_state',
    'Timed out',
    _WatchConnectivityFailed)
+
+
cdef class AioChannel:
    """Cython-level wrapper of one aio Core channel."""

    def __cinit__(self, bytes target, tuple options, ChannelCredentials credentials, object loop):
        # Bumps the module-wide aio refcount; paired with shutdown_grpc_aio()
        # in __dealloc__.
        init_grpc_aio()
        if options is None:
            options = ()
        cdef _ChannelArgs channel_args = _ChannelArgs(options)
        self._target = target
        self.loop = loop
        self._status = AIO_CHANNEL_STATUS_READY

        if credentials is None:
            self._is_secure = False
            # Insecure channels still go through the credentials API in
            # newer Core versions.
            creds = grpc_insecure_credentials_create();
            self.channel = grpc_channel_create(<char *>target,
                                               creds,
                                               channel_args.c_args())
            grpc_channel_credentials_release(creds)
        else:
            self._is_secure = True
            creds = <grpc_channel_credentials *> credentials.c()
            self.channel = grpc_channel_create(<char *>target,
                                               creds,
                                               channel_args.c_args())
            grpc_channel_credentials_release(creds)

    def __dealloc__(self):
        # NOTE(review): only releases the aio module reference; the Core
        # channel itself is destroyed in close() -- confirm callers always
        # close() before dropping the last reference.
        shutdown_grpc_aio()

    def __repr__(self):
        class_name = self.__class__.__name__
        id_ = id(self)
        return f"<{class_name} {id_}>"

    def check_connectivity_state(self, bint try_to_connect):
        """A Cython wrapper for Core's check connectivity state API."""
        if self._status == AIO_CHANNEL_STATUS_DESTROYED:
            return ConnectivityState.shutdown
        else:
            return grpc_channel_check_connectivity_state(
                self.channel,
                try_to_connect,
            )

    async def watch_connectivity_state(self,
                                       grpc_connectivity_state last_observed_state,
                                       object deadline):
        """Watch for one connectivity state change.

        Keeps mirroring the behavior from Core, so we can easily switch to
        other design of API if necessary.

        Returns:
          True when a state change was observed; False when the watch timed
          out (deadline exceeded).
        """
        if self._status in (AIO_CHANNEL_STATUS_DESTROYED, AIO_CHANNEL_STATUS_CLOSING):
            raise UsageError('Channel is closed.')

        cdef gpr_timespec c_deadline = _timespec_from_time(deadline)

        cdef object future = self.loop.create_future()
        cdef CallbackWrapper wrapper = CallbackWrapper(
            future,
            self.loop,
            _WATCH_CONNECTIVITY_FAILURE_HANDLER)
        grpc_channel_watch_connectivity_state(
            self.channel,
            last_observed_state,
            c_deadline,
            global_completion_queue(),
            wrapper.c_functor())

        try:
            await future
        except _WatchConnectivityFailed:
            return False
        else:
            return True

    def closing(self):
        # Marks the channel as closing; new calls/watches will be rejected.
        self._status = AIO_CHANNEL_STATUS_CLOSING

    def close(self):
        # Releases the Core channel and marks this wrapper as destroyed.
        self._status = AIO_CHANNEL_STATUS_DESTROYED
        grpc_channel_destroy(self.channel)

    def closed(self):
        return self._status in (AIO_CHANNEL_STATUS_CLOSING, AIO_CHANNEL_STATUS_DESTROYED)

    def call(self,
             bytes method,
             object deadline,
             object python_call_credentials,
             object wait_for_ready):
        """Assembles a Cython Call object.

        Returns:
          An _AioCall object.

        Raises:
          UsageError: if the channel is closed/closing, or call credentials
            are supplied on an insecure channel.
        """
        if self.closed():
            raise UsageError('Channel is closed.')

        cdef CallCredentials cython_call_credentials
        if python_call_credentials is not None:
            if not self._is_secure:
                raise UsageError("Call credentials are only valid on secure channels")

            cython_call_credentials = python_call_credentials._credentials
        else:
            cython_call_credentials = None

        return _AioCall(self, deadline, method, cython_call_credentials, wait_for_ready)
diff --git a/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/aio/common.pyx.pxi b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/aio/common.pyx.pxi
new file mode 100644
index 0000000000..f698390cd5
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/aio/common.pyx.pxi
@@ -0,0 +1,202 @@
+# Copyright 2019 The gRPC Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from cpython.version cimport PY_MAJOR_VERSION, PY_MINOR_VERSION
+
+TYPE_METADATA_STRING = "Tuple[Tuple[str, Union[str, bytes]]...]"
+
+
+cdef grpc_status_code get_status_code(object code) except *:
+ if isinstance(code, int):
+ if code >= StatusCode.ok and code <= StatusCode.data_loss:
+ return code
+ else:
+ return StatusCode.unknown
+ else:
+ try:
+ return code.value[0]
+ except (KeyError, AttributeError):
+ return StatusCode.unknown
+
+
+cdef object deserialize(object deserializer, bytes raw_message):
+ """Perform deserialization on raw bytes.
+
+ Failure to deserialize is a fatal error.
+ """
+ if deserializer:
+ return deserializer(raw_message)
+ else:
+ return raw_message
+
+
+cdef bytes serialize(object serializer, object message):
+ """Perform serialization on a message.
+
+ Failure to serialize is a fatal error.
+ """
+ if isinstance(message, str):
+ message = message.encode('utf-8')
+ if serializer:
+ return serializer(message)
+ else:
+ return message
+
+
+class _EOF:
+
+ def __bool__(self):
+ return False
+
+ def __len__(self):
+ return 0
+
+ def _repr(self) -> str:
+ return '<grpc.aio.EOF>'
+
+ def __repr__(self) -> str:
+ return self._repr()
+
+ def __str__(self) -> str:
+ return self._repr()
+
+
+EOF = _EOF()
+
+_COMPRESSION_METADATA_STRING_MAPPING = {
+ CompressionAlgorithm.none: 'identity',
+ CompressionAlgorithm.deflate: 'deflate',
+ CompressionAlgorithm.gzip: 'gzip',
+}
+
+class BaseError(Exception):
+ """The base class for exceptions generated by gRPC AsyncIO stack."""
+
+
+class UsageError(BaseError):
+ """Raised when the usage of API by applications is inappropriate.
+
+ For example, trying to invoke RPC on a closed channel, mixing two styles
+ of streaming API on the client side. This exception should not be
+ suppressed.
+ """
+
+
+class AbortError(BaseError):
+ """Raised when calling abort in servicer methods.
+
+ This exception should not be suppressed. Applications may catch it to
+ perform certain clean-up logic, and then re-raise it.
+ """
+
+
+class InternalError(BaseError):
+ """Raised upon unexpected errors in native code."""
+
+
+def schedule_coro_threadsafe(object coro, object loop):
+ try:
+ return loop.create_task(coro)
+ except RuntimeError as runtime_error:
+ if 'Non-thread-safe operation' in str(runtime_error):
+ return asyncio.run_coroutine_threadsafe(
+ coro,
+ loop,
+ )
+ else:
+ raise
+
+
+def async_generator_to_generator(object agen, object loop):
+ """Converts an async generator into generator."""
+ try:
+ while True:
+ future = asyncio.run_coroutine_threadsafe(
+ agen.__anext__(),
+ loop
+ )
+ response = future.result()
+ if response is EOF:
+ break
+ else:
+ yield response
+ except StopAsyncIteration:
+ # If StopAsyncIteration is raised, end this generator.
+ pass
+
+
+async def generator_to_async_generator(object gen, object loop, object thread_pool):
+ """Converts a generator into async generator.
+
+ The generator might block, so we need to delegate the iteration to thread
+ pool. Also, we can't simply delegate __next__ to the thread pool, otherwise
+ we will see the following error:
+
+ TypeError: StopIteration interacts badly with generators and cannot be
+ raised into a Future
+ """
+ queue = asyncio.Queue(maxsize=1)
+
+ def yield_to_queue():
+ try:
+ for item in gen:
+ asyncio.run_coroutine_threadsafe(queue.put(item), loop).result()
+ finally:
+ asyncio.run_coroutine_threadsafe(queue.put(EOF), loop).result()
+
+ future = loop.run_in_executor(
+ thread_pool,
+ yield_to_queue,
+ )
+
+ while True:
+ response = await queue.get()
+ if response is EOF:
+ break
+ else:
+ yield response
+
+ # Port the exception if there is any
+ await future
+
+
+if PY_MAJOR_VERSION >= 3 and PY_MINOR_VERSION >= 7:
+ def get_working_loop():
+ """Returns a running event loop.
+
+ Due to a defect of asyncio.get_event_loop, its returned event loop might
+ not be set as the default event loop for the main thread.
+ """
+ try:
+ return asyncio.get_running_loop()
+ except RuntimeError:
+ return asyncio.get_event_loop_policy().get_event_loop()
+else:
+ def get_working_loop():
+ """Returns a running event loop."""
+ return asyncio.get_event_loop()
+
+
+def raise_if_not_valid_trailing_metadata(object metadata):
+ if not hasattr(metadata, '__iter__') or isinstance(metadata, dict):
+ raise TypeError(f'Invalid trailing metadata type, expected {TYPE_METADATA_STRING}: {metadata}')
+ for item in metadata:
+ if not isinstance(item, tuple):
+ raise TypeError(f'Invalid trailing metadata type, expected {TYPE_METADATA_STRING}: {metadata}')
+ if len(item) != 2:
+ raise TypeError(f'Invalid trailing metadata type, expected {TYPE_METADATA_STRING}: {metadata}')
+ if not isinstance(item[0], str):
+ raise TypeError(f'Invalid trailing metadata type, expected {TYPE_METADATA_STRING}: {metadata}')
+ if not isinstance(item[1], str) and not isinstance(item[1], bytes):
+ raise TypeError(f'Invalid trailing metadata type, expected {TYPE_METADATA_STRING}: {metadata}')
diff --git a/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/aio/completion_queue.pxd.pxi b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/aio/completion_queue.pxd.pxi
new file mode 100644
index 0000000000..578131f7ee
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/aio/completion_queue.pxd.pxi
@@ -0,0 +1,52 @@
+# Copyright 2020 The gRPC Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+ctypedef queue[grpc_event] cpp_event_queue
+
+
+IF UNAME_SYSNAME == "Windows":
+ cdef extern from "winsock2.h" nogil:
+ ctypedef uint32_t WIN_SOCKET "SOCKET"
+ WIN_SOCKET win_socket "socket" (int af, int type, int protocol)
+ int win_socket_send "send" (WIN_SOCKET s, const char *buf, int len, int flags)
+
+
+cdef void _unified_socket_write(int fd) nogil
+
+
+cdef class BaseCompletionQueue:
+ cdef grpc_completion_queue *_cq
+
+ cdef grpc_completion_queue* c_ptr(self)
+
+
+cdef class _BoundEventLoop:
+ cdef readonly object loop
+ cdef readonly object read_socket # socket.socket
+ cdef bint _has_reader
+
+
+cdef class PollerCompletionQueue(BaseCompletionQueue):
+ cdef bint _shutdown
+ cdef cpp_event_queue _queue
+ cdef mutex _queue_mutex
+ cdef object _poller_thread # threading.Thread
+ cdef int _write_fd
+ cdef object _read_socket # socket.socket
+ cdef object _write_socket # socket.socket
+ cdef dict _loops # Mapping[asyncio.AbstractLoop, _BoundEventLoop]
+
+ cdef void _poll(self) nogil
+ cdef shutdown(self)
diff --git a/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/aio/completion_queue.pyx.pxi b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/aio/completion_queue.pyx.pxi
new file mode 100644
index 0000000000..b9132c8560
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/aio/completion_queue.pyx.pxi
@@ -0,0 +1,174 @@
+# Copyright 2020 The gRPC Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import socket
+
+cdef gpr_timespec _GPR_INF_FUTURE = gpr_inf_future(GPR_CLOCK_REALTIME)
+cdef float _POLL_AWAKE_INTERVAL_S = 0.2
+
+# This bool indicates if the event loop impl can monitor a given fd, or has
+# loop.add_reader method.
+cdef bint _has_fd_monitoring = True
+
+IF UNAME_SYSNAME == "Windows":
+ cdef void _unified_socket_write(int fd) nogil:
+ win_socket_send(<WIN_SOCKET>fd, b"1", 1, 0)
+ELSE:
+ cimport posix.unistd as unistd
+
+ cdef void _unified_socket_write(int fd) nogil:
+ unistd.write(fd, b"1", 1)
+
+
+def _handle_callback_wrapper(CallbackWrapper callback_wrapper, int success):
+ CallbackWrapper.functor_run(callback_wrapper.c_functor(), success)
+
+
+cdef class BaseCompletionQueue:
+
+ cdef grpc_completion_queue* c_ptr(self):
+ return self._cq
+
+
+cdef class _BoundEventLoop:
+
+ def __cinit__(self, object loop, object read_socket, object handler):
+ global _has_fd_monitoring
+ self.loop = loop
+ self.read_socket = read_socket
+ reader_function = functools.partial(
+ handler,
+ loop
+ )
+ # NOTE(lidiz) There isn't a way to cleanly pre-check if fd monitoring
+ # support is available or not. Checking the event loop policy is not
+ # good enough. The application can have its own loop implementation, or
+ # uses different types of event loops (e.g., 1 Proactor, 3 Selectors).
+ if _has_fd_monitoring:
+ try:
+ self.loop.add_reader(self.read_socket, reader_function)
+ self._has_reader = True
+ except NotImplementedError:
+ _has_fd_monitoring = False
+ self._has_reader = False
+
+ def close(self):
+ if self.loop:
+ if self._has_reader:
+ self.loop.remove_reader(self.read_socket)
+
+
+cdef class PollerCompletionQueue(BaseCompletionQueue):
+
+ def __cinit__(self):
+ self._cq = grpc_completion_queue_create_for_next(NULL)
+ self._shutdown = False
+ self._poller_thread = threading.Thread(target=self._poll_wrapper, daemon=True)
+ self._poller_thread.start()
+
+ self._read_socket, self._write_socket = socket.socketpair()
+ self._write_fd = self._write_socket.fileno()
+ self._loops = {}
+
+ # The read socket might be read by multiple threads. But only one of them will
+ # read the 1 byte sent by the poller thread. This setting is essential to allow
+ # multiple loops in multiple threads bound to the same poller.
+ self._read_socket.setblocking(False)
+
+ self._queue = cpp_event_queue()
+
+ def bind_loop(self, object loop):
+ if loop in self._loops:
+ return
+ else:
+ self._loops[loop] = _BoundEventLoop(loop, self._read_socket, self._handle_events)
+
+ cdef void _poll(self) nogil:
+ cdef grpc_event event
+ cdef CallbackContext *context
+
+ while not self._shutdown:
+ event = grpc_completion_queue_next(self._cq,
+ _GPR_INF_FUTURE,
+ NULL)
+
+ if event.type == GRPC_QUEUE_TIMEOUT:
+ with gil:
+ raise AssertionError("Core should not return GRPC_QUEUE_TIMEOUT!")
+ elif event.type == GRPC_QUEUE_SHUTDOWN:
+ self._shutdown = True
+ else:
+ self._queue_mutex.lock()
+ self._queue.push(event)
+ self._queue_mutex.unlock()
+ if _has_fd_monitoring:
+ _unified_socket_write(self._write_fd)
+ else:
+ with gil:
+ # Event loops can be paused or killed at any time. So,
+ # instead of delegating to any thread, the polling thread
+ # should handle the distribution of the event.
+ self._handle_events(None)
+
+ def _poll_wrapper(self):
+ with nogil:
+ self._poll()
+
+ cdef shutdown(self):
+ # Removes the socket hook from loops
+ for loop in self._loops:
+ self._loops.get(loop).close()
+
+ # TODO(https://github.com/grpc/grpc/issues/22365) perform graceful shutdown
+ grpc_completion_queue_shutdown(self._cq)
+ while not self._shutdown:
+ self._poller_thread.join(timeout=_POLL_AWAKE_INTERVAL_S)
+ grpc_completion_queue_destroy(self._cq)
+
+ # Clean up socket resources
+ self._read_socket.close()
+ self._write_socket.close()
+
+ def _handle_events(self, object context_loop):
+ cdef bytes data
+ if _has_fd_monitoring:
+ # If fd monitoring is working, clean the socket without blocking.
+ data = self._read_socket.recv(1)
+ cdef grpc_event event
+ cdef CallbackContext *context
+
+ while True:
+ self._queue_mutex.lock()
+ if self._queue.empty():
+ self._queue_mutex.unlock()
+ break
+ else:
+ event = self._queue.front()
+ self._queue.pop()
+ self._queue_mutex.unlock()
+
+ context = <CallbackContext *>event.tag
+ loop = <object>context.loop
+ if loop is context_loop:
+ # Executes callbacks: complete the future
+ CallbackWrapper.functor_run(
+ <grpc_completion_queue_functor *>event.tag,
+ event.success
+ )
+ else:
+ loop.call_soon_threadsafe(
+ _handle_callback_wrapper,
+ <CallbackWrapper>context.callback_wrapper,
+ event.success
+ )
diff --git a/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/aio/grpc_aio.pxd.pxi b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/aio/grpc_aio.pxd.pxi
new file mode 100644
index 0000000000..ebf0660174
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/aio/grpc_aio.pxd.pxi
@@ -0,0 +1,43 @@
+# Copyright 2019 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# distutils: language=c++
+
+cdef class _AioState:
+ cdef object lock # threading.RLock
+ cdef int refcount
+ cdef object engine # AsyncIOEngine
+ cdef BaseCompletionQueue cq
+
+
+cdef grpc_completion_queue *global_completion_queue()
+
+
+cpdef init_grpc_aio()
+
+
+cpdef shutdown_grpc_aio()
+
+
+cdef extern from "src/core/lib/iomgr/timer_manager.h":
+ void grpc_timer_manager_set_threading(bint enabled)
+
+
+cdef extern from "src/core/lib/iomgr/iomgr_internal.h":
+ void grpc_set_default_iomgr_platform()
+
+
+cdef extern from "src/core/lib/iomgr/executor.h" namespace "grpc_core":
+ cdef cppclass Executor:
+ @staticmethod
+ void SetThreadingAll(bint enable)
diff --git a/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/aio/grpc_aio.pyx.pxi b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/aio/grpc_aio.pyx.pxi
new file mode 100644
index 0000000000..7f9f52da7c
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/aio/grpc_aio.pyx.pxi
@@ -0,0 +1,114 @@
+# Copyright 2019 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import enum
+
+cdef str _GRPC_ASYNCIO_ENGINE = os.environ.get('GRPC_ASYNCIO_ENGINE', 'poller').upper()
+cdef _AioState _global_aio_state = _AioState()
+
+
+class AsyncIOEngine(enum.Enum):
+ # NOTE(lidiz) the support for custom_io_manager is removed in favor of the
+ # EventEngine project, which will be the only IO platform in Core.
+ CUSTOM_IO_MANAGER = 'custom_io_manager'
+ POLLER = 'poller'
+
+
+cdef _default_asyncio_engine():
+ return AsyncIOEngine.POLLER
+
+
+cdef grpc_completion_queue *global_completion_queue():
+ return _global_aio_state.cq.c_ptr()
+
+
+cdef class _AioState:
+
+ def __cinit__(self):
+ self.lock = threading.RLock()
+ self.refcount = 0
+ self.engine = None
+ self.cq = None
+
+
+cdef _initialize_poller():
+ # Initializes gRPC Core, must be called before other Core API
+ grpc_init()
+
+ # Creates the only completion queue
+ _global_aio_state.cq = PollerCompletionQueue()
+
+
+cdef _actual_aio_initialization():
+ # Picks the engine for gRPC AsyncIO Stack
+ _global_aio_state.engine = AsyncIOEngine.__members__.get(
+ _GRPC_ASYNCIO_ENGINE,
+ _default_asyncio_engine(),
+ )
+ _LOGGER.debug('Using %s as I/O engine', _global_aio_state.engine)
+
+ # Initializes the process-level state accordingly
+ if _global_aio_state.engine is AsyncIOEngine.POLLER:
+ _initialize_poller()
+ else:
+ raise ValueError('Unsupported engine type [%s]' % _global_aio_state.engine)
+
+
+def _grpc_shutdown_wrapper(_):
+ """A thin Python wrapper of Core's shutdown function.
+
+ Defining functions is not allowed in "cdef" functions, and Cython complains
+ about a simple lambda with a C function.
+ """
+ grpc_shutdown()
+
+
+cdef _actual_aio_shutdown():
+ if _global_aio_state.engine is AsyncIOEngine.POLLER:
+ (<PollerCompletionQueue>_global_aio_state.cq).shutdown()
+ grpc_shutdown()
+ else:
+ raise ValueError('Unsupported engine type [%s]' % _global_aio_state.engine)
+
+
+cdef _initialize_per_loop():
+ cdef object loop = get_working_loop()
+ if _global_aio_state.engine is AsyncIOEngine.POLLER:
+ _global_aio_state.cq.bind_loop(loop)
+
+
+cpdef init_grpc_aio():
+ """Initializes the gRPC AsyncIO module.
+
+ Expected to be invoked on critical class constructors.
+ E.g., AioChannel, AioServer.
+ """
+ with _global_aio_state.lock:
+ _global_aio_state.refcount += 1
+ if _global_aio_state.refcount == 1:
+ _actual_aio_initialization()
+ _initialize_per_loop()
+
+
+cpdef shutdown_grpc_aio():
+ """Shuts down the gRPC AsyncIO module.
+
+ Expected to be invoked on critical class destructors.
+ E.g., AioChannel, AioServer.
+ """
+ with _global_aio_state.lock:
+ assert _global_aio_state.refcount > 0
+ _global_aio_state.refcount -= 1
+ if not _global_aio_state.refcount:
+ _actual_aio_shutdown()
diff --git a/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/aio/rpc_status.pxd.pxi b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/aio/rpc_status.pxd.pxi
new file mode 100644
index 0000000000..3780d8ddf2
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/aio/rpc_status.pxd.pxi
@@ -0,0 +1,29 @@
+# Copyright 2019 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Exceptions for the aio version of the RPC calls."""
+
+
+cdef class AioRpcStatus(Exception):
+ cdef readonly:
+ grpc_status_code _code
+ str _details
+ # Per the spec, only client-side status has trailing metadata.
+ tuple _trailing_metadata
+ str _debug_error_string
+
+ cpdef grpc_status_code code(self)
+ cpdef str details(self)
+ cpdef tuple trailing_metadata(self)
+ cpdef str debug_error_string(self)
+ cdef grpc_status_code c_code(self)
diff --git a/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/aio/rpc_status.pyx.pxi b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/aio/rpc_status.pyx.pxi
new file mode 100644
index 0000000000..07669fc157
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/aio/rpc_status.pyx.pxi
@@ -0,0 +1,44 @@
+# Copyright 2019 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Exceptions for the aio version of the RPC calls."""
+
+
+cdef class AioRpcStatus(Exception):
+
+ # The final status of gRPC is represented by three trailing metadata:
+ # `grpc-status`, `grpc-status-message`, and `grpc-status-details`.
+ def __cinit__(self,
+ grpc_status_code code,
+ str details,
+ tuple trailing_metadata,
+ str debug_error_string):
+ self._code = code
+ self._details = details
+ self._trailing_metadata = trailing_metadata
+ self._debug_error_string = debug_error_string
+
+ cpdef grpc_status_code code(self):
+ return self._code
+
+ cpdef str details(self):
+ return self._details
+
+ cpdef tuple trailing_metadata(self):
+ return self._trailing_metadata
+
+ cpdef str debug_error_string(self):
+ return self._debug_error_string
+
+ cdef grpc_status_code c_code(self):
+ return <grpc_status_code>self._code
diff --git a/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/aio/server.pxd.pxi b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/aio/server.pxd.pxi
new file mode 100644
index 0000000000..fe10c3883c
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/aio/server.pxd.pxi
@@ -0,0 +1,92 @@
+# Copyright 2019 The gRPC Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+cdef class _HandlerCallDetails:
+ cdef readonly str method
+ cdef readonly tuple invocation_metadata
+
+
+cdef class RPCState(GrpcCallWrapper):
+ cdef grpc_call_details details
+ cdef grpc_metadata_array request_metadata
+ cdef AioServer server
+ # NOTE(lidiz) Under certain corner cases, receiving the client close
+ # operation won't immediately fail ongoing RECV_MESSAGE operations. Here I
+ # added a flag to workaround this unexpected behavior.
+ cdef bint client_closed
+ cdef object abort_exception
+ cdef bint metadata_sent
+ cdef bint status_sent
+ cdef grpc_status_code status_code
+ cdef str status_details
+ cdef tuple trailing_metadata
+ cdef object compression_algorithm
+ cdef bint disable_next_compression
+ cdef object callbacks
+
+ cdef bytes method(self)
+ cdef tuple invocation_metadata(self)
+ cdef void raise_for_termination(self) except *
+ cdef int get_write_flag(self)
+ cdef Operation create_send_initial_metadata_op_if_not_sent(self)
+
+
+cdef class _ServicerContext:
+ cdef RPCState _rpc_state
+ cdef object _loop # asyncio.AbstractEventLoop
+ cdef object _request_deserializer # Callable[[bytes], Any]
+ cdef object _response_serializer # Callable[[Any], bytes]
+
+
+cdef class _SyncServicerContext:
+ cdef _ServicerContext _context
+ cdef list _callbacks
+ cdef object _loop # asyncio.AbstractEventLoop
+
+
+cdef class _MessageReceiver:
+ cdef _ServicerContext _servicer_context
+ cdef object _agen
+
+
+cdef enum AioServerStatus:
+ AIO_SERVER_STATUS_UNKNOWN
+ AIO_SERVER_STATUS_READY
+ AIO_SERVER_STATUS_RUNNING
+ AIO_SERVER_STATUS_STOPPED
+ AIO_SERVER_STATUS_STOPPING
+
+
+cdef class _ConcurrentRpcLimiter:
+ cdef int _maximum_concurrent_rpcs
+ cdef int _active_rpcs
+ cdef object _active_rpcs_condition # asyncio.Condition
+ cdef object _loop # asyncio.EventLoop
+
+
+cdef class AioServer:
+ cdef Server _server
+ cdef list _generic_handlers
+ cdef AioServerStatus _status
+ cdef object _loop # asyncio.EventLoop
+ cdef object _serving_task # asyncio.Task
+ cdef object _shutdown_lock # asyncio.Lock
+ cdef object _shutdown_completed # asyncio.Future
+ cdef CallbackWrapper _shutdown_callback_wrapper
+ cdef object _crash_exception # Exception
+ cdef tuple _interceptors
+ cdef object _thread_pool # concurrent.futures.ThreadPoolExecutor
+ cdef _ConcurrentRpcLimiter _limiter
+
+ cdef thread_pool(self)
diff --git a/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/aio/server.pyx.pxi b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/aio/server.pyx.pxi
new file mode 100644
index 0000000000..e85efdd0b9
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/aio/server.pyx.pxi
@@ -0,0 +1,1097 @@
+# Copyright 2019 The gRPC Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import inspect
+import traceback
+import functools
+
+
+cdef int _EMPTY_FLAG = 0
+cdef str _RPC_FINISHED_DETAILS = 'RPC already finished.'
+cdef str _SERVER_STOPPED_DETAILS = 'Server already stopped.'
+
+cdef _augment_metadata(tuple metadata, object compression):
+ if compression is None:
+ return metadata
+ else:
+ return ((
+ GRPC_COMPRESSION_REQUEST_ALGORITHM_MD_KEY,
+ _COMPRESSION_METADATA_STRING_MAPPING[compression]
+ ),) + metadata
+
+
+cdef class _HandlerCallDetails:
+ def __cinit__(self, str method, tuple invocation_metadata):
+ self.method = method
+ self.invocation_metadata = invocation_metadata
+
+
+class _ServerStoppedError(BaseError):
+ """Raised if the server is stopped."""
+
+
+cdef class RPCState:
+
+ def __cinit__(self, AioServer server):
+ init_grpc_aio()
+ self.call = NULL
+ self.server = server
+ grpc_metadata_array_init(&self.request_metadata)
+ grpc_call_details_init(&self.details)
+ self.client_closed = False
+ self.abort_exception = None
+ self.metadata_sent = False
+ self.status_sent = False
+ self.status_code = StatusCode.ok
+ self.status_details = ''
+ self.trailing_metadata = _IMMUTABLE_EMPTY_METADATA
+ self.compression_algorithm = None
+ self.disable_next_compression = False
+ self.callbacks = []
+
+ cdef bytes method(self):
+ return _slice_bytes(self.details.method)
+
+ cdef tuple invocation_metadata(self):
+ return _metadata(&self.request_metadata)
+
+ cdef void raise_for_termination(self) except *:
+ """Raise exceptions if RPC is not running.
+
+ Server method handlers may suppress the abort exception. We need to halt
+ the RPC execution in that case. This function needs to be called after
+ running application code.
+
+ Also, the server may stop unexpectedly. We need to check before calling
+ into Core functions, otherwise, segfault.
+ """
+ if self.abort_exception is not None:
+ raise self.abort_exception
+ if self.status_sent:
+ raise UsageError(_RPC_FINISHED_DETAILS)
+ if self.server._status == AIO_SERVER_STATUS_STOPPED:
+ raise _ServerStoppedError(_SERVER_STOPPED_DETAILS)
+
+ cdef int get_write_flag(self):
+ if self.disable_next_compression:
+ self.disable_next_compression = False
+ return WriteFlag.no_compress
+ else:
+ return _EMPTY_FLAG
+
+ cdef Operation create_send_initial_metadata_op_if_not_sent(self):
+ cdef SendInitialMetadataOperation op
+ if self.metadata_sent:
+ return None
+ else:
+ op = SendInitialMetadataOperation(
+ _augment_metadata(_IMMUTABLE_EMPTY_METADATA, self.compression_algorithm),
+ _EMPTY_FLAG
+ )
+ return op
+
+ def __dealloc__(self):
+ """Cleans the Core objects."""
+ grpc_call_details_destroy(&self.details)
+ grpc_metadata_array_destroy(&self.request_metadata)
+ if self.call:
+ grpc_call_unref(self.call)
+ shutdown_grpc_aio()
+
+
+cdef class _ServicerContext:
+
+ def __cinit__(self,
+ RPCState rpc_state,
+ object request_deserializer,
+ object response_serializer,
+ object loop):
+ self._rpc_state = rpc_state
+ self._request_deserializer = request_deserializer
+ self._response_serializer = response_serializer
+ self._loop = loop
+
+ async def read(self):
+ cdef bytes raw_message
+ self._rpc_state.raise_for_termination()
+
+ raw_message = await _receive_message(self._rpc_state, self._loop)
+ self._rpc_state.raise_for_termination()
+
+ if raw_message is None:
+ return EOF
+ else:
+ return deserialize(self._request_deserializer,
+ raw_message)
+
+ async def write(self, object message):
+ self._rpc_state.raise_for_termination()
+
+ await _send_message(self._rpc_state,
+ serialize(self._response_serializer, message),
+ self._rpc_state.create_send_initial_metadata_op_if_not_sent(),
+ self._rpc_state.get_write_flag(),
+ self._loop)
+ self._rpc_state.metadata_sent = True
+
+ async def send_initial_metadata(self, object metadata):
+ self._rpc_state.raise_for_termination()
+
+ if self._rpc_state.metadata_sent:
+ raise UsageError('Send initial metadata failed: already sent')
+ else:
+ await _send_initial_metadata(
+ self._rpc_state,
+ _augment_metadata(tuple(metadata), self._rpc_state.compression_algorithm),
+ _EMPTY_FLAG,
+ self._loop
+ )
+ self._rpc_state.metadata_sent = True
+
+ async def abort(self,
+ object code,
+ str details='',
+ tuple trailing_metadata=_IMMUTABLE_EMPTY_METADATA):
+ if self._rpc_state.abort_exception is not None:
+ raise UsageError('Abort already called!')
+ else:
+ # Keeps track of the exception object. After abort happens, the RPC
+ # should stop execution. However, if users decided to suppress it, it
+ # could lead to undefined behavior.
+ self._rpc_state.abort_exception = AbortError('Locally aborted.')
+
+ if trailing_metadata == _IMMUTABLE_EMPTY_METADATA and self._rpc_state.trailing_metadata:
+ trailing_metadata = self._rpc_state.trailing_metadata
+ else:
+ raise_if_not_valid_trailing_metadata(trailing_metadata)
+ self._rpc_state.trailing_metadata = trailing_metadata
+
+ if details == '' and self._rpc_state.status_details:
+ details = self._rpc_state.status_details
+ else:
+ self._rpc_state.status_details = details
+
+ actual_code = get_status_code(code)
+ self._rpc_state.status_code = actual_code
+
+ self._rpc_state.status_sent = True
+ await _send_error_status_from_server(
+ self._rpc_state,
+ actual_code,
+ details,
+ trailing_metadata,
+ self._rpc_state.create_send_initial_metadata_op_if_not_sent(),
+ self._loop
+ )
+
+ raise self._rpc_state.abort_exception
+
+ async def abort_with_status(self, object status):
+ await self.abort(status.code, status.details, status.trailing_metadata)
+
+ def set_trailing_metadata(self, object metadata):
+ raise_if_not_valid_trailing_metadata(metadata)
+ self._rpc_state.trailing_metadata = tuple(metadata)
+
+ def trailing_metadata(self):
+ return self._rpc_state.trailing_metadata
+
+ def invocation_metadata(self):
+ return self._rpc_state.invocation_metadata()
+
+ def set_code(self, object code):
+ self._rpc_state.status_code = get_status_code(code)
+
+ def code(self):
+ return self._rpc_state.status_code
+
+ def set_details(self, str details):
+ self._rpc_state.status_details = details
+
+ def details(self):
+ return self._rpc_state.status_details
+
+ def set_compression(self, object compression):
+ if self._rpc_state.metadata_sent:
+ raise RuntimeError('Compression setting must be specified before sending initial metadata')
+ else:
+ self._rpc_state.compression_algorithm = compression
+
+ def disable_next_message_compression(self):
+ self._rpc_state.disable_next_compression = True
+
+ def peer(self):
+ cdef char *c_peer = NULL
+ c_peer = grpc_call_get_peer(self._rpc_state.call)
+ peer = (<bytes>c_peer).decode('utf8')
+ gpr_free(c_peer)
+ return peer
+
+    def peer_identities(self):
+        """Returns the identities of the peer via the shared peer_identities helper.
+
+        A temporary Call wrapper is pointed at the underlying grpc_call and
+        reset to NULL afterwards so the wrapper's lifecycle does not affect
+        the real call object.
+        """
+        cdef Call query_call = Call()
+        query_call.c_call = self._rpc_state.call
+        identities = peer_identities(query_call)
+        query_call.c_call = NULL
+        return identities
+
+    def peer_identity_key(self):
+        """Returns the auth-property key of the peer identity, decoded, or None."""
+        # Borrow the underlying grpc_call through a throwaway Call wrapper.
+        cdef Call query_call = Call()
+        query_call.c_call = self._rpc_state.call
+        identity_key = peer_identity_key(query_call)
+        query_call.c_call = NULL
+        if identity_key:
+            return identity_key.decode('utf8')
+        else:
+            return None
+
+    def auth_context(self):
+        """Returns the call's auth context as a dict of str keys to bytes values.
+
+        Keys come back from the helper as bytes and are decoded to str; an
+        empty dict is returned when no auth context is available.
+        """
+        cdef Call query_call = Call()
+        query_call.c_call = self._rpc_state.call
+        bytes_ctx = auth_context(query_call)
+        query_call.c_call = NULL
+        if bytes_ctx:
+            ctx = {}
+            for key in bytes_ctx:
+                ctx[key.decode('utf8')] = bytes_ctx[key]
+            return ctx
+        else:
+            return {}
+
+    def time_remaining(self):
+        """Returns seconds until the deadline (never negative), or None if no deadline.
+
+        An infinite-future deadline in the call details means the RPC has no
+        deadline.
+        """
+        if self._rpc_state.details.deadline.seconds == _GPR_INF_FUTURE.seconds:
+            return None
+        else:
+            return max(_time_from_timespec(self._rpc_state.details.deadline) - time.time(), 0)
+
+    def add_done_callback(self, callback):
+        """Registers a callback to run when the RPC finishes; the context is bound as its argument."""
+        cb = functools.partial(callback, self)
+        self._rpc_state.callbacks.append(cb)
+
+    def done(self):
+        """Returns True once the final status has been sent for this RPC."""
+        return self._rpc_state.status_sent
+
+    def cancelled(self):
+        """Returns True if the RPC's status code is CANCELLED."""
+        return self._rpc_state.status_code == StatusCode.cancelled
+
+
+cdef class _SyncServicerContext:
+    """Sync servicer context for sync handler compatibility.
+
+    Wraps an async _ServicerContext so synchronous handlers running in the
+    thread pool can use it: coroutine-based operations are submitted to the
+    event loop with asyncio.run_coroutine_threadsafe, while purely local
+    operations are delegated directly.
+    """
+
+    def __cinit__(self,
+                  _ServicerContext context):
+        self._context = context
+        # Callbacks registered via add_callback; invoked by the caller that
+        # drives the sync handler (see _finish_handler_with_unary_response).
+        self._callbacks = []
+        self._loop = context._loop
+
+    def abort(self,
+              object code,
+              str details='',
+              tuple trailing_metadata=_IMMUTABLE_EMPTY_METADATA):
+        """Aborts the RPC by scheduling the async abort on the event loop."""
+        future = asyncio.run_coroutine_threadsafe(
+            self._context.abort(code, details, trailing_metadata),
+            self._loop)
+        # Abort should raise an AbortError
+        # NOTE(review): future.exception() blocks until completion but only
+        # retrieves the error rather than re-raising it here — confirm the
+        # intended abort propagation path for sync handlers.
+        future.exception()
+
+    def send_initial_metadata(self, object metadata):
+        """Sends initial metadata, blocking until the coroutine completes."""
+        future = asyncio.run_coroutine_threadsafe(
+            self._context.send_initial_metadata(metadata),
+            self._loop)
+        future.result()
+
+    def set_trailing_metadata(self, object metadata):
+        self._context.set_trailing_metadata(metadata)
+
+    def invocation_metadata(self):
+        return self._context.invocation_metadata()
+
+    def set_code(self, object code):
+        self._context.set_code(code)
+
+    def set_details(self, str details):
+        self._context.set_details(details)
+
+    def set_compression(self, object compression):
+        self._context.set_compression(compression)
+
+    def disable_next_message_compression(self):
+        self._context.disable_next_message_compression()
+
+    def add_callback(self, object callback):
+        """Registers a no-argument callback to be invoked after the sync handler returns."""
+        self._callbacks.append(callback)
+
+    def peer(self):
+        return self._context.peer()
+
+    def peer_identities(self):
+        return self._context.peer_identities()
+
+    def peer_identity_key(self):
+        return self._context.peer_identity_key()
+
+    def auth_context(self):
+        return self._context.auth_context()
+
+    def time_remaining(self):
+        return self._context.time_remaining()
+
+
+async def _run_interceptor(object interceptors, object query_handler,
+                           object handler_call_details):
+    """Recursively runs the server interceptor chain.
+
+    Each interceptor receives a `continuation` that advances to the next
+    interceptor in the iterator; when the iterator is exhausted, the actual
+    handler lookup (query_handler) is invoked.
+    """
+    interceptor = next(interceptors, None)
+    if interceptor:
+        continuation = functools.partial(_run_interceptor, interceptors,
+                                         query_handler)
+        return await interceptor.intercept_service(continuation, handler_call_details)
+    else:
+        return query_handler(handler_call_details)
+
+
+def _is_async_handler(object handler):
+    """Inspect if a method handler is async or sync.
+
+    True for awaitables, coroutine functions, and async generator functions;
+    anything else is treated as a sync handler run in the thread pool.
+    """
+    return inspect.isawaitable(handler) or inspect.iscoroutinefunction(handler) or inspect.isasyncgenfunction(handler)
+
+
+async def _find_method_handler(str method, tuple metadata, list generic_handlers,
+                               tuple interceptors):
+    """Locates the method handler for `method`, running interceptors if any.
+
+    Returns the first non-None handler produced by the registered generic
+    handlers, or None when the method is unimplemented.
+    """
+    def query_handlers(handler_call_details):
+        # First registered generic handler that recognizes the method wins.
+        for generic_handler in generic_handlers:
+            method_handler = generic_handler.service(handler_call_details)
+            if method_handler is not None:
+                return method_handler
+        return None
+
+    cdef _HandlerCallDetails handler_call_details = _HandlerCallDetails(method,
+                                                                        metadata)
+    # interceptor
+    if interceptors:
+        return await _run_interceptor(iter(interceptors), query_handlers,
+                                      handler_call_details)
+    else:
+        return query_handlers(handler_call_details)
+
+
+async def _finish_handler_with_unary_response(RPCState rpc_state,
+                                              object unary_handler,
+                                              object request,
+                                              _ServicerContext servicer_context,
+                                              object response_serializer,
+                                              object loop):
+    """Finishes server method handler with a single response.
+
+    This function executes the application handler, and handles response
+    sending, as well as errors. It is shared between unary-unary and
+    stream-unary handlers.
+    """
+    # Executes application logic
+    cdef object response_message
+    cdef _SyncServicerContext sync_servicer_context
+
+    if _is_async_handler(unary_handler):
+        # Run async method handlers in this coroutine
+        response_message = await unary_handler(
+            request,
+            servicer_context,
+        )
+    else:
+        # Run sync method handlers in the thread pool
+        sync_servicer_context = _SyncServicerContext(servicer_context)
+        response_message = await loop.run_in_executor(
+            rpc_state.server.thread_pool(),
+            unary_handler,
+            request,
+            sync_servicer_context,
+        )
+        # Support sync-stack callback
+        for callback in sync_servicer_context._callbacks:
+            callback()
+
+    # Raises exception if aborted
+    rpc_state.raise_for_termination()
+
+    # Serializes the response message
+    cdef bytes response_raw
+    if rpc_state.status_code == StatusCode.ok:
+        response_raw = serialize(
+            response_serializer,
+            response_message,
+        )
+    else:
+        # Discards the response message if the status code is non-OK.
+        response_raw = b''
+
+    # Assembles the batch operations: send the (possibly empty) message and
+    # the final status in one batch, prepending initial metadata if it has
+    # not been sent yet.
+    cdef tuple finish_ops
+    finish_ops = (
+        SendMessageOperation(response_raw, rpc_state.get_write_flag()),
+        SendStatusFromServerOperation(
+            rpc_state.trailing_metadata,
+            rpc_state.status_code,
+            rpc_state.status_details,
+            _EMPTY_FLAGS,
+        ),
+    )
+    if not rpc_state.metadata_sent:
+        finish_ops = prepend_send_initial_metadata_op(
+            finish_ops,
+            None)
+    rpc_state.metadata_sent = True
+    rpc_state.status_sent = True
+    await execute_batch(rpc_state, finish_ops, loop)
+
+
+async def _finish_handler_with_stream_responses(RPCState rpc_state,
+                                                object stream_handler,
+                                                object request,
+                                                _ServicerContext servicer_context,
+                                                object loop):
+    """Finishes server method handler with multiple responses.
+
+    This function executes the application handler, and handles response
+    sending, as well as errors. It is shared between unary-stream and
+    stream-stream handlers.
+    """
+    cdef object async_response_generator
+    cdef object response_message
+
+    if inspect.iscoroutinefunction(stream_handler):
+        # Case 1: Coroutine async handler - using reader-writer API
+        # The handler uses reader / writer API, returns None.
+        await stream_handler(
+            request,
+            servicer_context,
+        )
+    else:
+        if inspect.isasyncgenfunction(stream_handler):
+            # Case 2: Async handler - async generator
+            # The handler uses async generator API
+            async_response_generator = stream_handler(
+                request,
+                servicer_context,
+            )
+        else:
+            # Case 3: Sync handler - normal generator
+            # NOTE(lidiz) Streaming handler in sync stack is either a generator
+            # function or a function returns a generator.
+            sync_servicer_context = _SyncServicerContext(servicer_context)
+            gen = stream_handler(request, sync_servicer_context)
+            # Bridge the sync generator onto the event loop via the thread pool.
+            async_response_generator = generator_to_async_generator(gen,
+                                                                    loop,
+                                                                    rpc_state.server.thread_pool())
+
+        # Consumes messages from the generator
+        async for response_message in async_response_generator:
+            # Raises exception if aborted
+            rpc_state.raise_for_termination()
+
+            await servicer_context.write(response_message)
+
+    # Raises exception if aborted
+    rpc_state.raise_for_termination()
+
+    # Sends the final status of this RPC
+    cdef SendStatusFromServerOperation op = SendStatusFromServerOperation(
+        rpc_state.trailing_metadata,
+        rpc_state.status_code,
+        rpc_state.status_details,
+        _EMPTY_FLAGS,
+    )
+
+    cdef tuple finish_ops = (op,)
+    if not rpc_state.metadata_sent:
+        # Initial metadata was never sent: fold it into the final batch.
+        finish_ops = prepend_send_initial_metadata_op(
+            finish_ops,
+            None
+        )
+    rpc_state.metadata_sent = True
+    rpc_state.status_sent = True
+    await execute_batch(rpc_state, finish_ops, loop)
+
+
+async def _handle_unary_unary_rpc(object method_handler,
+                                  RPCState rpc_state,
+                                  object loop):
+    """Receives one request, runs the unary-unary handler, sends one response."""
+    # Receives request message
+    cdef bytes request_raw = await _receive_message(rpc_state, loop)
+    if request_raw is None:
+        # The RPC was cancelled immediately after start on client side.
+        return
+
+    # Deserializes the request message
+    cdef object request_message = deserialize(
+        method_handler.request_deserializer,
+        request_raw,
+    )
+
+    # Creates a dedicated ServicerContext
+    # (no per-message serializers needed: unary in, unary out)
+    cdef _ServicerContext servicer_context = _ServicerContext(
+        rpc_state,
+        None,
+        None,
+        loop,
+    )
+
+    # Finishes the application handler
+    await _finish_handler_with_unary_response(
+        rpc_state,
+        method_handler.unary_unary,
+        request_message,
+        servicer_context,
+        method_handler.response_serializer,
+        loop
+    )
+
+
+async def _handle_unary_stream_rpc(object method_handler,
+                                   RPCState rpc_state,
+                                   object loop):
+    """Receives one request, runs the unary-stream handler, streams responses."""
+    # Receives request message
+    cdef bytes request_raw = await _receive_message(rpc_state, loop)
+    if request_raw is None:
+        # Client cancelled immediately after starting the RPC.
+        return
+
+    # Deserializes the request message
+    cdef object request_message = deserialize(
+        method_handler.request_deserializer,
+        request_raw,
+    )
+
+    # Creates a dedicated ServicerContext with both serializers so the
+    # context's read/write APIs can (de)serialize messages.
+    cdef _ServicerContext servicer_context = _ServicerContext(
+        rpc_state,
+        method_handler.request_deserializer,
+        method_handler.response_serializer,
+        loop,
+    )
+
+    # Finishes the application handler
+    await _finish_handler_with_stream_responses(
+        rpc_state,
+        method_handler.unary_stream,
+        request_message,
+        servicer_context,
+        loop,
+    )
+
+
+cdef class _MessageReceiver:
+    """Bridge between the async generator API and the reader-writer API.
+
+    Repeatedly reads from the servicer context and yields messages until the
+    EOF sentinel is observed, exposing the stream as an async iterable.
+    """
+
+    def __cinit__(self, _ServicerContext servicer_context):
+        self._servicer_context = servicer_context
+        # Lazily-created async generator; see __aiter__.
+        self._agen = None
+
+    async def _async_message_receiver(self):
+        """An async generator that receives messages."""
+        cdef object message
+        while True:
+            message = await self._servicer_context.read()
+            if message is not EOF:
+                yield message
+            else:
+                break
+
+    def __aiter__(self):
+        # Prevents never awaited warning if application never used the async generator
+        if self._agen is None:
+            self._agen = self._async_message_receiver()
+        return self._agen
+
+    async def __anext__(self):
+        return await self.__aiter__().__anext__()
+
+
+async def _handle_stream_unary_rpc(object method_handler,
+                                   RPCState rpc_state,
+                                   object loop):
+    """Streams requests into the stream-unary handler, sends one response."""
+    # Creates a dedicated ServicerContext (request deserializer only).
+    cdef _ServicerContext servicer_context = _ServicerContext(
+        rpc_state,
+        method_handler.request_deserializer,
+        None,
+        loop,
+    )
+
+    # Prepares the request generator: async handlers get the async iterator
+    # directly; sync handlers get a blocking generator bridged via the loop.
+    cdef object request_iterator
+    if _is_async_handler(method_handler.stream_unary):
+        request_iterator = _MessageReceiver(servicer_context)
+    else:
+        request_iterator = async_generator_to_generator(
+            _MessageReceiver(servicer_context),
+            loop
+        )
+
+    # Finishes the application handler
+    await _finish_handler_with_unary_response(
+        rpc_state,
+        method_handler.stream_unary,
+        request_iterator,
+        servicer_context,
+        method_handler.response_serializer,
+        loop
+    )
+
+
+async def _handle_stream_stream_rpc(object method_handler,
+                                    RPCState rpc_state,
+                                    object loop):
+    """Streams requests into the stream-stream handler, streams responses back."""
+    # Creates a dedicated ServicerContext with both serializers.
+    cdef _ServicerContext servicer_context = _ServicerContext(
+        rpc_state,
+        method_handler.request_deserializer,
+        method_handler.response_serializer,
+        loop,
+    )
+
+    # Prepares the request generator: async handlers consume the async
+    # iterator directly; sync handlers get a blocking bridge.
+    cdef object request_iterator
+    if _is_async_handler(method_handler.stream_stream):
+        request_iterator = _MessageReceiver(servicer_context)
+    else:
+        request_iterator = async_generator_to_generator(
+            _MessageReceiver(servicer_context),
+            loop
+        )
+
+    # Finishes the application handler
+    await _finish_handler_with_stream_responses(
+        rpc_state,
+        method_handler.stream_stream,
+        request_iterator,
+        servicer_context,
+        loop,
+    )
+
+
+async def _handle_exceptions(RPCState rpc_state, object rpc_coro, object loop):
+    """Awaits the RPC coroutine and translates exceptions into RPC status.
+
+    Expected aborts are swallowed; cancellation and server-stop are logged;
+    any other exception is logged and, when the status has not been sent,
+    reported to the client as an error status.
+    """
+    try:
+        try:
+            await rpc_coro
+        except AbortError as e:
+            # Caught AbortError check if it is the same one
+            assert rpc_state.abort_exception is e, 'Abort error has been replaced!'
+            return
+        else:
+            # Check if the abort exception got suppressed
+            if rpc_state.abort_exception is not None:
+                # NOTE(review): single-argument traceback.format_exception
+                # requires Python 3.10+, and it returns a list passed to %s —
+                # confirm the supported minimum Python version here.
+                _LOGGER.error(
+                    'Abort error unexpectedly suppressed: %s',
+                    traceback.format_exception(rpc_state.abort_exception)
+                )
+    except (KeyboardInterrupt, SystemExit):
+        raise
+    except asyncio.CancelledError:
+        _LOGGER.debug('RPC cancelled for servicer method [%s]', _decode(rpc_state.method()))
+    except _ServerStoppedError:
+        _LOGGER.warning('Aborting method [%s] due to server stop.', _decode(rpc_state.method()))
+    except ExecuteBatchError:
+        # If client closed (aka. cancelled), ignore the failed batch operations.
+        if rpc_state.client_closed:
+            return
+        else:
+            raise
+    except Exception as e:
+        _LOGGER.exception('Unexpected [%s] raised by servicer method [%s]' % (
+            type(e).__name__,
+            _decode(rpc_state.method()),
+        ))
+        if not rpc_state.status_sent and rpc_state.server._status != AIO_SERVER_STATUS_STOPPED:
+            # Allows users to raise other types of exception with specified status code
+            if rpc_state.status_code == StatusCode.ok:
+                status_code = StatusCode.unknown
+            else:
+                status_code = rpc_state.status_code
+
+            rpc_state.status_sent = True
+            try:
+                await _send_error_status_from_server(
+                    rpc_state,
+                    status_code,
+                    'Unexpected %s: %s' % (type(e), e),
+                    rpc_state.trailing_metadata,
+                    rpc_state.create_send_initial_metadata_op_if_not_sent(),
+                    loop
+                )
+            except ExecuteBatchError:
+                # Best-effort: the error status itself failed to send.
+                _LOGGER.exception('Failed sending error status from server')
+                traceback.print_exc()
+
+
+cdef _add_callback_handler(object rpc_task, RPCState rpc_state):
+    """Attaches a done-callback to the RPC task that runs user-registered callbacks."""
+
+    def handle_callbacks(object unused_task):
+        try:
+            for callback in rpc_state.callbacks:
+                # The _ServicerContext object is bound in add_done_callback.
+                callback()
+        # NOTE(review): bare except also swallows BaseException subclasses
+        # (e.g. KeyboardInterrupt) — consider `except Exception:`.
+        except:
+            _LOGGER.exception('Error in callback for method [%s]', _decode(rpc_state.method()))
+
+    rpc_task.add_done_callback(handle_callbacks)
+
+
+async def _handle_cancellation_from_core(object rpc_task,
+                                         RPCState rpc_state,
+                                         object loop):
+    """Listens for client-side cancellation from Core and cancels the RPC task."""
+    cdef ReceiveCloseOnServerOperation op = ReceiveCloseOnServerOperation(_EMPTY_FLAG)
+    cdef tuple ops = (op,)
+
+    # Awaits cancellation from peer.
+    await execute_batch(rpc_state, ops, loop)
+    rpc_state.client_closed = True
+    # If 1) received cancel signal; 2) the Task is not finished; 3) the server
+    # wasn't replying final status. For condition 3, it might cause inaccurate
+    # log that an RPC is both aborted and cancelled.
+    if op.cancelled() and not rpc_task.done() and not rpc_state.status_sent:
+        # Injects `CancelledError` to halt the RPC coroutine
+        rpc_task.cancel()
+
+
+async def _schedule_rpc_coro(object rpc_coro,
+                             RPCState rpc_state,
+                             object loop):
+    """Schedules the RPC coroutine and then watches for peer cancellation.
+
+    Scheduling happens here (inside the cancellation coroutine) so the
+    cancellation listener is guaranteed to be in place before the RPC runs.
+    """
+    # Schedules the RPC coroutine.
+    cdef object rpc_task = loop.create_task(_handle_exceptions(
+        rpc_state,
+        rpc_coro,
+        loop,
+    ))
+    _add_callback_handler(rpc_task, rpc_state)
+    await _handle_cancellation_from_core(rpc_task, rpc_state, loop)
+
+
+async def _handle_rpc(list generic_handlers, tuple interceptors,
+                      RPCState rpc_state, object loop):
+    """Top-level per-RPC entry: resolves the handler and dispatches by cardinality.
+
+    Replies UNIMPLEMENTED when no handler recognizes the method; otherwise
+    routes to the unary/stream handler matching the method's streaming flags.
+    """
+    cdef object method_handler
+    # Finds the method handler (application logic)
+    method_handler = await _find_method_handler(
+        rpc_state.method().decode(),
+        rpc_state.invocation_metadata(),
+        generic_handlers,
+        interceptors,
+    )
+    if method_handler is None:
+        rpc_state.status_sent = True
+        await _send_error_status_from_server(
+            rpc_state,
+            StatusCode.unimplemented,
+            'Method not found!',
+            _IMMUTABLE_EMPTY_METADATA,
+            rpc_state.create_send_initial_metadata_op_if_not_sent(),
+            loop
+        )
+        return
+
+    # Handles unary-unary case
+    if not method_handler.request_streaming and not method_handler.response_streaming:
+        await _handle_unary_unary_rpc(method_handler,
+                                      rpc_state,
+                                      loop)
+        return
+
+    # Handles unary-stream case
+    if not method_handler.request_streaming and method_handler.response_streaming:
+        await _handle_unary_stream_rpc(method_handler,
+                                       rpc_state,
+                                       loop)
+        return
+
+    # Handles stream-unary case
+    if method_handler.request_streaming and not method_handler.response_streaming:
+        await _handle_stream_unary_rpc(method_handler,
+                                       rpc_state,
+                                       loop)
+        return
+
+    # Handles stream-stream case
+    if method_handler.request_streaming and method_handler.response_streaming:
+        await _handle_stream_stream_rpc(method_handler,
+                                        rpc_state,
+                                        loop)
+        return
+
+
+# Raised (via the failure handler below) when grpc_server_request_call fails.
+class _RequestCallError(Exception): pass
+
+cdef CallbackFailureHandler REQUEST_CALL_FAILURE_HANDLER = CallbackFailureHandler(
+    'grpc_server_request_call', None, _RequestCallError)
+
+
+# Shutdown-notify failures are surfaced as InternalError.
+cdef CallbackFailureHandler SERVER_SHUTDOWN_FAILURE_HANDLER = CallbackFailureHandler(
+    'grpc_server_shutdown_and_notify',
+    None,
+    InternalError)
+
+
+cdef class _ConcurrentRpcLimiter:
+    """Bounds the number of RPCs handled concurrently by the server.
+
+    check_before_request_call() waits (on an asyncio.Condition) until the
+    active-RPC count drops below the configured maximum, then claims a slot;
+    decrease_once_finished() releases the slot when the RPC task completes.
+    """
+
+    def __cinit__(self, int maximum_concurrent_rpcs, object loop):
+        if maximum_concurrent_rpcs <= 0:
+            # Fixed typo in the error message ("postive" -> "positive").
+            raise ValueError("maximum_concurrent_rpcs should be a positive integer")
+        self._maximum_concurrent_rpcs = maximum_concurrent_rpcs
+        self._active_rpcs = 0
+        self._active_rpcs_condition = asyncio.Condition()
+        self._loop = loop
+
+    async def check_before_request_call(self):
+        """Blocks until a concurrency slot is free, then reserves it."""
+        await self._active_rpcs_condition.acquire()
+        try:
+            predicate = lambda: self._active_rpcs < self._maximum_concurrent_rpcs
+            await self._active_rpcs_condition.wait_for(predicate)
+            self._active_rpcs += 1
+        finally:
+            self._active_rpcs_condition.release()
+
+    async def _decrease_active_rpcs_count_with_lock(self):
+        """Releases one slot and wakes a single waiter."""
+        await self._active_rpcs_condition.acquire()
+        try:
+            self._active_rpcs -= 1
+            self._active_rpcs_condition.notify()
+        finally:
+            self._active_rpcs_condition.release()
+
+    def _decrease_active_rpcs_count(self, unused_future):
+        # Done-callbacks run synchronously; hop back onto the loop to take
+        # the condition's lock.
+        self._loop.create_task(self._decrease_active_rpcs_count_with_lock())
+
+    def decrease_once_finished(self, object rpc_task):
+        """Arranges for the slot to be released when rpc_task completes."""
+        rpc_task.add_done_callback(self._decrease_active_rpcs_count)
+
+
+cdef class AioServer:
+    """Cython implementation backing the asyncio gRPC server.
+
+    Owns the Core server object, its completion queue registration, the
+    serving loop that accepts incoming calls, and the shutdown state machine
+    (READY -> RUNNING -> STOPPING -> STOPPED).
+    """
+
+    def __init__(self, loop, thread_pool, generic_handlers, interceptors,
+                 options, maximum_concurrent_rpcs):
+        init_grpc_aio()
+        # NOTE(lidiz) Core objects won't be deallocated automatically.
+        # If AioServer.shutdown is not called, those objects will leak.
+        # TODO(rbellevi): Support xDS in aio server.
+        self._server = Server(options, False)
+        grpc_server_register_completion_queue(
+            self._server.c_server,
+            global_completion_queue(),
+            NULL
+        )
+
+        self._loop = loop
+        self._status = AIO_SERVER_STATUS_READY
+        self._generic_handlers = []
+        self.add_generic_rpc_handlers(generic_handlers)
+        self._serving_task = None
+
+        self._shutdown_lock = asyncio.Lock()
+        self._shutdown_completed = self._loop.create_future()
+        self._shutdown_callback_wrapper = CallbackWrapper(
+            self._shutdown_completed,
+            self._loop,
+            SERVER_SHUTDOWN_FAILURE_HANDLER)
+        self._crash_exception = None
+
+        if interceptors:
+            self._interceptors = tuple(interceptors)
+        else:
+            self._interceptors = ()
+
+        self._thread_pool = thread_pool
+        # When maximum_concurrent_rpcs is None, _limiter is left unset and
+        # the serving loop skips the concurrency check.
+        if maximum_concurrent_rpcs is not None:
+            self._limiter = _ConcurrentRpcLimiter(maximum_concurrent_rpcs,
+                                                  loop)
+
+    def add_generic_rpc_handlers(self, object generic_rpc_handlers):
+        """Registers additional generic RPC handlers."""
+        self._generic_handlers.extend(generic_rpc_handlers)
+
+    def add_insecure_port(self, address):
+        """Binds an insecure listening port; returns the bound port number."""
+        return self._server.add_http2_port(address)
+
+    def add_secure_port(self, address, server_credentials):
+        """Binds a TLS listening port; returns the bound port number."""
+        return self._server.add_http2_port(address,
+                                           server_credentials._credentials)
+
+    async def _request_call(self):
+        """Asks Core for the next incoming call; resolves when one arrives."""
+        cdef grpc_call_error error
+        cdef RPCState rpc_state = RPCState(self)
+        cdef object future = self._loop.create_future()
+        cdef CallbackWrapper wrapper = CallbackWrapper(
+            future,
+            self._loop,
+            REQUEST_CALL_FAILURE_HANDLER)
+        error = grpc_server_request_call(
+            self._server.c_server, &rpc_state.call, &rpc_state.details,
+            &rpc_state.request_metadata,
+            global_completion_queue(), global_completion_queue(),
+            wrapper.c_functor()
+        )
+        if error != GRPC_CALL_OK:
+            raise InternalError("Error in grpc_server_request_call: %s" % error)
+
+        await future
+        return rpc_state
+
+    async def _server_main_loop(self,
+                                object server_started):
+        """Accept loop: pulls calls from Core and spawns per-RPC tasks."""
+        self._server.start(backup_queue=False)
+        cdef RPCState rpc_state
+        server_started.set_result(True)
+
+        while True:
+            # When shutdown begins, no more new connections.
+            if self._status != AIO_SERVER_STATUS_RUNNING:
+                break
+
+            if self._limiter is not None:
+                await self._limiter.check_before_request_call()
+
+            # Accepts new request from Core
+            rpc_state = await self._request_call()
+
+            # Creates the dedicated RPC coroutine. If we schedule it right now,
+            # there is no guarantee if the cancellation listening coroutine is
+            # ready or not. So, we should control the ordering by scheduling
+            # the coroutine onto event loop inside of the cancellation
+            # coroutine.
+            rpc_coro = _handle_rpc(self._generic_handlers,
+                                   self._interceptors,
+                                   rpc_state,
+                                   self._loop)
+
+            # Fires off a task that listens on the cancellation from client.
+            rpc_task = self._loop.create_task(
+                _schedule_rpc_coro(
+                    rpc_coro,
+                    rpc_state,
+                    self._loop
+                )
+            )
+
+            if self._limiter is not None:
+                self._limiter.decrease_once_finished(rpc_task)
+
+    def _serving_task_crash_handler(self, object task):
+        """Shutdown the server immediately if unexpectedly exited."""
+        if task.cancelled():
+            return
+        if task.exception() is None:
+            return
+        if self._status != AIO_SERVER_STATUS_STOPPING:
+            self._crash_exception = task.exception()
+            _LOGGER.exception(self._crash_exception)
+            self._loop.create_task(self.shutdown(None))
+
+    async def start(self):
+        """Starts serving; idempotent when already running, errors otherwise."""
+        if self._status == AIO_SERVER_STATUS_RUNNING:
+            return
+        elif self._status != AIO_SERVER_STATUS_READY:
+            raise UsageError('Server not in ready state')
+
+        self._status = AIO_SERVER_STATUS_RUNNING
+        cdef object server_started = self._loop.create_future()
+        self._serving_task = self._loop.create_task(self._server_main_loop(server_started))
+        self._serving_task.add_done_callback(self._serving_task_crash_handler)
+        # Needs to explicitly wait for the server to start up.
+        # Otherwise, the actual start time of the server is un-controllable.
+        await server_started
+
+    async def _start_shutting_down(self):
+        """Prepares the server to shutting down.
+
+        This coroutine function is NOT coroutine-safe.
+        """
+        # The shutdown callback won't be called until there is no live RPC.
+        grpc_server_shutdown_and_notify(
+            self._server.c_server,
+            global_completion_queue(),
+            self._shutdown_callback_wrapper.c_functor())
+
+        # Ensures the serving task (coroutine) exits.
+        try:
+            await self._serving_task
+        except _RequestCallError:
+            # Expected: the pending request_call fails once shutdown begins.
+            pass
+
+    async def shutdown(self, grace):
+        """Gracefully shutdown the Core server.
+
+        Application should only call shutdown once.
+
+        Args:
+          grace: An optional float indicating the length of grace period in
+            seconds.
+        """
+        if self._status == AIO_SERVER_STATUS_READY or self._status == AIO_SERVER_STATUS_STOPPED:
+            return
+
+        async with self._shutdown_lock:
+            if self._status == AIO_SERVER_STATUS_RUNNING:
+                self._server.is_shutting_down = True
+                self._status = AIO_SERVER_STATUS_STOPPING
+                await self._start_shutting_down()
+
+        if grace is None:
+            # Directly cancels all calls
+            grpc_server_cancel_all_calls(self._server.c_server)
+            await self._shutdown_completed
+        else:
+            try:
+                await asyncio.wait_for(
+                    asyncio.shield(self._shutdown_completed),
+                    grace,
+                )
+            except asyncio.TimeoutError:
+                # Cancels all ongoing calls by the end of grace period.
+                grpc_server_cancel_all_calls(self._server.c_server)
+                await self._shutdown_completed
+
+        async with self._shutdown_lock:
+            if self._status == AIO_SERVER_STATUS_STOPPING:
+                grpc_server_destroy(self._server.c_server)
+                self._server.c_server = NULL
+                self._server.is_shutdown = True
+                self._status = AIO_SERVER_STATUS_STOPPED
+
+    async def wait_for_termination(self, object timeout):
+        """Waits for shutdown; returns True on timeout, False once terminated.
+
+        Re-raises a crash exception from the serving task if one occurred.
+        """
+        if timeout is None:
+            await self._shutdown_completed
+        else:
+            try:
+                await asyncio.wait_for(
+                    asyncio.shield(self._shutdown_completed),
+                    timeout,
+                )
+            except asyncio.TimeoutError:
+                if self._crash_exception is not None:
+                    raise self._crash_exception
+                return True
+        if self._crash_exception is not None:
+            raise self._crash_exception
+        return False
+
+    def __dealloc__(self):
+        """Deallocation of Core objects are ensured by Python layer."""
+        # TODO(lidiz) if users create server, and then dealloc it immediately.
+        # There is a potential memory leak of created Core server.
+        if self._status != AIO_SERVER_STATUS_STOPPED:
+            _LOGGER.debug(
+                '__dealloc__ called on running server %s with status %d',
+                self,
+                self._status
+            )
+        shutdown_grpc_aio()
+
+    cdef thread_pool(self):
+        """Access the thread pool instance."""
+        return self._thread_pool
+
+    def is_running(self):
+        """Returns True while the server is in the RUNNING state."""
+        return self._status == AIO_SERVER_STATUS_RUNNING
diff --git a/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/arguments.pxd.pxi b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/arguments.pxd.pxi
new file mode 100644
index 0000000000..251efe15b3
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/arguments.pxd.pxi
@@ -0,0 +1,36 @@
+# Copyright 2018 The gRPC Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Wraps a raw grpc_arg into a tagged Python tuple so it can travel through
+# Python-level channel-argument lists (see arguments.pyx.pxi).
+cdef tuple _wrap_grpc_arg(grpc_arg arg)
+
+
+# Recovers the raw grpc_arg from a tuple produced by _wrap_grpc_arg.
+cdef grpc_arg _unwrap_grpc_arg(tuple wrapped_arg)
+
+
+cdef class _ChannelArg:
+
+    # C representation of a single channel argument.
+    cdef grpc_arg c_argument
+
+    # Populates c_argument from a (key, value) pair; Python objects that must
+    # outlive the conversion are appended to `references`.
+    cdef void c(self, argument, references) except *
+
+
+cdef class _ChannelArgs:
+
+    # Original Python-level (key, value) pairs.
+    cdef readonly tuple _arguments
+    # _ChannelArg wrappers kept alive alongside the C array.
+    cdef list _channel_args
+    # Python objects (encoded keys/values) that must outlive the C struct.
+    cdef readonly list _references
+    cdef grpc_channel_args _c_arguments
+
+    # Returns a pointer to the C channel-args struct owned by this object.
+    cdef grpc_channel_args *c_args(self) except *
diff --git a/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/arguments.pyx.pxi b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/arguments.pyx.pxi
new file mode 100644
index 0000000000..9df308cdbc
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/arguments.pyx.pxi
@@ -0,0 +1,85 @@
+# Copyright 2018 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+cdef class _GrpcArgWrapper:
+    """Opaque holder carrying a raw grpc_arg through Python code."""
+
+    cdef grpc_arg arg
+
+
+cdef tuple _wrap_grpc_arg(grpc_arg arg):
+    """Wraps a raw grpc_arg in a tagged tuple recognizable by _ChannelArg.c()."""
+    wrapped = _GrpcArgWrapper()
+    wrapped.arg = arg
+    # The marker string lets Python-level code identify wrapped args.
+    return ("grpc.python._cygrpc._GrpcArgWrapper", wrapped)
+
+
+cdef grpc_arg _unwrap_grpc_arg(tuple wrapped_arg):
+    """Extracts the raw grpc_arg from a tuple produced by _wrap_grpc_arg."""
+    cdef _GrpcArgWrapper wrapped = wrapped_arg[1]
+    return wrapped.arg
+
+
+cdef class _ChannelArg:
+    """Converts one Python (key, value) channel argument into a grpc_arg."""
+
+    cdef void c(self, argument, references) except *:
+        key, value = argument
+        cdef bytes encoded_key = _encode(key)
+        # If encoding produced a new object, keep it alive for the C struct.
+        if encoded_key is not key:
+            references.append(encoded_key)
+        self.c_argument.key = encoded_key
+        if isinstance(value, int):
+            self.c_argument.type = GRPC_ARG_INTEGER
+            self.c_argument.value.integer = value
+        elif isinstance(value, (bytes, str, unicode,)):
+            self.c_argument.type = GRPC_ARG_STRING
+            encoded_value = _encode(value)
+            if encoded_value is not value:
+                references.append(encoded_value)
+            self.c_argument.value.string = encoded_value
+        elif isinstance(value, _GrpcArgWrapper):
+            # Pre-built grpc_arg (e.g. from _wrap_grpc_arg): copy it wholesale.
+            self.c_argument = (<_GrpcArgWrapper>value).arg
+        elif hasattr(value, '__int__'):
+            # Pointer objects must override __int__() to return
+            # the underlying C address (Python ints are word size). The
+            # lifecycle of the pointer is fixed to the lifecycle of the
+            # python object wrapping it.
+            self.c_argument.type = GRPC_ARG_POINTER
+            self.c_argument.value.pointer.vtable = &default_vtable
+            self.c_argument.value.pointer.address = <void*>(<intptr_t>int(value))
+        else:
+            raise TypeError(
+                'Expected int, bytes, or behavior, got {}'.format(type(value)))
+
+
+cdef class _ChannelArgs:
+    """Owns a C grpc_channel_args array built from Python (key, value) pairs.
+
+    The C array is gpr_malloc'ed in __cinit__ and freed in __dealloc__;
+    _references keeps encoded keys/values alive for the array's lifetime.
+    """
+
+    def __cinit__(self, arguments):
+        self._arguments = () if arguments is None else tuple(arguments)
+        self._channel_args = []
+        self._references = []
+        self._c_arguments.arguments_length = len(self._arguments)
+        if self._c_arguments.arguments_length != 0:
+            self._c_arguments.arguments = <grpc_arg *>gpr_malloc(
+                self._c_arguments.arguments_length * sizeof(grpc_arg))
+            for index, argument in enumerate(self._arguments):
+                channel_arg = _ChannelArg()
+                channel_arg.c(argument, self._references)
+                self._c_arguments.arguments[index] = channel_arg.c_argument
+                self._channel_args.append(channel_arg)
+
+    cdef grpc_channel_args *c_args(self) except *:
+        # Pointer remains valid only while this object is alive.
+        return &self._c_arguments
+
+    def __dealloc__(self):
+        if self._c_arguments.arguments != NULL:
+            gpr_free(self._c_arguments.arguments)
diff --git a/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/call.pxd.pxi b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/call.pxd.pxi
new file mode 100644
index 0000000000..8babeb4536
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/call.pxd.pxi
@@ -0,0 +1,20 @@
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+cdef class Call:
+
+    # Underlying Core call handle; NULL when the wrapper is empty/invalid.
+    cdef grpc_call *c_call
+    # Python objects that must stay alive while the Core call references them.
+    cdef list references
+
diff --git a/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/call.pyx.pxi b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/call.pyx.pxi
new file mode 100644
index 0000000000..f68e166b17
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/call.pyx.pxi
@@ -0,0 +1,97 @@
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+cdef class Call:
+ # Thin wrapper over a core grpc_call. Instances start out *empty*
+ # (c_call is NULL) and are only usable once a c_call has been attached
+ # elsewhere; is_valid reflects that state.
+
+ def __cinit__(self):
+ # Create an *empty* call
+ fork_handlers_and_grpc_init()
+ self.c_call = NULL
+ self.references = []
+
+ def _start_batch(self, operations, tag, retain_self):
+ # Start a batch of operations on the core call. The tag object is
+ # INCREF'd here because a pointer to it is handed to core; the matching
+ # DECREF happens when the tag is dequeued from a completion queue
+ # (see _interpret_event in completion_queue.pyx.pxi).
+ if not self.is_valid:
+ raise ValueError("invalid call object cannot be used from Python")
+ cdef _BatchOperationTag batch_operation_tag = _BatchOperationTag(
+ tag, operations, self if retain_self else None)
+ batch_operation_tag.prepare()
+ cpython.Py_INCREF(batch_operation_tag)
+ cdef grpc_call_error error
+ with nogil:
+ error = grpc_call_start_batch(
+ self.c_call, batch_operation_tag.c_ops, batch_operation_tag.c_nops,
+ <cpython.PyObject *>batch_operation_tag, NULL)
+ return error
+
+ def start_client_batch(self, operations, tag):
+ # We don't reference this call in the operations tag because
+ # it should be cancelled when it goes out of scope
+ return self._start_batch(operations, tag, False)
+
+ def start_server_batch(self, operations, tag):
+ # Server-side batches retain the call in the tag (retain_self=True).
+ return self._start_batch(operations, tag, True)
+
+ def cancel(
+ self, grpc_status_code error_code=GRPC_STATUS__DO_NOT_USE,
+ details=None):
+ # Cancel the call, optionally with an explicit status code and detail
+ # string. error_code and details must be supplied together or not at all.
+ details = str_to_bytes(details)
+ if not self.is_valid:
+ raise ValueError("invalid call object cannot be used from Python")
+ if (details is None) != (error_code == GRPC_STATUS__DO_NOT_USE):
+ raise ValueError("if error_code is specified, so must details "
+ "(and vice-versa)")
+ cdef grpc_call_error result
+ cdef char *c_details = NULL
+ if error_code != GRPC_STATUS__DO_NOT_USE:
+ # Keep the bytes object alive: core may read c_details asynchronously.
+ self.references.append(details)
+ c_details = details
+ with nogil:
+ result = grpc_call_cancel_with_status(
+ self.c_call, error_code, c_details, NULL)
+ return result
+ else:
+ with nogil:
+ result = grpc_call_cancel(self.c_call, NULL)
+ return result
+
+ def set_credentials(self, CallCredentials call_credentials not None):
+ # Attach per-call credentials; the temporary core credentials object is
+ # released immediately after core takes its own reference.
+ cdef grpc_call_credentials *c_call_credentials = call_credentials.c()
+ cdef grpc_call_error call_error = grpc_call_set_credentials(
+ self.c_call, c_call_credentials)
+ grpc_call_credentials_release(c_call_credentials)
+ return call_error
+
+ def peer(self):
+ # Return the peer address as bytes; the C string returned by core is
+ # copied into a Python bytes object and then freed.
+ cdef char *peer = NULL
+ with nogil:
+ peer = grpc_call_get_peer(self.c_call)
+ result = <bytes>peer
+ with nogil:
+ gpr_free(peer)
+ return result
+
+ def __dealloc__(self):
+ # grpc_shutdown() balances the init performed in __cinit__ via
+ # fork_handlers_and_grpc_init().
+ with nogil:
+ if self.c_call != NULL:
+ grpc_call_unref(self.c_call)
+ grpc_shutdown()
+
+ # The object *should* always be valid from Python. Used for debugging.
+ @property
+ def is_valid(self):
+ return self.c_call != NULL
+
+ def _custom_op_on_c_call(self, int op):
+ return _custom_op_on_c_call(op, self.c_call)
diff --git a/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/channel.pxd.pxi b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/channel.pxd.pxi
new file mode 100644
index 0000000000..eb27f2df7a
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/channel.pxd.pxi
@@ -0,0 +1,74 @@
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Declarations for channel.pyx.pxi: error-checking helpers plus the state
+# holders and call wrappers used by the synchronous Channel implementation.
+
+cdef _check_call_error_no_metadata(c_call_error)
+
+
+cdef _check_and_raise_call_error_no_metadata(c_call_error)
+
+
+cdef _check_call_error(c_call_error, metadata)
+
+
+cdef class _CallState:
+ # Per-call bookkeeping shared between the Channel machinery and the
+ # IntegratedCall/SegregatedCall wrappers.
+
+ cdef grpc_call *c_call
+ # Tags for batches started on this call that have not yet completed.
+ cdef set due
+
+
+cdef class _ChannelState:
+
+ cdef object condition
+ cdef grpc_channel *c_channel
+ # A boolean field indicating that the channel is open (if True) or is being
+ # closed (i.e. a call to close is currently executing) or is closed (if
+ # False).
+ # TODO(https://github.com/grpc/grpc/issues/3064): Eliminate "is being closed"
+ # a state in which condition may be acquired by any thread, eliminate this
+ # field and just use the NULLness of c_channel as an indication that the
+ # channel is closed.
+ cdef object open
+ cdef object closed_reason
+
+ # A dict from _BatchOperationTag to _CallState
+ cdef dict integrated_call_states
+ cdef grpc_completion_queue *c_call_completion_queue
+
+ # A set of _CallState
+ cdef set segregated_call_states
+
+ cdef set connectivity_due
+ cdef grpc_completion_queue *c_connectivity_completion_queue
+
+
+cdef class IntegratedCall:
+ # A call multiplexed onto the channel-wide completion queue.
+
+ cdef _ChannelState _channel_state
+ cdef _CallState _call_state
+
+
+cdef class SegregatedCall:
+ # A call with its own dedicated completion queue.
+
+ cdef _ChannelState _channel_state
+ cdef _CallState _call_state
+ cdef grpc_completion_queue *_c_completion_queue
+
+
+cdef class Channel:
+
+ cdef _ChannelState _state
+
+ # TODO(https://github.com/grpc/grpc/issues/15662): Eliminate this.
+ cdef tuple _arguments
diff --git a/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/channel.pyx.pxi b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/channel.pyx.pxi
new file mode 100644
index 0000000000..d49a4210f7
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/channel.pyx.pxi
@@ -0,0 +1,516 @@
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Helpers for translating grpc_call_error codes into Python error messages,
+# with and without metadata context, plus the raising variants used by the
+# channel machinery below.
+
+_INTERNAL_CALL_ERROR_MESSAGE_FORMAT = (
+ 'Internal gRPC call error %d. ' +
+ 'Please report to https://github.com/grpc/grpc/issues')
+
+
+cdef str _call_error_metadata(metadata):
+ # Message for GRPC_CALL_ERROR_INVALID_METADATA: include the bad metadata.
+ return 'metadata was invalid: %s' % metadata
+
+
+cdef str _call_error_no_metadata(c_call_error):
+ return _INTERNAL_CALL_ERROR_MESSAGE_FORMAT % c_call_error
+
+
+cdef str _call_error(c_call_error, metadata):
+ if c_call_error == GRPC_CALL_ERROR_INVALID_METADATA:
+ return _call_error_metadata(metadata)
+ else:
+ return _call_error_no_metadata(c_call_error)
+
+
+cdef _check_call_error_no_metadata(c_call_error):
+ # Returns an error string for a failed call, or None on GRPC_CALL_OK.
+ if c_call_error != GRPC_CALL_OK:
+ return _INTERNAL_CALL_ERROR_MESSAGE_FORMAT % c_call_error
+ else:
+ return None
+
+
+cdef _check_and_raise_call_error_no_metadata(c_call_error):
+ error = _check_call_error_no_metadata(c_call_error)
+ if error is not None:
+ raise ValueError(error)
+
+
+cdef _check_call_error(c_call_error, metadata):
+ # Like _check_call_error_no_metadata but specializes the message when the
+ # error was caused by invalid metadata.
+ if c_call_error == GRPC_CALL_ERROR_INVALID_METADATA:
+ return _call_error_metadata(metadata)
+ else:
+ return _check_call_error_no_metadata(c_call_error)
+
+
+cdef void _raise_call_error_no_metadata(c_call_error) except *:
+ raise ValueError(_call_error_no_metadata(c_call_error))
+
+
+cdef void _raise_call_error(c_call_error, metadata) except *:
+ raise ValueError(_call_error(c_call_error, metadata))
+
+
+cdef _destroy_c_completion_queue(grpc_completion_queue *c_completion_queue):
+ # Shutdown must precede destroy per the core completion-queue contract.
+ grpc_completion_queue_shutdown(c_completion_queue)
+ grpc_completion_queue_destroy(c_completion_queue)
+
+
+cdef class _CallState:
+ # State for one call: c_call (declared in the .pxd) starts NULL and `due`
+ # holds the tags of batches started but not yet completed.
+
+ def __cinit__(self):
+ self.due = set()
+
+
+cdef class _ChannelState:
+ # Shared mutable channel state; all fields are guarded by self.condition
+ # (a threading.Condition) — see the .pxd for field documentation.
+
+ def __cinit__(self):
+ self.condition = threading.Condition()
+ self.open = True
+ self.integrated_call_states = {}
+ self.segregated_call_states = set()
+ self.connectivity_due = set()
+ self.closed_reason = None
+
+
+cdef tuple _operate(grpc_call *c_call, object operations, object user_tag):
+ # Start a batch on c_call and return (grpc_call_error, tag). The tag is
+ # INCREF'd because core holds a raw pointer to it until the batch completes;
+ # the matching DECREF happens in _interpret_event when the tag is dequeued.
+ cdef grpc_call_error c_call_error
+ cdef _BatchOperationTag tag = _BatchOperationTag(user_tag, operations, None)
+ tag.prepare()
+ cpython.Py_INCREF(tag)
+ with nogil:
+ c_call_error = grpc_call_start_batch(
+ c_call, tag.c_ops, tag.c_nops, <cpython.PyObject *>tag, NULL)
+ return c_call_error, tag
+
+
+cdef object _operate_from_integrated_call(
+ _ChannelState channel_state, _CallState call_state, object operations,
+ object user_tag):
+ # Start a batch on an integrated call under the channel lock. Returns True
+ # if the batch was started, False if the call has no outstanding tags
+ # (i.e. it has already completed/been torn down). Raises on a core error.
+ cdef grpc_call_error c_call_error
+ cdef _BatchOperationTag tag
+ with channel_state.condition:
+ if call_state.due:
+ c_call_error, tag = _operate(call_state.c_call, operations, user_tag)
+ if c_call_error == GRPC_CALL_OK:
+ call_state.due.add(tag)
+ # Track the tag so next_call_event can route completion back here.
+ channel_state.integrated_call_states[tag] = call_state
+ return True
+ else:
+ _raise_call_error_no_metadata(c_call_error)
+ else:
+ return False
+
+
+cdef object _operate_from_segregated_call(
+ _ChannelState channel_state, _CallState call_state, object operations,
+ object user_tag):
+ # Same as _operate_from_integrated_call, but segregated calls poll their
+ # own completion queue, so no channel-level tag bookkeeping is needed.
+ cdef grpc_call_error c_call_error
+ cdef _BatchOperationTag tag
+ with channel_state.condition:
+ if call_state.due:
+ c_call_error, tag = _operate(call_state.c_call, operations, user_tag)
+ if c_call_error == GRPC_CALL_OK:
+ call_state.due.add(tag)
+ return True
+ else:
+ _raise_call_error_no_metadata(c_call_error)
+ else:
+ return False
+
+
+cdef _cancel(
+ _ChannelState channel_state, _CallState call_state, grpc_status_code code,
+ str details):
+ # Cancel a call with the given status; a no-op if the call has no
+ # outstanding batches (it has already finished).
+ cdef grpc_call_error c_call_error
+ with channel_state.condition:
+ if call_state.due:
+ c_call_error = grpc_call_cancel_with_status(
+ call_state.c_call, code, _encode(details), NULL)
+ _check_and_raise_call_error_no_metadata(c_call_error)
+
+
+cdef _next_call_event(
+ _ChannelState channel_state, grpc_completion_queue *c_completion_queue,
+ on_success, on_failure, deadline):
+ """Block on the next event out of the completion queue.
+
+ On success, `on_success` will be invoked with the tag taken from the CQ.
+ In the case of a failure due to an exception raised in a signal handler,
+ `on_failure` will be invoked with no arguments. Note that this situation
+ can only occur on the main thread.
+
+ Args:
+ channel_state: The state for the channel on which the RPC is running.
+ c_completion_queue: The CQ which will be polled.
+ on_success: A callable object to be invoked upon successful receipt of a
+ tag from the CQ.
+ on_failure: A callable object to be invoked in case a Python exception is
+ raised from a signal handler during polling.
+ deadline: The point after which the RPC will time out.
+ """
+ try:
+ tag, event = _latent_event(c_completion_queue, deadline)
+ # NOTE(rbellevi): This broad except enables us to clean up resources before
+ # propagating any exceptions raised by signal handlers to the application.
+ except:
+ if on_failure is not None:
+ on_failure()
+ raise
+ else:
+ # on_success runs (and waiters are woken) under the channel lock.
+ with channel_state.condition:
+ on_success(tag)
+ channel_state.condition.notify_all()
+ return event
+
+
+# TODO(https://github.com/grpc/grpc/issues/14569): This could be a lot simpler.
+cdef void _call(
+ _ChannelState channel_state, _CallState call_state,
+ grpc_completion_queue *c_completion_queue, on_success, int flags, method,
+ host, object deadline, CallCredentials credentials,
+ object operationses_and_user_tags, object metadata,
+ object context) except *:
+ """Invokes an RPC.
+
+ Args:
+ channel_state: A _ChannelState with its "open" attribute set to True. RPCs
+ may not be invoked on a closed channel.
+ call_state: An empty _CallState to be altered (specifically assigned a
+ c_call and having its due set populated) if the RPC invocation is
+ successful.
+ c_completion_queue: A grpc_completion_queue to be used for the call's
+ operations.
+ on_success: A behavior to be called if attempting to start operations for
+ the call succeeds. If called the behavior will be called while holding the
+ channel_state condition and passed the tags associated with operations
+ that were successfully started for the call.
+ flags: Flags to be passed to gRPC Core as part of call creation.
+ method: The fully-qualified name of the RPC method being invoked.
+ host: A "host" string to be passed to gRPC Core as part of call creation.
+ deadline: A float for the deadline of the RPC, or None if the RPC is to have
+ no deadline.
+ credentials: A _CallCredentials for the RPC or None.
+ operationses_and_user_tags: A sequence of length-two sequences the first
+ element of which is a sequence of Operations and the second element of
+ which is an object to be used as a tag. A SendInitialMetadataOperation
+ must be present in the first element of this value.
+ metadata: The metadata for this call.
+ context: Context object for distributed tracing.
+ """
+ cdef grpc_slice method_slice
+ cdef grpc_slice host_slice
+ cdef grpc_slice *host_slice_ptr
+ cdef grpc_call_credentials *c_call_credentials
+ cdef grpc_call_error c_call_error
+ cdef tuple error_and_wrapper_tag
+ cdef _BatchOperationTag wrapper_tag
+ with channel_state.condition:
+ if channel_state.open:
+ # Slices are unref'd immediately after call creation: core retains its
+ # own references to the method/host strings.
+ method_slice = _slice_from_bytes(method)
+ if host is None:
+ host_slice_ptr = NULL
+ else:
+ host_slice = _slice_from_bytes(host)
+ host_slice_ptr = &host_slice
+ call_state.c_call = grpc_channel_create_call(
+ channel_state.c_channel, NULL, flags,
+ c_completion_queue, method_slice, host_slice_ptr,
+ _timespec_from_time(deadline), NULL)
+ grpc_slice_unref(method_slice)
+ if host_slice_ptr:
+ grpc_slice_unref(host_slice)
+ if context is not None:
+ set_census_context_on_call(call_state, context)
+ if credentials is not None:
+ c_call_credentials = credentials.c()
+ c_call_error = grpc_call_set_credentials(
+ call_state.c_call, c_call_credentials)
+ grpc_call_credentials_release(c_call_credentials)
+ if c_call_error != GRPC_CALL_OK:
+ # Failed to attach credentials: tear down the freshly created call.
+ grpc_call_unref(call_state.c_call)
+ call_state.c_call = NULL
+ _raise_call_error_no_metadata(c_call_error)
+ started_tags = set()
+ for operations, user_tag in operationses_and_user_tags:
+ c_call_error, tag = _operate(call_state.c_call, operations, user_tag)
+ if c_call_error == GRPC_CALL_OK:
+ started_tags.add(tag)
+ else:
+ # One batch failed to start: cancel and drop the call entirely.
+ grpc_call_cancel(call_state.c_call, NULL)
+ grpc_call_unref(call_state.c_call)
+ call_state.c_call = NULL
+ _raise_call_error(c_call_error, metadata)
+ else:
+ # for/else: all batches started successfully.
+ call_state.due.update(started_tags)
+ on_success(started_tags)
+ else:
+ raise ValueError('Cannot invoke RPC: %s' % channel_state.closed_reason)
+
+
+cdef void _process_integrated_call_tag(
+ _ChannelState state, _BatchOperationTag tag) except *:
+ # Account for a completed batch on an integrated call; once the last
+ # outstanding tag is gone, release the core call.
+ cdef _CallState call_state = state.integrated_call_states.pop(tag)
+ call_state.due.remove(tag)
+ if not call_state.due:
+ grpc_call_unref(call_state.c_call)
+ call_state.c_call = NULL
+
+
+cdef class IntegratedCall:
+ # A call serviced by the channel's shared completion queue; events for it
+ # are delivered through Channel.next_call_event.
+
+ def __cinit__(self, _ChannelState channel_state, _CallState call_state):
+ self._channel_state = channel_state
+ self._call_state = call_state
+
+ def operate(self, operations, tag):
+ # Start a batch; returns False if the call has already completed.
+ return _operate_from_integrated_call(
+ self._channel_state, self._call_state, operations, tag)
+
+ def cancel(self, code, details):
+ _cancel(self._channel_state, self._call_state, code, details)
+
+
+cdef IntegratedCall _integrated_call(
+ _ChannelState state, int flags, method, host, object deadline,
+ object metadata, CallCredentials credentials, operationses_and_user_tags,
+ object context):
+ # Invoke an RPC on the channel-wide completion queue and wrap it in an
+ # IntegratedCall.
+ call_state = _CallState()
+
+ def on_success(started_tags):
+ # Called under the channel lock by _call: register each started tag so
+ # next_call_event can route its completion back to this call.
+ for started_tag in started_tags:
+ state.integrated_call_states[started_tag] = call_state
+
+ _call(
+ state, call_state, state.c_call_completion_queue, on_success, flags,
+ method, host, deadline, credentials, operationses_and_user_tags, metadata, context)
+
+ return IntegratedCall(state, call_state)
+
+
+cdef object _process_segregated_call_tag(
+ _ChannelState state, _CallState call_state,
+ grpc_completion_queue *c_completion_queue, _BatchOperationTag tag):
+ # Account for a completed batch on a segregated call. When the last tag
+ # completes, release the core call, deregister the call from the channel,
+ # and destroy the call's private completion queue. Returns whether the
+ # call is now fully drained.
+ call_state.due.remove(tag)
+ if not call_state.due:
+ grpc_call_unref(call_state.c_call)
+ call_state.c_call = NULL
+ state.segregated_call_states.remove(call_state)
+ _destroy_c_completion_queue(c_completion_queue)
+ return True
+ else:
+ return False
+
+
+cdef class SegregatedCall:
+ # A call with its own dedicated completion queue; the owner drives it by
+ # calling next_event() rather than relying on the channel spin thread.
+
+ def __cinit__(self, _ChannelState channel_state, _CallState call_state):
+ self._channel_state = channel_state
+ self._call_state = call_state
+
+ def operate(self, operations, tag):
+ return _operate_from_segregated_call(
+ self._channel_state, self._call_state, operations, tag)
+
+ def cancel(self, code, details):
+ _cancel(self._channel_state, self._call_state, code, details)
+
+ def next_event(self):
+ # Block for the next event on this call's private completion queue.
+ def on_success(tag):
+ _process_segregated_call_tag(
+ self._channel_state, self._call_state, self._c_completion_queue, tag)
+ def on_failure():
+ # A signal-handler exception interrupted polling: force-release the
+ # call and its queue so nothing leaks before the exception propagates.
+ self._call_state.due.clear()
+ grpc_call_unref(self._call_state.c_call)
+ self._call_state.c_call = NULL
+ self._channel_state.segregated_call_states.remove(self._call_state)
+ _destroy_c_completion_queue(self._c_completion_queue)
+ return _next_call_event(
+ self._channel_state, self._c_completion_queue, on_success, on_failure, None)
+
+
+cdef SegregatedCall _segregated_call(
+ _ChannelState state, int flags, method, host, object deadline,
+ object metadata, CallCredentials credentials, operationses_and_user_tags,
+ object context):
+ # Invoke an RPC on a freshly created, call-private completion queue and
+ # wrap it in a SegregatedCall.
+ cdef _CallState call_state = _CallState()
+ cdef SegregatedCall segregated_call
+ cdef grpc_completion_queue *c_completion_queue
+
+ def on_success(started_tags):
+ state.segregated_call_states.add(call_state)
+
+ with state.condition:
+ if state.open:
+ c_completion_queue = (grpc_completion_queue_create_for_next(NULL))
+ else:
+ raise ValueError('Cannot invoke RPC on closed channel!')
+
+ try:
+ _call(
+ state, call_state, c_completion_queue, on_success, flags, method, host,
+ deadline, credentials, operationses_and_user_tags, metadata,
+ context)
+ except:
+ # Call invocation failed: the private queue would otherwise leak.
+ _destroy_c_completion_queue(c_completion_queue)
+ raise
+
+ segregated_call = SegregatedCall(state, call_state)
+ segregated_call._c_completion_queue = c_completion_queue
+ return segregated_call
+
+
+cdef object _watch_connectivity_state(
+ _ChannelState state, grpc_connectivity_state last_observed_state,
+ object deadline):
+ # Block until the channel's connectivity state differs from
+ # last_observed_state or the deadline passes; returns the resulting event.
+ cdef _ConnectivityTag tag = _ConnectivityTag(object())
+ with state.condition:
+ if state.open:
+ # INCREF balanced by the DECREF in _interpret_event on dequeue.
+ cpython.Py_INCREF(tag)
+ grpc_channel_watch_connectivity_state(
+ state.c_channel, last_observed_state, _timespec_from_time(deadline),
+ state.c_connectivity_completion_queue, <cpython.PyObject *>tag)
+ state.connectivity_due.add(tag)
+ else:
+ raise ValueError('Cannot monitor channel state: %s' % state.closed_reason)
+ completed_tag, event = _latent_event(
+ state.c_connectivity_completion_queue, None)
+ with state.condition:
+ state.connectivity_due.remove(completed_tag)
+ state.condition.notify_all()
+ return event
+
+
+cdef _close(Channel channel, grpc_status_code code, object details,
+ drain_calls):
+ # Close the channel: cancel every outstanding call with (code, details),
+ # wait until all activity drains, then destroy the queues and the core
+ # channel. With drain_calls=True (fork support) this thread pumps the
+ # completion queue itself instead of waiting on other threads.
+ cdef _ChannelState state = channel._state
+ cdef _CallState call_state
+ encoded_details = _encode(details)
+ with state.condition:
+ if state.open:
+ state.open = False
+ state.closed_reason = details
+ for call_state in set(state.integrated_call_states.values()):
+ grpc_call_cancel_with_status(
+ call_state.c_call, code, encoded_details, NULL)
+ for call_state in state.segregated_call_states:
+ grpc_call_cancel_with_status(
+ call_state.c_call, code, encoded_details, NULL)
+ # TODO(https://github.com/grpc/grpc/issues/3064): Cancel connectivity
+ # watching.
+
+ if drain_calls:
+ # Pump events ourselves until every call/connectivity tag is retired.
+ while not _calls_drained(state):
+ event = channel.next_call_event()
+ if event.completion_type == CompletionType.queue_timeout:
+ continue
+ event.tag(event)
+ else:
+ # Wait for the channel spin thread / watchers to finish the drain.
+ while state.integrated_call_states:
+ state.condition.wait()
+ while state.connectivity_due:
+ state.condition.wait()
+
+ _destroy_c_completion_queue(state.c_call_completion_queue)
+ _destroy_c_completion_queue(state.c_connectivity_completion_queue)
+ grpc_channel_destroy(state.c_channel)
+ state.c_channel = NULL
+ # Balances the init done in Channel.__cinit__.
+ grpc_shutdown()
+ state.condition.notify_all()
+ else:
+ # Another call to close already completed in the past or is currently
+ # being executed in another thread.
+ while state.c_channel != NULL:
+ state.condition.wait()
+
+
+cdef _calls_drained(_ChannelState state):
+ # True when no call or connectivity activity remains outstanding.
+ return not (state.integrated_call_states or state.segregated_call_states or
+ state.connectivity_due)
+
+cdef class Channel:
+ # Synchronous wrapper over a core grpc_channel. Owns two completion
+ # queues: one shared by integrated calls and one for connectivity watching.
+
+ def __cinit__(
+ self, bytes target, object arguments,
+ ChannelCredentials channel_credentials):
+ arguments = () if arguments is None else tuple(arguments)
+ fork_handlers_and_grpc_init()
+ self._state = _ChannelState()
+ self._state.c_call_completion_queue = (
+ grpc_completion_queue_create_for_next(NULL))
+ self._state.c_connectivity_completion_queue = (
+ grpc_completion_queue_create_for_next(NULL))
+ self._arguments = arguments
+ cdef _ChannelArgs channel_args = _ChannelArgs(arguments)
+ # None credentials means an insecure channel.
+ c_channel_credentials = (
+ channel_credentials.c() if channel_credentials is not None
+ else grpc_insecure_credentials_create())
+ self._state.c_channel = grpc_channel_create(
+ <char *>target, c_channel_credentials, channel_args.c_args())
+ grpc_channel_credentials_release(c_channel_credentials)
+
+ def target(self):
+ # Return the channel's target as bytes; copies then frees the C string.
+ cdef char *c_target
+ with self._state.condition:
+ c_target = grpc_channel_get_target(self._state.c_channel)
+ target = <bytes>c_target
+ gpr_free(c_target)
+ return target
+
+ def integrated_call(
+ self, int flags, method, host, object deadline, object metadata,
+ CallCredentials credentials, operationses_and_tags,
+ object context = None):
+ return _integrated_call(
+ self._state, flags, method, host, deadline, metadata, credentials,
+ operationses_and_tags, context)
+
+ def next_call_event(self):
+ # Block for the next event on the shared call completion queue and
+ # update integrated-call bookkeeping for its tag.
+ def on_success(tag):
+ if tag is not None:
+ _process_integrated_call_tag(self._state, tag)
+ if is_fork_support_enabled():
+ # Bounded poll so fork handlers can make progress.
+ queue_deadline = time.time() + 1.0
+ else:
+ queue_deadline = None
+ # NOTE(gnossen): It is acceptable for on_failure to be None here because
+ # failure conditions can only ever happen on the main thread and this
+ # method is only ever invoked on the channel spin thread.
+ return _next_call_event(self._state, self._state.c_call_completion_queue,
+ on_success, None, queue_deadline)
+
+ def segregated_call(
+ self, int flags, method, host, object deadline, object metadata,
+ CallCredentials credentials, operationses_and_tags,
+ object context = None):
+ return _segregated_call(
+ self._state, flags, method, host, deadline, metadata, credentials,
+ operationses_and_tags, context)
+
+ def check_connectivity_state(self, bint try_to_connect):
+ with self._state.condition:
+ if self._state.open:
+ return grpc_channel_check_connectivity_state(
+ self._state.c_channel, try_to_connect)
+ else:
+ raise ValueError('Cannot invoke RPC: %s' % self._state.closed_reason)
+
+ def watch_connectivity_state(
+ self, grpc_connectivity_state last_observed_state, object deadline):
+ return _watch_connectivity_state(self._state, last_observed_state, deadline)
+
+ def close(self, code, details):
+ _close(self, code, details, False)
+
+ def close_on_fork(self, code, details):
+ # Drains the completion queue on the calling thread (used by fork
+ # handlers, where other threads may not run).
+ _close(self, code, details, True)
diff --git a/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/channelz.pyx.pxi b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/channelz.pyx.pxi
new file mode 100644
index 0000000000..36c8cd121c
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/channelz.pyx.pxi
@@ -0,0 +1,71 @@
+# Copyright 2018 The gRPC Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Channelz query helpers: each wraps a grpc_channelz_get_* core function,
+# raising ValueError when core returns NULL and otherwise returning the
+# serialized result as bytes.
+
+def channelz_get_top_channels(start_channel_id):
+ cdef char *c_returned_str = grpc_channelz_get_top_channels(
+ start_channel_id,
+ )
+ if c_returned_str == NULL:
+ raise ValueError('Failed to get top channels, please ensure your' \
+ ' start_channel_id==%s is valid' % start_channel_id)
+ return c_returned_str
+
+def channelz_get_servers(start_server_id):
+ cdef char *c_returned_str = grpc_channelz_get_servers(start_server_id)
+ if c_returned_str == NULL:
+ raise ValueError('Failed to get servers, please ensure your' \
+ ' start_server_id==%s is valid' % start_server_id)
+ return c_returned_str
+
+def channelz_get_server(server_id):
+ cdef char *c_returned_str = grpc_channelz_get_server(server_id)
+ if c_returned_str == NULL:
+ raise ValueError('Failed to get the server, please ensure your' \
+ ' server_id==%s is valid' % server_id)
+ return c_returned_str
+
+def channelz_get_server_sockets(server_id, start_socket_id, max_results):
+ cdef char *c_returned_str = grpc_channelz_get_server_sockets(
+ server_id,
+ start_socket_id,
+ max_results,
+ )
+ if c_returned_str == NULL:
+ raise ValueError('Failed to get server sockets, please ensure your' \
+ ' server_id==%s and start_socket_id==%s and' \
+ ' max_results==%s is valid' %
+ (server_id, start_socket_id, max_results))
+ return c_returned_str
+
+def channelz_get_channel(channel_id):
+ cdef char *c_returned_str = grpc_channelz_get_channel(channel_id)
+ if c_returned_str == NULL:
+ raise ValueError('Failed to get the channel, please ensure your' \
+ ' channel_id==%s is valid' % (channel_id))
+ return c_returned_str
+
+def channelz_get_subchannel(subchannel_id):
+ cdef char *c_returned_str = grpc_channelz_get_subchannel(subchannel_id)
+ if c_returned_str == NULL:
+ raise ValueError('Failed to get the subchannel, please ensure your' \
+ ' subchannel_id==%s is valid' % (subchannel_id))
+ return c_returned_str
+
+def channelz_get_socket(socket_id):
+ cdef char *c_returned_str = grpc_channelz_get_socket(socket_id)
+ if c_returned_str == NULL:
+ raise ValueError('Failed to get the socket, please ensure your' \
+ ' socket_id==%s is valid' % (socket_id))
+ return c_returned_str
diff --git a/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/completion_queue.pxd.pxi b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/completion_queue.pxd.pxi
new file mode 100644
index 0000000000..ec13e60f9d
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/completion_queue.pxd.pxi
@@ -0,0 +1,32 @@
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Declarations for completion_queue.pyx.pxi.
+
+# How often polling wakes up to run PyErr_CheckSignals (milliseconds).
+cdef int g_interrupt_check_period_ms
+
+cdef grpc_event _next(grpc_completion_queue *c_completion_queue, deadline) except *
+
+
+cdef _interpret_event(grpc_event c_event)
+
+cdef class _LatentEventArg:
+ # Argument bundle so _internal_latent_event can be shipped to a thread pool.
+ cdef grpc_completion_queue *c_completion_queue
+ cdef object deadline
+
+cdef class CompletionQueue:
+
+ cdef grpc_completion_queue *c_completion_queue
+ cdef bint is_shutting_down
+ cdef bint is_shutdown
+
+ cdef _interpret_event(self, grpc_event c_event)
diff --git a/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/completion_queue.pyx.pxi b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/completion_queue.pyx.pxi
new file mode 100644
index 0000000000..2e4e010773
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/completion_queue.pyx.pxi
@@ -0,0 +1,139 @@
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Interval at which the polling loop below wakes up to service signals.
+g_interrupt_check_period_ms = 200
+
+cdef grpc_event _next(grpc_completion_queue *c_completion_queue, deadline) except *:
+ # Poll the completion queue for the next event, waking up every
+ # g_interrupt_check_period_ms to run Python signal handlers so e.g.
+ # KeyboardInterrupt is not blocked indefinitely. deadline=None means
+ # wait forever.
+ global g_interrupt_check_period_ms
+ cdef gpr_timespec c_increment
+ cdef gpr_timespec c_timeout
+ cdef gpr_timespec c_deadline
+ c_increment = gpr_time_from_millis(g_interrupt_check_period_ms, GPR_TIMESPAN)
+ if deadline is None:
+ c_deadline = gpr_inf_future(GPR_CLOCK_REALTIME)
+ else:
+ c_deadline = _timespec_from_time(deadline)
+
+ while True:
+ with nogil:
+ # Poll in slices no longer than c_increment, clamped to the deadline.
+ c_timeout = gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), c_increment)
+ if gpr_time_cmp(c_timeout, c_deadline) > 0:
+ c_timeout = c_deadline
+
+ c_event = grpc_completion_queue_next(c_completion_queue, c_timeout, NULL)
+
+ # Stop on a real event or once the overall deadline has been reached.
+ if (c_event.type != GRPC_QUEUE_TIMEOUT or
+ gpr_time_cmp(c_timeout, c_deadline) == 0):
+ break
+
+ # Handle any signals
+ cpython.PyErr_CheckSignals()
+ return c_event
+
+cdef _interpret_event(grpc_event c_event):
+ # Translate a raw grpc_event into (tag, Python event). Timeout/shutdown
+ # events carry no tag and are reported as ConnectivityEvents.
+ cdef _Tag tag
+ if c_event.type == GRPC_QUEUE_TIMEOUT:
+ # TODO(ericgribkoff) Do not coopt ConnectivityEvent here.
+ return None, ConnectivityEvent(GRPC_QUEUE_TIMEOUT, False, None)
+ elif c_event.type == GRPC_QUEUE_SHUTDOWN:
+ # NOTE(nathaniel): For now we coopt ConnectivityEvent here.
+ return None, ConnectivityEvent(GRPC_QUEUE_SHUTDOWN, False, None)
+ else:
+ tag = <_Tag>c_event.tag
+ # We receive event tags only after they've been inc-ref'd elsewhere in
+ # the code.
+ cpython.Py_DECREF(tag)
+ return tag, tag.event(c_event)
+
+cdef _internal_latent_event(_LatentEventArg latent_event_arg):
+ # Blocking poll + interpretation; runs either inline or on the gevent
+ # thread pool (see _latent_event).
+ cdef grpc_event c_event = _next(latent_event_arg.c_completion_queue, latent_event_arg.deadline)
+ return _interpret_event(c_event)
+
+cdef _latent_event(grpc_completion_queue *c_completion_queue, object deadline):
+ # Return (tag, event) for the next completion-queue event, dispatching the
+ # blocking poll to a native thread pool when gevent is active so greenlets
+ # are not blocked.
+ global g_gevent_activated
+
+ latent_event_arg = _LatentEventArg()
+ latent_event_arg.c_completion_queue = c_completion_queue
+ latent_event_arg.deadline = deadline
+
+ if g_gevent_activated:
+ # For gevent, completion_queue_next is run in a native thread pool.
+ global g_gevent_threadpool
+
+ result = g_gevent_threadpool.apply(_internal_latent_event, (latent_event_arg,))
+ return result
+ else:
+ return _internal_latent_event(latent_event_arg)
+
+cdef class CompletionQueue:
+ # Python-facing wrapper over a core completion queue. With
+ # shutdown_cq=True a non-listening "next"-style queue is created
+ # (used for server shutdown); otherwise a regular next queue.
+
+ def __cinit__(self, shutdown_cq=False):
+ cdef grpc_completion_queue_attributes c_attrs
+ fork_handlers_and_grpc_init()
+ if shutdown_cq:
+ c_attrs.version = 1
+ c_attrs.cq_completion_type = GRPC_CQ_NEXT
+ c_attrs.cq_polling_type = GRPC_CQ_NON_LISTENING
+ c_attrs.cq_shutdown_cb = NULL
+ self.c_completion_queue = grpc_completion_queue_create(
+ grpc_completion_queue_factory_lookup(&c_attrs), &c_attrs, NULL);
+ else:
+ self.c_completion_queue = grpc_completion_queue_create_for_next(NULL)
+ self.is_shutting_down = False
+ self.is_shutdown = False
+
+ cdef _interpret_event(self, grpc_event c_event):
+ # Like the module-level _interpret_event, but also records when the
+ # queue's shutdown event has been observed.
+ unused_tag, event = _interpret_event(c_event)
+ if event.completion_type == GRPC_QUEUE_SHUTDOWN:
+ self.is_shutdown = True
+ return event
+
+ def _internal_poll(self, deadline):
+ return self._interpret_event(_next(self.c_completion_queue, deadline))
+
+ # We name this 'poll' to avoid problems with CPython's expectations for
+ # 'special' methods (like next and __next__).
+ def poll(self, deadline=None):
+ global g_gevent_activated
+ if g_gevent_activated:
+ # Under gevent, block in a native thread pool instead of a greenlet.
+ return g_gevent_threadpool.apply(CompletionQueue._internal_poll, (self, deadline))
+ else:
+ return self._internal_poll(deadline)
+
+ def shutdown(self):
+ with nogil:
+ grpc_completion_queue_shutdown(self.c_completion_queue)
+ self.is_shutting_down = True
+
+ def clear(self):
+ # Drain all remaining events after shutdown() has been called.
+ if not self.is_shutting_down:
+ raise ValueError('queue must be shutting down to be cleared')
+ while self.poll().type != GRPC_QUEUE_SHUTDOWN:
+ pass
+
+ def __dealloc__(self):
+ cdef gpr_timespec c_deadline
+ c_deadline = gpr_inf_future(GPR_CLOCK_REALTIME)
+ if self.c_completion_queue != NULL:
+ # Ensure shutdown
+ if not self.is_shutting_down:
+ grpc_completion_queue_shutdown(self.c_completion_queue)
+ # Pump the queue (All outstanding calls should have been cancelled)
+ while not self.is_shutdown:
+ event = grpc_completion_queue_next(
+ self.c_completion_queue, c_deadline, NULL)
+ self._interpret_event(event)
+ grpc_completion_queue_destroy(self.c_completion_queue)
+ # Balances the init done in __cinit__.
+ grpc_shutdown()
diff --git a/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/credentials.pxd.pxi b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/credentials.pxd.pxi
new file mode 100644
index 0000000000..827f6f17ca
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/credentials.pxd.pxi
@@ -0,0 +1,117 @@
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+cdef class CallCredentials:
+
+ cdef grpc_call_credentials *c(self) except *
+
+ # TODO(https://github.com/grpc/grpc/issues/12531): remove.
+ cdef grpc_call_credentials *c_credentials
+
+
+cdef int _get_metadata(
+ void *state, grpc_auth_metadata_context context,
+ grpc_credentials_plugin_metadata_cb cb, void *user_data,
+ grpc_metadata creds_md[GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX],
+ size_t *num_creds_md, grpc_status_code *status,
+ const char **error_details) except * with gil
+
+cdef void _destroy(void *state) except * with gil
+
+
+cdef class MetadataPluginCallCredentials(CallCredentials):
+
+ cdef readonly object _metadata_plugin
+ cdef readonly bytes _name
+
+ cdef grpc_call_credentials *c(self) except *
+
+
+cdef grpc_call_credentials *_composition(call_credentialses)
+
+
+cdef class CompositeCallCredentials(CallCredentials):
+
+ cdef readonly tuple _call_credentialses
+
+ cdef grpc_call_credentials *c(self) except *
+
+
+cdef class ChannelCredentials:
+
+ cdef grpc_channel_credentials *c(self) except *
+
+
+cdef class SSLSessionCacheLRU:
+
+ cdef grpc_ssl_session_cache *_cache
+
+
+cdef class SSLChannelCredentials(ChannelCredentials):
+
+ cdef readonly object _pem_root_certificates
+ cdef readonly object _private_key
+ cdef readonly object _certificate_chain
+
+ cdef grpc_channel_credentials *c(self) except *
+
+
+cdef class CompositeChannelCredentials(ChannelCredentials):
+
+ cdef readonly tuple _call_credentialses
+ cdef readonly ChannelCredentials _channel_credentials
+
+ cdef grpc_channel_credentials *c(self) except *
+
+
+cdef class XDSChannelCredentials(ChannelCredentials):
+
+ cdef readonly ChannelCredentials _fallback_credentials
+
+ cdef grpc_channel_credentials *c(self) except *
+
+
+cdef class ServerCertificateConfig:
+
+ cdef grpc_ssl_server_certificate_config *c_cert_config
+ cdef const char *c_pem_root_certs
+ cdef grpc_ssl_pem_key_cert_pair *c_ssl_pem_key_cert_pairs
+ cdef size_t c_ssl_pem_key_cert_pairs_count
+ cdef list references
+
+
+cdef class ServerCredentials:
+
+ cdef grpc_server_credentials *c_credentials
+ cdef grpc_ssl_pem_key_cert_pair *c_ssl_pem_key_cert_pairs
+ cdef size_t c_ssl_pem_key_cert_pairs_count
+ cdef list references
+ # the cert config related state is used only if this credentials is
+ # created with cert config/fetcher
+ cdef object initial_cert_config
+ cdef object cert_config_fetcher
+ # whether C-core has asked for the initial_cert_config
+ cdef bint initial_cert_config_fetched
+
+
+cdef class LocalChannelCredentials(ChannelCredentials):
+
+ cdef grpc_local_connect_type _local_connect_type
+
+
+cdef class ALTSChannelCredentials(ChannelCredentials):
+ cdef grpc_alts_credentials_options *c_options
+
+ cdef grpc_channel_credentials *c(self) except *
diff --git a/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/credentials.pyx.pxi b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/credentials.pyx.pxi
new file mode 100644
index 0000000000..27b56aa378
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/credentials.pyx.pxi
@@ -0,0 +1,443 @@
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+def _spawn_callback_in_thread(cb_func, args):
+  """Run cb_func(*args) on a fresh daemonized, fork-managed thread."""
+  worker = ForkManagedThread(target=cb_func, args=args)
+  worker.setDaemon(True)
+  worker.start()
+
+async_callback_func = _spawn_callback_in_thread
+
+def set_async_callback_func(callback_func):
+  """Replace how credential-plugin callbacks are dispatched (e.g. by gevent)."""
+  global async_callback_func
+  async_callback_func = callback_func
+
+def _spawn_callback_async(callback, args):
+  # Indirection point: dispatch through whichever scheduler is installed.
+  async_callback_func(callback, args)
+
+
+cdef class CallCredentials:
+
+  # Abstract base; subclasses return a new grpc_call_credentials reference
+  # that the caller is responsible for releasing.
+  cdef grpc_call_credentials *c(self) except *:
+    raise NotImplementedError()
+
+
+# C-core entry point for metadata credential plugins: 'state' is the Python
+# plugin object (ref held by MetadataPluginCallCredentials.c()); the actual
+# work is deferred to another thread and reported via 'cb'.
+cdef int _get_metadata(void *state,
+                       grpc_auth_metadata_context context,
+                       grpc_credentials_plugin_metadata_cb cb,
+                       void *user_data,
+                       grpc_metadata creds_md[GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX],
+                       size_t *num_creds_md,
+                       grpc_status_code *status,
+                       const char **error_details) except * with gil:
+  cdef size_t metadata_count
+  cdef grpc_metadata *c_metadata
+  def callback(metadata, grpc_status_code status, bytes error_details):
+    # Invoked (possibly on another thread) with the plugin's result.
+    cdef char* c_error_details = NULL
+    if error_details is not None:
+      c_error_details = <char*> error_details
+    if status == StatusCode.ok:
+      # Convert to C metadata, hand it to C-core, then free our copy.
+      _store_c_metadata(metadata, &c_metadata, &metadata_count)
+      with nogil:
+        cb(user_data, c_metadata, metadata_count, status, NULL)
+      _release_c_metadata(c_metadata, metadata_count)
+    else:
+      with nogil:
+        cb(user_data, NULL, 0, status, c_error_details)
+  args = context.service_url, context.method_name, callback,
+  plugin = <object>state
+  if plugin._stored_ctx is not None:
+    # Run under a copy of the contextvars context captured at creation time.
+    plugin._stored_ctx.copy().run(_spawn_callback_async, plugin, args)
+  else:
+    _spawn_callback_async(<object>state, args)
+  return 0  # Asynchronous return
+
+
+cdef void _destroy(void *state) except * with gil:
+  # Drop the reference taken in MetadataPluginCallCredentials.c() and
+  # balance the grpc_init performed by fork_handlers_and_grpc_init().
+  cpython.Py_DECREF(<object>state)
+  grpc_shutdown()
+
+
+cdef class MetadataPluginCallCredentials(CallCredentials):
+
+  def __cinit__(self, metadata_plugin, name):
+    self._metadata_plugin = metadata_plugin
+    self._name = name
+
+  cdef grpc_call_credentials *c(self) except *:
+    # Wire the Python plugin into a C-core plugin struct; the INCREF below
+    # is balanced by cpython.Py_DECREF in _destroy when C-core is done.
+    cdef grpc_metadata_credentials_plugin c_metadata_plugin
+    c_metadata_plugin.get_metadata = _get_metadata
+    c_metadata_plugin.destroy = _destroy
+    c_metadata_plugin.state = <void *>self._metadata_plugin
+    c_metadata_plugin.type = self._name
+    cpython.Py_INCREF(self._metadata_plugin)
+    fork_handlers_and_grpc_init()
+    # TODO(yihuazhang): Expose min_security_level via the Python API so that
+    # applications can decide what minimum security level their plugins require.
+    return grpc_metadata_credentials_create_from_plugin(c_metadata_plugin, GRPC_PRIVACY_AND_INTEGRITY, NULL)
+
+
+# Fold a sequence of CallCredentials into one composite C credential.
+# Each pairwise composite takes its own references, so both inputs are
+# released after every composition step; the caller owns the result.
+cdef grpc_call_credentials *_composition(call_credentialses):
+  call_credentials_iterator = iter(call_credentialses)
+  cdef CallCredentials composition = next(call_credentials_iterator)
+  cdef grpc_call_credentials *c_composition = composition.c()
+  cdef CallCredentials additional_call_credentials
+  cdef grpc_call_credentials *c_additional_call_credentials
+  cdef grpc_call_credentials *c_next_composition
+  for additional_call_credentials in call_credentials_iterator:
+    c_additional_call_credentials = additional_call_credentials.c()
+    c_next_composition = grpc_composite_call_credentials_create(
+        c_composition, c_additional_call_credentials, NULL)
+    grpc_call_credentials_release(c_composition)
+    grpc_call_credentials_release(c_additional_call_credentials)
+    c_composition = c_next_composition
+  return c_composition
+
+
+cdef class CompositeCallCredentials(CallCredentials):
+
+  def __cinit__(self, call_credentialses):
+    # Tuple of CallCredentials composed lazily in c().
+    self._call_credentialses = call_credentialses
+
+  cdef grpc_call_credentials *c(self) except *:
+    return _composition(self._call_credentialses)
+
+
+cdef class ChannelCredentials:
+
+  # Abstract base; subclasses return a new grpc_channel_credentials
+  # reference that the caller is responsible for releasing.
+  cdef grpc_channel_credentials *c(self) except *:
+    raise NotImplementedError()
+
+
+cdef class SSLSessionCacheLRU:
+  """Owns an LRU TLS-session cache; destroyed (and grpc shut down) on dealloc."""
+
+  def __cinit__(self, capacity):
+    fork_handlers_and_grpc_init()
+    self._cache = grpc_ssl_session_cache_create_lru(capacity)
+
+  def __int__(self):
+    # Expose the raw cache pointer as an integer (used as a channel arg).
+    return <uintptr_t>self._cache
+
+  def __dealloc__(self):
+    if self._cache != NULL:
+      grpc_ssl_session_cache_destroy(self._cache)
+    grpc_shutdown()
+
+
+cdef class SSLChannelCredentials(ChannelCredentials):
+
+  def __cinit__(self, pem_root_certificates, private_key, certificate_chain):
+    # Only the root certificates are type-checked here; key/chain are
+    # validated implicitly by the C conversion in c().
+    if pem_root_certificates is not None and not isinstance(pem_root_certificates, bytes):
+      raise TypeError('expected certificate to be bytes, got %s' % (type(pem_root_certificates)))
+    self._pem_root_certificates = pem_root_certificates
+    self._private_key = private_key
+    self._certificate_chain = certificate_chain
+
+  cdef grpc_call_credentials *c(self) except *:
+    # NOTE: the C pointers below point into the Python bytes objects held
+    # on self, which keeps them alive for the duration of this call.
+    cdef const char *c_pem_root_certificates
+    cdef grpc_ssl_pem_key_cert_pair c_pem_key_certificate_pair
+    if self._pem_root_certificates is None:
+      c_pem_root_certificates = NULL
+    else:
+      c_pem_root_certificates = self._pem_root_certificates
+    if self._private_key is None and self._certificate_chain is None:
+      # No client identity: server-auth-only SSL credentials.
+      return grpc_ssl_credentials_create(
+          c_pem_root_certificates, NULL, NULL, NULL)
+    else:
+      if self._private_key:
+        c_pem_key_certificate_pair.private_key = self._private_key
+      else:
+        c_pem_key_certificate_pair.private_key = NULL
+      if self._certificate_chain:
+        c_pem_key_certificate_pair.certificate_chain = self._certificate_chain
+      else:
+        c_pem_key_certificate_pair.certificate_chain = NULL
+      return grpc_ssl_credentials_create(
+          c_pem_root_certificates, &c_pem_key_certificate_pair, NULL, NULL)
+
+
+cdef class CompositeChannelCredentials(ChannelCredentials):
+
+  def __cinit__(self, call_credentialses, channel_credentials):
+    self._call_credentialses = call_credentialses
+    self._channel_credentials = channel_credentials
+
+  cdef grpc_channel_credentials *c(self) except *:
+    # Compose the base channel credentials with the composed call
+    # credentials; the composite holds its own references, so ours are
+    # released before returning.
+    cdef grpc_channel_credentials *c_channel_credentials
+    c_channel_credentials = self._channel_credentials.c()
+    cdef grpc_call_credentials *c_call_credentials_composition = _composition(
+        self._call_credentialses)
+    # Fix: the original declared an unused variable 'composition' and
+    # assigned the undeclared name 'c_composition', relying on Cython type
+    # inference; declare the variable that is actually used.
+    cdef grpc_channel_credentials *c_composition
+    c_composition = grpc_composite_channel_credentials_create(
+        c_channel_credentials, c_call_credentials_composition, NULL)
+    grpc_channel_credentials_release(c_channel_credentials)
+    grpc_call_credentials_release(c_call_credentials_composition)
+    return c_composition
+
+
+cdef class XDSChannelCredentials(ChannelCredentials):
+
+  def __cinit__(self, fallback_credentials):
+    # Used when the xDS control plane does not supply security config.
+    self._fallback_credentials = fallback_credentials
+
+  cdef grpc_channel_credentials *c(self) except *:
+    # xds_creds takes its own reference to the fallback, so ours is released.
+    cdef grpc_channel_credentials *c_fallback_creds = self._fallback_credentials.c()
+    cdef grpc_channel_credentials *xds_creds = grpc_xds_credentials_create(c_fallback_creds)
+    grpc_channel_credentials_release(c_fallback_creds)
+    return xds_creds
+
+
+cdef class ServerCertificateConfig:
+  """Owns one grpc_ssl_server_certificate_config plus the buffers it points into."""
+
+  def __cinit__(self):
+    fork_handlers_and_grpc_init()
+    self.c_cert_config = NULL
+    self.c_pem_root_certs = NULL
+    self.c_ssl_pem_key_cert_pairs = NULL
+    # Python objects (cert bytes) kept alive for the C pointers above.
+    self.references = []
+
+  def __dealloc__(self):
+    grpc_ssl_server_certificate_config_destroy(self.c_cert_config)
+    gpr_free(self.c_ssl_pem_key_cert_pairs)
+    grpc_shutdown()
+
+
+cdef class ServerCredentials:
+  """Owns one grpc_server_credentials and optional dynamic-cert-fetch state."""
+
+  def __cinit__(self):
+    fork_handlers_and_grpc_init()
+    self.c_credentials = NULL
+    # Python objects kept alive for C pointers held by c_credentials.
+    self.references = []
+    # Only populated when built via the dynamic cert-config fetcher path.
+    self.initial_cert_config = None
+    self.cert_config_fetcher = None
+    self.initial_cert_config_fetched = False
+
+  def __dealloc__(self):
+    if self.c_credentials != NULL:
+      grpc_server_credentials_release(self.c_credentials)
+    grpc_shutdown()
+
+# Borrow a C pointer into the given bytes object (or NULL). The caller must
+# keep pem_root_certs alive for as long as the pointer is used.
+cdef const char* _get_c_pem_root_certs(pem_root_certs):
+  if pem_root_certs is None:
+    return NULL
+  else:
+    return pem_root_certs
+
+cdef grpc_ssl_pem_key_cert_pair* _create_c_ssl_pem_key_cert_pairs(pem_key_cert_pairs):
+  # return a malloc'ed grpc_ssl_pem_key_cert_pair from a _list_ of SslPemKeyCertPair
+  # (caller owns the buffer and must gpr_free it; the entries borrow the
+  # c_pair members of the given SslPemKeyCertPair objects).
+  for pair in pem_key_cert_pairs:
+    if not isinstance(pair, SslPemKeyCertPair):
+      raise TypeError("expected pem_key_cert_pairs to be sequence of "
+                      "SslPemKeyCertPair")
+  cdef size_t c_ssl_pem_key_cert_pairs_count = len(pem_key_cert_pairs)
+  cdef grpc_ssl_pem_key_cert_pair* c_ssl_pem_key_cert_pairs = NULL
+  with nogil:
+    c_ssl_pem_key_cert_pairs = (
+      <grpc_ssl_pem_key_cert_pair *>gpr_malloc(
+        sizeof(grpc_ssl_pem_key_cert_pair) * c_ssl_pem_key_cert_pairs_count))
+  for i in range(c_ssl_pem_key_cert_pairs_count):
+    c_ssl_pem_key_cert_pairs[i] = (
+      (<SslPemKeyCertPair>pem_key_cert_pairs[i]).c_pair)
+  return c_ssl_pem_key_cert_pairs
+
+def server_credentials_ssl(pem_root_certs, pem_key_cert_pairs,
+                           bint force_client_auth):
+  """Build static SSL ServerCredentials from root certs and key/cert pairs."""
+  pem_root_certs = str_to_bytes(pem_root_certs)
+  pem_key_cert_pairs = list(pem_key_cert_pairs)
+  cdef ServerCredentials credentials = ServerCredentials()
+  # Keep the Python buffers alive for the C pointers stored below.
+  credentials.references.append(pem_root_certs)
+  credentials.references.append(pem_key_cert_pairs)
+  cdef const char * c_pem_root_certs = _get_c_pem_root_certs(pem_root_certs)
+  credentials.c_ssl_pem_key_cert_pairs_count = len(pem_key_cert_pairs)
+  credentials.c_ssl_pem_key_cert_pairs = _create_c_ssl_pem_key_cert_pairs(pem_key_cert_pairs)
+  cdef grpc_ssl_server_certificate_config *c_cert_config = NULL
+  c_cert_config = grpc_ssl_server_certificate_config_create(
+    c_pem_root_certs, credentials.c_ssl_pem_key_cert_pairs,
+    credentials.c_ssl_pem_key_cert_pairs_count)
+  cdef grpc_ssl_server_credentials_options* c_options = NULL
+  # C-core assumes ownership of c_cert_config
+  c_options = grpc_ssl_server_credentials_create_options_using_config(
+    GRPC_SSL_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_AND_VERIFY
+    if force_client_auth else
+    GRPC_SSL_DONT_REQUEST_CLIENT_CERTIFICATE,
+    c_cert_config)
+  # C-core assumes ownership of c_options
+  credentials.c_credentials = grpc_ssl_server_credentials_create_with_options(c_options)
+  return credentials
+
+def server_certificate_config_ssl(pem_root_certs, pem_key_cert_pairs):
+  """Build a ServerCertificateConfig for use with the dynamic fetcher path."""
+  pem_root_certs = str_to_bytes(pem_root_certs)
+  pem_key_cert_pairs = list(pem_key_cert_pairs)
+  cdef ServerCertificateConfig cert_config = ServerCertificateConfig()
+  # Keep the Python buffers alive for the C pointers stored below.
+  cert_config.references.append(pem_root_certs)
+  cert_config.references.append(pem_key_cert_pairs)
+  cert_config.c_pem_root_certs = _get_c_pem_root_certs(pem_root_certs)
+  cert_config.c_ssl_pem_key_cert_pairs_count = len(pem_key_cert_pairs)
+  cert_config.c_ssl_pem_key_cert_pairs = _create_c_ssl_pem_key_cert_pairs(pem_key_cert_pairs)
+  cert_config.c_cert_config = grpc_ssl_server_certificate_config_create(
+    cert_config.c_pem_root_certs, cert_config.c_ssl_pem_key_cert_pairs,
+    cert_config.c_ssl_pem_key_cert_pairs_count)
+  return cert_config
+
+def server_credentials_ssl_dynamic_cert_config(initial_cert_config,
+                                               cert_config_fetcher,
+                                               bint force_client_auth):
+  """Build SSL ServerCredentials whose cert config is re-fetched by C-core.
+
+  cert_config_fetcher is invoked by _server_cert_config_fetcher_wrapper on
+  every fetch after the initial one.
+  """
+  if not isinstance(initial_cert_config, grpc.ServerCertificateConfiguration):
+    raise TypeError(
+        'initial_cert_config must be a grpc.ServerCertificateConfiguration')
+  if not callable(cert_config_fetcher):
+    raise TypeError('cert_config_fetcher must be callable')
+  cdef ServerCredentials credentials = ServerCredentials()
+  credentials.initial_cert_config = initial_cert_config
+  credentials.cert_config_fetcher = cert_config_fetcher
+  cdef grpc_ssl_server_credentials_options* c_options = NULL
+  c_options = grpc_ssl_server_credentials_create_options_using_config_fetcher(
+    GRPC_SSL_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_AND_VERIFY
+    if force_client_auth else
+    GRPC_SSL_DONT_REQUEST_CLIENT_CERTIFICATE,
+    _server_cert_config_fetcher_wrapper,
+    <void*>credentials)
+  # C-core assumes ownership of c_options
+  credentials.c_credentials = grpc_ssl_server_credentials_create_with_options(c_options)
+  return credentials
+
+# Called by C-core whenever it wants a (possibly refreshed) server cert
+# config. Returns UNCHANGED/FAIL/NEW; on NEW, ownership of the config in
+# *config passes to the caller.
+cdef grpc_ssl_certificate_config_reload_status _server_cert_config_fetcher_wrapper(
+        void* user_data, grpc_ssl_server_certificate_config **config) with gil:
+  # This is a credentials.ServerCertificateConfig
+  cdef ServerCertificateConfig cert_config = None
+  if not user_data:
+    raise ValueError('internal error: user_data must be specified')
+  credentials = <ServerCredentials>user_data
+  if not credentials.initial_cert_config_fetched:
+    # C-core is asking for the initial cert config
+    credentials.initial_cert_config_fetched = True
+    cert_config = credentials.initial_cert_config._certificate_configuration
+  else:
+    # Subsequent fetches go through the user-supplied callable.
+    user_cb = credentials.cert_config_fetcher
+    try:
+      cert_config_wrapper = user_cb()
+    except Exception:
+      _LOGGER.exception('Error fetching certificate config')
+      return GRPC_SSL_CERTIFICATE_CONFIG_RELOAD_FAIL
+    if cert_config_wrapper is None:
+      return GRPC_SSL_CERTIFICATE_CONFIG_RELOAD_UNCHANGED
+    elif not isinstance(
+        cert_config_wrapper, grpc.ServerCertificateConfiguration):
+      _LOGGER.error(
+          'Error fetching certificate configuration: certificate '
+          'configuration must be of type grpc.ServerCertificateConfiguration, '
+          'not %s' % type(cert_config_wrapper).__name__)
+      return GRPC_SSL_CERTIFICATE_CONFIG_RELOAD_FAIL
+    else:
+      cert_config = cert_config_wrapper._certificate_configuration
+  config[0] = <grpc_ssl_server_certificate_config*>cert_config.c_cert_config
+  # our caller will assume ownership of memory, so we have to recreate
+  # a copy of c_cert_config here
+  cert_config.c_cert_config = grpc_ssl_server_certificate_config_create(
+    cert_config.c_pem_root_certs, cert_config.c_ssl_pem_key_cert_pairs,
+    cert_config.c_ssl_pem_key_cert_pairs_count)
+  return GRPC_SSL_CERTIFICATE_CONFIG_RELOAD_NEW
+
+
+class LocalConnectionType:
+ uds = UDS
+ local_tcp = LOCAL_TCP
+
+cdef class LocalChannelCredentials(ChannelCredentials):
+  """Channel credentials for local (UDS or local-TCP) connections."""
+
+  def __cinit__(self, grpc_local_connect_type local_connect_type):
+    self._local_connect_type = local_connect_type
+
+  cdef grpc_channel_credentials *c(self) except *:
+    # Create fresh local credentials for the stored connect type.
+    return grpc_local_credentials_create(self._local_connect_type)
+
+def channel_credentials_local(grpc_local_connect_type local_connect_type):
+ return LocalChannelCredentials(local_connect_type)
+
+cdef class InsecureChannelCredentials(ChannelCredentials):
+
+ cdef grpc_channel_credentials *c(self) except *:
+ return grpc_insecure_credentials_create()
+
+def channel_credentials_insecure():
+ return InsecureChannelCredentials()
+
+def server_credentials_local(grpc_local_connect_type local_connect_type):
+ cdef ServerCredentials credentials = ServerCredentials()
+ credentials.c_credentials = grpc_local_server_credentials_create(local_connect_type)
+ return credentials
+
+def xds_server_credentials(ServerCredentials fallback_credentials):
+ cdef ServerCredentials credentials = ServerCredentials()
+ credentials.c_credentials = grpc_xds_server_credentials_create(fallback_credentials.c_credentials)
+ # NOTE: We do not need to call grpc_server_credentials_release on the
+ # fallback credentials here because this will be done by the __dealloc__
+ # method of its Cython wrapper.
+ return credentials
+
+def insecure_server_credentials():
+ cdef ServerCredentials credentials = ServerCredentials()
+ credentials.c_credentials = grpc_insecure_server_credentials_create()
+ return credentials
+
+cdef class ALTSChannelCredentials(ChannelCredentials):
+  """Client-side ALTS credentials restricted to the given service accounts."""
+
+  def __cinit__(self, list service_accounts):
+    self.c_options = grpc_alts_credentials_client_options_create()
+    cdef str account
+    for account in service_accounts:
+      grpc_alts_credentials_client_options_add_target_service_account(
+          self.c_options, str_to_bytes(account))
+
+  def __dealloc__(self):
+    if self.c_options != NULL:
+      grpc_alts_credentials_options_destroy(self.c_options)
+
+  cdef grpc_channel_credentials *c(self) except *:
+    # Options remain owned by self; grpc_alts_credentials_create copies
+    # what it needs (see server_credentials_alts, which destroys options
+    # immediately after creation).
+    return grpc_alts_credentials_create(self.c_options)
+
+
+def channel_credentials_alts(list service_accounts):
+ return ALTSChannelCredentials(service_accounts)
+
+
+def server_credentials_alts():
+  """Build server-side ALTS credentials with default options."""
+  cdef ServerCredentials credentials = ServerCredentials()
+  cdef grpc_alts_credentials_options* c_options = grpc_alts_credentials_server_options_create()
+  credentials.c_credentials = grpc_alts_server_credentials_create(c_options)
+  # Options can be destroyed as deep copy was performed.
+  grpc_alts_credentials_options_destroy(c_options)
+  return credentials
+
+
+cdef class ComputeEngineChannelCredentials(ChannelCredentials):
+  # Google-default channel credentials bound to explicit call credentials.
+  cdef grpc_channel_credentials* _c_creds
+  cdef grpc_call_credentials* _call_creds
+
+  def __cinit__(self, CallCredentials call_creds):
+    self._c_creds = NULL
+    self._call_creds = call_creds.c()
+    if self._call_creds == NULL:
+      raise ValueError("Call credentials may not be NULL.")
+
+  cdef grpc_channel_credentials *c(self) except *:
+    # NOTE(review): _call_creds is never explicitly released here; confirm
+    # that grpc_google_default_credentials_create takes ownership of it.
+    self._c_creds = grpc_google_default_credentials_create(self._call_creds)
+    return self._c_creds
+
+
+def channel_credentials_compute_engine(call_creds):
+ return ComputeEngineChannelCredentials(call_creds)
diff --git a/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/csds.pyx.pxi b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/csds.pyx.pxi
new file mode 100644
index 0000000000..c33eb76e47
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/csds.pyx.pxi
@@ -0,0 +1,21 @@
+# Copyright 2021 The gRPC Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+def dump_xds_configs():
+  """Return the client's current xDS config state as serialized bytes (CSDS)."""
+  cdef grpc_slice client_config_in_slice
+  with nogil:
+    client_config_in_slice = grpc_dump_xds_configs()
+  # Copy the slice contents into a Python bytes object.
+  cdef bytes result = _slice_bytes(client_config_in_slice)
+  return result
diff --git a/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/event.pxd.pxi b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/event.pxd.pxi
new file mode 100644
index 0000000000..0f173c6bd2
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/event.pxd.pxi
@@ -0,0 +1,47 @@
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+cdef class BaseEvent:
+ pass
+
+cdef class ConnectivityEvent(BaseEvent):
+
+ cdef readonly grpc_completion_type completion_type
+ cdef readonly bint success
+ cdef readonly object tag
+
+
+cdef class RequestCallEvent(BaseEvent):
+
+ cdef readonly grpc_completion_type completion_type
+ cdef readonly bint success
+ cdef readonly object tag
+ cdef readonly Call call
+ cdef readonly CallDetails call_details
+ cdef readonly tuple invocation_metadata
+
+
+cdef class BatchOperationEvent(BaseEvent):
+
+ cdef readonly grpc_completion_type completion_type
+ cdef readonly bint success
+ cdef readonly object tag
+ cdef readonly object batch_operations
+
+
+cdef class ServerShutdownEvent(BaseEvent):
+
+ cdef readonly grpc_completion_type completion_type
+ cdef readonly bint success
+ cdef readonly object tag
diff --git a/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/event.pyx.pxi b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/event.pyx.pxi
new file mode 100644
index 0000000000..49cd039255
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/event.pyx.pxi
@@ -0,0 +1,54 @@
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+cdef class ConnectivityEvent(BaseEvent):
+
+ def __cinit__(
+ self, grpc_completion_type completion_type, bint success, object tag):
+ self.completion_type = completion_type
+ self.success = success
+ self.tag = tag
+
+
+cdef class RequestCallEvent(BaseEvent):
+  # Immutable record of a completed server request_call operation.
+
+  def __cinit__(
+      self, grpc_completion_type completion_type, bint success, object tag,
+      Call call, CallDetails call_details, tuple invocation_metadata):
+    self.completion_type = completion_type
+    self.success = success
+    self.tag = tag
+    self.call = call
+    self.call_details = call_details
+    self.invocation_metadata = invocation_metadata
+
+
+cdef class BatchOperationEvent(BaseEvent):
+
+ def __cinit__(
+ self, grpc_completion_type completion_type, bint success, object tag,
+ object batch_operations):
+ self.completion_type = completion_type
+ self.success = success
+ self.tag = tag
+ self.batch_operations = batch_operations
+
+
+cdef class ServerShutdownEvent(BaseEvent):
+
+ def __cinit__(
+ self, grpc_completion_type completion_type, bint success, object tag):
+ self.completion_type = completion_type
+ self.success = success
+ self.tag = tag
diff --git a/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/fork_posix.pxd.pxi b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/fork_posix.pxd.pxi
new file mode 100644
index 0000000000..a925bdd2e6
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/fork_posix.pxd.pxi
@@ -0,0 +1,29 @@
+# Copyright 2018 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+cdef extern from "pthread.h" nogil:
+ int pthread_atfork(
+ void (*prepare)() nogil,
+ void (*parent)() nogil,
+ void (*child)() nogil)
+
+
+cdef void __prefork() nogil
+
+
+cdef void __postfork_parent() nogil
+
+
+cdef void __postfork_child() nogil \ No newline at end of file
diff --git a/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/fork_posix.pyx.pxi b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/fork_posix.pyx.pxi
new file mode 100644
index 0000000000..53657e8b1a
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/fork_posix.pyx.pxi
@@ -0,0 +1,208 @@
+# Copyright 2018 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+_AWAIT_THREADS_TIMEOUT_SECONDS = 5
+
+_TRUE_VALUES = ['yes', 'Yes', 'YES', 'true', 'True', 'TRUE', '1']
+
+# This flag enables experimental support within gRPC Python for applications
+# that will fork() without exec(). When enabled, gRPC Python will attempt to
+# pause all of its internally created threads before the fork syscall proceeds.
+#
+# For this to be successful, the application must not have multiple threads of
+# its own calling into gRPC when fork is invoked. Any callbacks from gRPC
+# Python-spawned threads into user code (e.g., callbacks for asynchronous RPCs)
+# must not block and should execute quickly.
+#
+# This flag is not supported on Windows.
+# This flag is also not supported for non-native IO manager.
+_GRPC_ENABLE_FORK_SUPPORT = (
+ os.environ.get('GRPC_ENABLE_FORK_SUPPORT', '0')
+ .lower() in _TRUE_VALUES)
+
+_fork_handler_failed = False
+
+# pthread_atfork 'prepare' handler: pause gRPC threads before fork().
+cdef void __prefork() nogil:
+  with gil:
+    global _fork_handler_failed
+    _fork_handler_failed = False
+    with _fork_state.fork_in_progress_condition:
+      _fork_state.fork_in_progress = True
+      # Wait (bounded) for all gRPC-spawned threads to park themselves.
+      if not _fork_state.active_thread_count.await_zero_threads(
+          _AWAIT_THREADS_TIMEOUT_SECONDS):
+        _LOGGER.error(
+            'Failed to shutdown gRPC Python threads prior to fork. '
+            'Behavior after fork will be undefined.')
+        _fork_handler_failed = True
+
+
+# pthread_atfork 'parent' handler: release paused threads in the parent.
+cdef void __postfork_parent() nogil:
+  with gil:
+    with _fork_state.fork_in_progress_condition:
+      _fork_state.fork_in_progress = False
+      _fork_state.fork_in_progress_condition.notify_all()
+
+
+# pthread_atfork 'child' handler: rebuild Python-side state and close
+# inherited channels; exits the child process outright if recovery fails.
+cdef void __postfork_child() nogil:
+  with gil:
+    try:
+      if _fork_handler_failed:
+        return
+      # Thread could be holding the fork_in_progress_condition inside of
+      # block_if_fork_in_progress() when fork occurs. Reset the lock here.
+      _fork_state.fork_in_progress_condition = threading.Condition()
+      # A thread in return_from_user_request_generator() may hold this lock
+      # when fork occurs.
+      _fork_state.active_thread_count = _ActiveThreadCount()
+      for state_to_reset in _fork_state.postfork_states_to_reset:
+        state_to_reset.reset_postfork_child()
+      _fork_state.postfork_states_to_reset = []
+      _fork_state.fork_epoch += 1
+      for channel in _fork_state.channels:
+        channel._close_on_fork()
+      with _fork_state.fork_in_progress_condition:
+        _fork_state.fork_in_progress = False
+    except:
+      # Deliberately broad: nothing may propagate out of an atfork handler;
+      # the child must terminate instead.
+      _LOGGER.error('Exiting child due to raised exception')
+      _LOGGER.error(sys.exc_info()[0])
+      os._exit(os.EX_USAGE)
+
+  if grpc_is_initialized() > 0:
+    with gil:
+      _LOGGER.error('Failed to shutdown gRPC Core after fork()')
+      os._exit(os.EX_USAGE)
+
+
+def fork_handlers_and_grpc_init():
+  """Initialize gRPC Core and, once per process, register fork handlers."""
+  grpc_init()
+  if _GRPC_ENABLE_FORK_SUPPORT:
+    with _fork_state.fork_handler_registered_lock:
+      # Register at most once; pthread_atfork handlers cannot be removed.
+      if not _fork_state.fork_handler_registered:
+        pthread_atfork(&__prefork, &__postfork_parent, &__postfork_child)
+        _fork_state.fork_handler_registered = True
+
+
+
+
+class ForkManagedThread(object):
+  """Thread wrapper that keeps the fork-support active-thread count accurate."""
+  def __init__(self, target, args=()):
+    run_target = target
+    if _GRPC_ENABLE_FORK_SUPPORT:
+      def managed_target(*call_args):
+        # Decrement even if target raises, so __prefork never waits forever.
+        try:
+          target(*call_args)
+        finally:
+          _fork_state.active_thread_count.decrement()
+      run_target = managed_target
+    self._thread = threading.Thread(
+        target=_run_with_context(run_target), args=args)
+
+  def setDaemon(self, daemonic):
+    self._thread.daemon = daemonic
+
+  def start(self):
+    if _GRPC_ENABLE_FORK_SUPPORT:
+      _fork_state.active_thread_count.increment()
+    self._thread.start()
+
+  def join(self):
+    self._thread.join()
+
+
+def block_if_fork_in_progress(postfork_state_to_reset=None):
+  """Park the calling gRPC thread until an in-progress fork completes."""
+  if _GRPC_ENABLE_FORK_SUPPORT:
+    with _fork_state.fork_in_progress_condition:
+      if not _fork_state.fork_in_progress:
+        return
+      if postfork_state_to_reset is not None:
+        # Queue this object for reset_postfork_child() in the child process.
+        _fork_state.postfork_states_to_reset.append(postfork_state_to_reset)
+      # Leave the active count while parked so __prefork can reach zero.
+      _fork_state.active_thread_count.decrement()
+      _fork_state.fork_in_progress_condition.wait()
+      _fork_state.active_thread_count.increment()
+
+
+def enter_user_request_generator():
+ if _GRPC_ENABLE_FORK_SUPPORT:
+ _fork_state.active_thread_count.decrement()
+
+
+def return_from_user_request_generator():
+ if _GRPC_ENABLE_FORK_SUPPORT:
+ _fork_state.active_thread_count.increment()
+ block_if_fork_in_progress()
+
+
+def get_fork_epoch():
+ return _fork_state.fork_epoch
+
+
+def is_fork_support_enabled():
+ return _GRPC_ENABLE_FORK_SUPPORT
+
+
+def fork_register_channel(channel):
+ if _GRPC_ENABLE_FORK_SUPPORT:
+ _fork_state.channels.add(channel)
+
+
+def fork_unregister_channel(channel):
+ if _GRPC_ENABLE_FORK_SUPPORT:
+ _fork_state.channels.discard(channel)
+
+
+class _ActiveThreadCount(object):
+  """Condition-protected counter of live gRPC-spawned threads."""
+  def __init__(self):
+    self._num_active_threads = 0
+    self._condition = threading.Condition()
+
+  def increment(self):
+    with self._condition:
+      self._num_active_threads += 1
+
+  def decrement(self):
+    with self._condition:
+      self._num_active_threads -= 1
+      if self._num_active_threads == 0:
+        # Wake any waiter in await_zero_threads().
+        self._condition.notify_all()
+
+  def await_zero_threads(self, timeout_secs):
+    """Return True if the count reaches zero within timeout_secs seconds."""
+    end_time = time.time() + timeout_secs
+    wait_time = timeout_secs
+    with self._condition:
+      while True:
+        if self._num_active_threads > 0:
+          self._condition.wait(wait_time)
+        if self._num_active_threads == 0:
+          return True
+        # Thread count may have increased before this re-obtains the
+        # lock after a notify(). Wait again until timeout_secs has
+        # elapsed.
+        wait_time = end_time - time.time()
+        if wait_time <= 0:
+          return False
+
+
+class _ForkState(object):
+  """Process-wide mutable state shared by the fork-support machinery."""
+  def __init__(self):
+    # Guards fork_in_progress; threads park on it during a fork.
+    self.fork_in_progress_condition = threading.Condition()
+    self.fork_in_progress = False
+    # Objects needing reset_postfork_child() in a forked child.
+    self.postfork_states_to_reset = []
+    # Ensures pthread_atfork handlers are registered only once.
+    self.fork_handler_registered_lock = threading.Lock()
+    self.fork_handler_registered = False
+    self.active_thread_count = _ActiveThreadCount()
+    # Incremented in the child after each fork.
+    self.fork_epoch = 0
+    # Channels to close in a forked child.
+    self.channels = set()
+
+
+_fork_state = _ForkState()
diff --git a/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/fork_windows.pyx.pxi b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/fork_windows.pyx.pxi
new file mode 100644
index 0000000000..67aaf4d033
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/fork_windows.pyx.pxi
@@ -0,0 +1,61 @@
+# Copyright 2018 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# No-op implementations for Windows.
+
+def fork_handlers_and_grpc_init():
+    # Windows build: fork() does not exist, so no fork handlers are installed;
+    # this only initializes the gRPC core library.
+    grpc_init()
+
+
+class ForkManagedThread(object):
+    """Thin wrapper over threading.Thread for the Windows (no-fork) build.
+
+    Presents the same interface as the fork-aware POSIX variant while
+    delegating all operations to a plain thread.
+    """
+
+    def __init__(self, target, args=()):
+        wrapped_target = _run_with_context(target)
+        self._thread = threading.Thread(target=wrapped_target, args=args)
+
+    def setDaemon(self, daemonic):
+        # Legacy camelCase name kept for interface compatibility.
+        self._thread.daemon = daemonic
+
+    def start(self):
+        self._thread.start()
+
+    def join(self):
+        self._thread.join()
+
+
+def block_if_fork_in_progress(postfork_state_to_reset=None):
+    """No-op: fork() never happens on Windows."""
+    pass
+
+
+def enter_user_request_generator():
+    """No-op: fork bookkeeping is unnecessary on Windows."""
+    pass
+
+
+def return_from_user_request_generator():
+    """No-op: fork bookkeeping is unnecessary on Windows."""
+    pass
+
+
+def get_fork_epoch():
+    """Always 0: without fork(), the epoch never advances."""
+    return 0
+
+
+def is_fork_support_enabled():
+    """Always False: fork support is never available on Windows."""
+    return False
+
+
+def fork_register_channel(channel):
+    """No-op: no channel registry is kept on Windows."""
+    pass
+
+
+def fork_unregister_channel(channel):
+    """No-op: no channel registry is kept on Windows."""
+    pass
diff --git a/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/grpc.pxi b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/grpc.pxi
new file mode 100644
index 0000000000..6e04e0cbfd
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/grpc.pxi
@@ -0,0 +1,735 @@
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+cimport libc.time
+
+ctypedef ssize_t intptr_t
+ctypedef size_t uintptr_t
+ctypedef signed char int8_t
+ctypedef signed short int16_t
+ctypedef signed int int32_t
+ctypedef signed long long int64_t
+ctypedef unsigned char uint8_t
+ctypedef unsigned short uint16_t
+ctypedef unsigned int uint32_t
+ctypedef unsigned long long uint64_t
+
+# C++ Utilities
+
+# NOTE(lidiz) Unfortunately, we can't use "cimport" here because Cython
+# links it with exception handling. It introduces new dependencies.
+cdef extern from "<queue>" namespace "std" nogil:
+ cdef cppclass queue[T]:
+ queue()
+ bint empty()
+ T& front()
+ T& back()
+ void pop()
+ void push(T&)
+ size_t size()
+
+
+cdef extern from "<mutex>" namespace "std" nogil:
+ cdef cppclass mutex:
+ mutex()
+ void lock()
+ void unlock()
+
+ cdef cppclass unique_lock[Mutex]:
+ unique_lock(Mutex&)
+
+cdef extern from "<condition_variable>" namespace "std" nogil:
+ cdef cppclass condition_variable:
+ condition_variable()
+ void notify_all()
+ void wait(unique_lock[mutex]&)
+
+# gRPC Core Declarations
+
+cdef extern from "grpc/support/alloc.h":
+
+ void *gpr_malloc(size_t size) nogil
+ void *gpr_zalloc(size_t size) nogil
+ void gpr_free(void *ptr) nogil
+ void *gpr_realloc(void *p, size_t size) nogil
+
+
+cdef extern from "grpc/byte_buffer_reader.h":
+
+ struct grpc_byte_buffer_reader:
+ # We don't care about the internals
+ pass
+
+
+cdef extern from "grpc/impl/codegen/grpc_types.h":
+ ctypedef struct grpc_completion_queue_functor:
+ void (*functor_run)(grpc_completion_queue_functor*, int);
+
+
+cdef extern from "grpc/grpc.h":
+
+ ctypedef struct grpc_slice:
+ # don't worry about writing out the members of grpc_slice; we never access
+ # them directly.
+ pass
+
+ grpc_slice grpc_slice_ref(grpc_slice s) nogil
+ void grpc_slice_unref(grpc_slice s) nogil
+ grpc_slice grpc_empty_slice() nogil
+ grpc_slice grpc_slice_new(void *p, size_t len, void (*destroy)(void *)) nogil
+ grpc_slice grpc_slice_new_with_len(
+ void *p, size_t len, void (*destroy)(void *, size_t)) nogil
+ grpc_slice grpc_slice_malloc(size_t length) nogil
+ grpc_slice grpc_slice_from_copied_string(const char *source) nogil
+ grpc_slice grpc_slice_from_copied_buffer(const char *source, size_t len) nogil
+ grpc_slice grpc_slice_copy(grpc_slice s) nogil
+
+ # Declare functions for function-like macros (because Cython)...
+ void *grpc_slice_start_ptr "GRPC_SLICE_START_PTR" (grpc_slice s) nogil
+ size_t grpc_slice_length "GRPC_SLICE_LENGTH" (grpc_slice s) nogil
+
+ const int GPR_MS_PER_SEC
+ const int GPR_US_PER_SEC
+ const int GPR_NS_PER_SEC
+
+ ctypedef enum gpr_clock_type:
+ GPR_CLOCK_MONOTONIC
+ GPR_CLOCK_REALTIME
+ GPR_CLOCK_PRECISE
+ GPR_TIMESPAN
+
+ ctypedef struct gpr_timespec:
+ int64_t seconds "tv_sec"
+ int32_t nanoseconds "tv_nsec"
+ gpr_clock_type clock_type
+
+ gpr_timespec gpr_time_0(gpr_clock_type type) nogil
+ gpr_timespec gpr_inf_future(gpr_clock_type type) nogil
+ gpr_timespec gpr_inf_past(gpr_clock_type type) nogil
+
+ gpr_timespec gpr_now(gpr_clock_type clock) nogil
+
+ gpr_timespec gpr_convert_clock_type(gpr_timespec t,
+ gpr_clock_type target_clock) nogil
+
+ gpr_timespec gpr_time_from_millis(int64_t ms, gpr_clock_type type) nogil
+ gpr_timespec gpr_time_from_nanos(int64_t ns, gpr_clock_type type) nogil
+ double gpr_timespec_to_micros(gpr_timespec t) nogil
+
+ gpr_timespec gpr_time_add(gpr_timespec a, gpr_timespec b) nogil
+
+ int gpr_time_cmp(gpr_timespec a, gpr_timespec b) nogil
+
+ ctypedef struct grpc_byte_buffer:
+ # We don't care about the internals.
+ pass
+
+ grpc_byte_buffer *grpc_raw_byte_buffer_create(grpc_slice *slices,
+ size_t nslices) nogil
+ size_t grpc_byte_buffer_length(grpc_byte_buffer *bb) nogil
+ void grpc_byte_buffer_destroy(grpc_byte_buffer *byte_buffer) nogil
+
+ int grpc_byte_buffer_reader_init(grpc_byte_buffer_reader *reader,
+ grpc_byte_buffer *buffer) nogil
+ int grpc_byte_buffer_reader_next(grpc_byte_buffer_reader *reader,
+ grpc_slice *slice) nogil
+ void grpc_byte_buffer_reader_destroy(grpc_byte_buffer_reader *reader) nogil
+
+ ctypedef enum grpc_status_code:
+ GRPC_STATUS_OK
+ GRPC_STATUS_CANCELLED
+ GRPC_STATUS_UNKNOWN
+ GRPC_STATUS_INVALID_ARGUMENT
+ GRPC_STATUS_DEADLINE_EXCEEDED
+ GRPC_STATUS_NOT_FOUND
+ GRPC_STATUS_ALREADY_EXISTS
+ GRPC_STATUS_PERMISSION_DENIED
+ GRPC_STATUS_UNAUTHENTICATED
+ GRPC_STATUS_RESOURCE_EXHAUSTED
+ GRPC_STATUS_FAILED_PRECONDITION
+ GRPC_STATUS_ABORTED
+ GRPC_STATUS_OUT_OF_RANGE
+ GRPC_STATUS_UNIMPLEMENTED
+ GRPC_STATUS_INTERNAL
+ GRPC_STATUS_UNAVAILABLE
+ GRPC_STATUS_DATA_LOSS
+ GRPC_STATUS__DO_NOT_USE
+
+ const char *GRPC_ARG_ENABLE_CENSUS
+ const char *GRPC_ARG_MAX_CONCURRENT_STREAMS
+ const char *GRPC_ARG_MAX_RECEIVE_MESSAGE_LENGTH
+ const char *GRPC_ARG_MAX_SEND_MESSAGE_LENGTH
+ const char *GRPC_ARG_HTTP2_INITIAL_SEQUENCE_NUMBER
+ const char *GRPC_ARG_DEFAULT_AUTHORITY
+ const char *GRPC_ARG_PRIMARY_USER_AGENT_STRING
+ const char *GRPC_ARG_SECONDARY_USER_AGENT_STRING
+ const char *GRPC_SSL_TARGET_NAME_OVERRIDE_ARG
+ const char *GRPC_SSL_SESSION_CACHE_ARG
+ const char *_GRPC_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM \
+ "GRPC_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM"
+ const char *GRPC_COMPRESSION_CHANNEL_DEFAULT_LEVEL
+ const char *GRPC_COMPRESSION_CHANNEL_ENABLED_ALGORITHMS_BITSET
+
+ const int GRPC_WRITE_BUFFER_HINT
+ const int GRPC_WRITE_NO_COMPRESS
+ const int GRPC_WRITE_USED_MASK
+
+ const int GRPC_INITIAL_METADATA_WAIT_FOR_READY
+ const int GRPC_INITIAL_METADATA_WAIT_FOR_READY_EXPLICITLY_SET
+ const int GRPC_INITIAL_METADATA_USED_MASK
+
+ const int GRPC_MAX_COMPLETION_QUEUE_PLUCKERS
+
+ ctypedef struct grpc_completion_queue:
+ # We don't care about the internals (and in fact don't know them)
+ pass
+
+ ctypedef struct grpc_channel:
+ # We don't care about the internals (and in fact don't know them)
+ pass
+
+ ctypedef struct grpc_server:
+ # We don't care about the internals (and in fact don't know them)
+ pass
+
+ ctypedef struct grpc_call:
+ # We don't care about the internals (and in fact don't know them)
+ pass
+
+ ctypedef enum grpc_arg_type:
+ GRPC_ARG_STRING
+ GRPC_ARG_INTEGER
+ GRPC_ARG_POINTER
+
+ ctypedef struct grpc_arg_pointer_vtable:
+ void *(*copy)(void *)
+ void (*destroy)(void *)
+ int (*cmp)(void *, void *)
+
+ ctypedef struct grpc_arg_value_pointer:
+ void *address "p"
+ grpc_arg_pointer_vtable *vtable
+
+ union grpc_arg_value:
+ char *string
+ int integer
+ grpc_arg_value_pointer pointer
+
+ ctypedef struct grpc_arg:
+ grpc_arg_type type
+ char *key
+ grpc_arg_value value
+
+ ctypedef struct grpc_channel_args:
+ size_t arguments_length "num_args"
+ grpc_arg *arguments "args"
+
+ ctypedef enum grpc_stream_compression_level:
+ GRPC_STREAM_COMPRESS_LEVEL_NONE
+ GRPC_STREAM_COMPRESS_LEVEL_LOW
+ GRPC_STREAM_COMPRESS_LEVEL_MED
+ GRPC_STREAM_COMPRESS_LEVEL_HIGH
+
+ ctypedef enum grpc_call_error:
+ GRPC_CALL_OK
+ GRPC_CALL_ERROR
+ GRPC_CALL_ERROR_NOT_ON_SERVER
+ GRPC_CALL_ERROR_NOT_ON_CLIENT
+ GRPC_CALL_ERROR_ALREADY_ACCEPTED
+ GRPC_CALL_ERROR_ALREADY_INVOKED
+ GRPC_CALL_ERROR_NOT_INVOKED
+ GRPC_CALL_ERROR_ALREADY_FINISHED
+ GRPC_CALL_ERROR_TOO_MANY_OPERATIONS
+ GRPC_CALL_ERROR_INVALID_FLAGS
+ GRPC_CALL_ERROR_INVALID_METADATA
+
+ ctypedef enum grpc_cq_completion_type:
+ GRPC_CQ_NEXT
+ GRPC_CQ_PLUCK
+
+ ctypedef enum grpc_cq_polling_type:
+ GRPC_CQ_DEFAULT_POLLING
+ GRPC_CQ_NON_LISTENING
+ GRPC_CQ_NON_POLLING
+
+ ctypedef struct grpc_completion_queue_attributes:
+ int version
+ grpc_cq_completion_type cq_completion_type
+ grpc_cq_polling_type cq_polling_type
+ void* cq_shutdown_cb
+
+ ctypedef enum grpc_connectivity_state:
+ GRPC_CHANNEL_IDLE
+ GRPC_CHANNEL_CONNECTING
+ GRPC_CHANNEL_READY
+ GRPC_CHANNEL_TRANSIENT_FAILURE
+ GRPC_CHANNEL_SHUTDOWN
+
+ ctypedef struct grpc_metadata:
+ grpc_slice key
+ grpc_slice value
+ # ignore the 'internal_data.obfuscated' fields.
+
+ ctypedef enum grpc_completion_type:
+ GRPC_QUEUE_SHUTDOWN
+ GRPC_QUEUE_TIMEOUT
+ GRPC_OP_COMPLETE
+
+ ctypedef struct grpc_event:
+ grpc_completion_type type
+ int success
+ void *tag
+
+ ctypedef struct grpc_metadata_array:
+ size_t count
+ size_t capacity
+ grpc_metadata *metadata
+
+ void grpc_metadata_array_init(grpc_metadata_array *array) nogil
+ void grpc_metadata_array_destroy(grpc_metadata_array *array) nogil
+
+ ctypedef struct grpc_call_details:
+ grpc_slice method
+ grpc_slice host
+ gpr_timespec deadline
+
+ void grpc_call_details_init(grpc_call_details *details) nogil
+ void grpc_call_details_destroy(grpc_call_details *details) nogil
+
+ ctypedef enum grpc_op_type:
+ GRPC_OP_SEND_INITIAL_METADATA
+ GRPC_OP_SEND_MESSAGE
+ GRPC_OP_SEND_CLOSE_FROM_CLIENT
+ GRPC_OP_SEND_STATUS_FROM_SERVER
+ GRPC_OP_RECV_INITIAL_METADATA
+ GRPC_OP_RECV_MESSAGE
+ GRPC_OP_RECV_STATUS_ON_CLIENT
+ GRPC_OP_RECV_CLOSE_ON_SERVER
+
+ ctypedef struct grpc_op_send_initial_metadata_maybe_compression_level:
+ uint8_t is_set
+ grpc_compression_level level
+
+ ctypedef struct grpc_op_data_send_initial_metadata:
+ size_t count
+ grpc_metadata *metadata
+ grpc_op_send_initial_metadata_maybe_compression_level maybe_compression_level
+
+ ctypedef struct grpc_op_data_send_status_from_server:
+ size_t trailing_metadata_count
+ grpc_metadata *trailing_metadata
+ grpc_status_code status
+ grpc_slice *status_details
+
+ ctypedef struct grpc_op_data_recv_status_on_client:
+ grpc_metadata_array *trailing_metadata
+ grpc_status_code *status
+ grpc_slice *status_details
+ char** error_string
+
+ ctypedef struct grpc_op_data_recv_close_on_server:
+ int *cancelled
+
+ ctypedef struct grpc_op_data_send_message:
+ grpc_byte_buffer *send_message
+
+ ctypedef struct grpc_op_data_receive_message:
+ grpc_byte_buffer **receive_message "recv_message"
+
+ ctypedef struct grpc_op_data_receive_initial_metadata:
+ grpc_metadata_array *receive_initial_metadata "recv_initial_metadata"
+
+ union grpc_op_data:
+ grpc_op_data_send_initial_metadata send_initial_metadata
+ grpc_op_data_send_message send_message
+ grpc_op_data_send_status_from_server send_status_from_server
+ grpc_op_data_receive_initial_metadata receive_initial_metadata "recv_initial_metadata"
+ grpc_op_data_receive_message receive_message "recv_message"
+ grpc_op_data_recv_status_on_client receive_status_on_client "recv_status_on_client"
+ grpc_op_data_recv_close_on_server receive_close_on_server "recv_close_on_server"
+
+ ctypedef struct grpc_op:
+ grpc_op_type type "op"
+ uint32_t flags
+ void * reserved
+ grpc_op_data data
+
+ void grpc_dont_init_openssl() nogil
+ void grpc_init() nogil
+ void grpc_shutdown() nogil
+ void grpc_shutdown_blocking() nogil
+ int grpc_is_initialized() nogil
+
+ ctypedef struct grpc_completion_queue_factory:
+ pass
+
+ grpc_completion_queue_factory *grpc_completion_queue_factory_lookup(
+ const grpc_completion_queue_attributes* attributes) nogil
+ grpc_completion_queue *grpc_completion_queue_create(
+ const grpc_completion_queue_factory* factory,
+ const grpc_completion_queue_attributes* attr, void* reserved) nogil
+ grpc_completion_queue *grpc_completion_queue_create_for_next(void *reserved) nogil
+
+ grpc_event grpc_completion_queue_next(grpc_completion_queue *cq,
+ gpr_timespec deadline,
+ void *reserved) nogil
+ grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cq, void *tag,
+ gpr_timespec deadline,
+ void *reserved) nogil
+ void grpc_completion_queue_shutdown(grpc_completion_queue *cq) nogil
+ void grpc_completion_queue_destroy(grpc_completion_queue *cq) nogil
+
+ grpc_completion_queue *grpc_completion_queue_create_for_callback(
+ grpc_completion_queue_functor* shutdown_callback,
+ void *reserved) nogil
+
+ grpc_call_error grpc_call_start_batch(
+ grpc_call *call, const grpc_op *ops, size_t nops, void *tag,
+ void *reserved) nogil
+ const char* grpc_call_error_to_string(grpc_call_error error) nogil
+ grpc_call_error grpc_call_cancel(grpc_call *call, void *reserved) nogil
+ grpc_call_error grpc_call_cancel_with_status(grpc_call *call,
+ grpc_status_code status,
+ const char *description,
+ void *reserved) nogil
+ char *grpc_call_get_peer(grpc_call *call) nogil
+ void grpc_call_unref(grpc_call *call) nogil
+
+ grpc_call *grpc_channel_create_call(
+ grpc_channel *channel, grpc_call *parent_call, uint32_t propagation_mask,
+ grpc_completion_queue *completion_queue, grpc_slice method,
+ const grpc_slice *host, gpr_timespec deadline, void *reserved) nogil
+ grpc_connectivity_state grpc_channel_check_connectivity_state(
+ grpc_channel *channel, int try_to_connect) nogil
+ void grpc_channel_watch_connectivity_state(
+ grpc_channel *channel, grpc_connectivity_state last_observed_state,
+ gpr_timespec deadline, grpc_completion_queue *cq, void *tag) nogil
+ char *grpc_channel_get_target(grpc_channel *channel) nogil
+ void grpc_channel_destroy(grpc_channel *channel) nogil
+
+ grpc_server *grpc_server_create(
+ const grpc_channel_args *args, void *reserved) nogil
+ grpc_call_error grpc_server_request_call(
+ grpc_server *server, grpc_call **call, grpc_call_details *details,
+ grpc_metadata_array *request_metadata, grpc_completion_queue
+ *cq_bound_to_call, grpc_completion_queue *cq_for_notification, void
+ *tag_new) nogil
+ void grpc_server_register_completion_queue(grpc_server *server,
+ grpc_completion_queue *cq,
+ void *reserved) nogil
+
+ ctypedef struct grpc_server_config_fetcher:
+ pass
+
+ void grpc_server_set_config_fetcher(
+ grpc_server* server, grpc_server_config_fetcher* config_fetcher) nogil
+
+ ctypedef struct grpc_server_xds_status_notifier:
+ void (*on_serving_status_update)(void* user_data, const char* uri,
+ grpc_status_code code,
+ const char* error_message)
+ void* user_data;
+
+ grpc_server_config_fetcher* grpc_server_config_fetcher_xds_create(
+ grpc_server_xds_status_notifier notifier,
+ const grpc_channel_args* args) nogil
+
+
+ void grpc_server_start(grpc_server *server) nogil
+ void grpc_server_shutdown_and_notify(
+ grpc_server *server, grpc_completion_queue *cq, void *tag) nogil
+ void grpc_server_cancel_all_calls(grpc_server *server) nogil
+ void grpc_server_destroy(grpc_server *server) nogil
+
+ char* grpc_channelz_get_top_channels(intptr_t start_channel_id)
+ char* grpc_channelz_get_servers(intptr_t start_server_id)
+ char* grpc_channelz_get_server(intptr_t server_id)
+ char* grpc_channelz_get_server_sockets(intptr_t server_id,
+ intptr_t start_socket_id,
+ intptr_t max_results)
+ char* grpc_channelz_get_channel(intptr_t channel_id)
+ char* grpc_channelz_get_subchannel(intptr_t subchannel_id)
+ char* grpc_channelz_get_socket(intptr_t socket_id)
+
+ grpc_slice grpc_dump_xds_configs() nogil
+
+
+cdef extern from "grpc/grpc_security.h":
+
+ # Declare this as an enum, this is the only way to make it a const in
+ # cython
+ enum: GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX
+
+ ctypedef enum grpc_ssl_roots_override_result:
+ GRPC_SSL_ROOTS_OVERRIDE_OK
+ GRPC_SSL_ROOTS_OVERRIDE_FAILED_PERMANENTLY
+ GRPC_SSL_ROOTS_OVERRIDE_FAILED
+
+ ctypedef enum grpc_ssl_client_certificate_request_type:
+ GRPC_SSL_DONT_REQUEST_CLIENT_CERTIFICATE,
+ GRPC_SSL_REQUEST_CLIENT_CERTIFICATE_BUT_DONT_VERIFY
+ GRPC_SSL_REQUEST_CLIENT_CERTIFICATE_AND_VERIFY
+ GRPC_SSL_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_BUT_DONT_VERIFY
+ GRPC_SSL_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_AND_VERIFY
+
+ ctypedef enum grpc_security_level:
+ GRPC_SECURITY_MIN
+ GRPC_SECURITY_NONE = GRPC_SECURITY_MIN
+ GRPC_INTEGRITY_ONLY
+ GRPC_PRIVACY_AND_INTEGRITY
+ GRPC_SECURITY_MAX = GRPC_PRIVACY_AND_INTEGRITY
+
+ ctypedef enum grpc_ssl_certificate_config_reload_status:
+ GRPC_SSL_CERTIFICATE_CONFIG_RELOAD_UNCHANGED
+ GRPC_SSL_CERTIFICATE_CONFIG_RELOAD_NEW
+ GRPC_SSL_CERTIFICATE_CONFIG_RELOAD_FAIL
+
+ ctypedef struct grpc_ssl_server_certificate_config:
+ # We don't care about the internals
+ pass
+
+ ctypedef struct grpc_ssl_server_credentials_options:
+ # We don't care about the internals
+ pass
+
+ grpc_ssl_server_certificate_config * grpc_ssl_server_certificate_config_create(
+ const char *pem_root_certs,
+ const grpc_ssl_pem_key_cert_pair *pem_key_cert_pairs,
+ size_t num_key_cert_pairs)
+
+ void grpc_ssl_server_certificate_config_destroy(grpc_ssl_server_certificate_config *config)
+
+ ctypedef grpc_ssl_certificate_config_reload_status (*grpc_ssl_server_certificate_config_callback)(
+ void *user_data,
+ grpc_ssl_server_certificate_config **config)
+
+ grpc_ssl_server_credentials_options *grpc_ssl_server_credentials_create_options_using_config(
+ grpc_ssl_client_certificate_request_type client_certificate_request,
+ grpc_ssl_server_certificate_config *certificate_config)
+
+ grpc_ssl_server_credentials_options* grpc_ssl_server_credentials_create_options_using_config_fetcher(
+ grpc_ssl_client_certificate_request_type client_certificate_request,
+ grpc_ssl_server_certificate_config_callback cb,
+ void *user_data)
+
+ grpc_server_credentials *grpc_ssl_server_credentials_create_with_options(
+ grpc_ssl_server_credentials_options *options)
+
+ ctypedef struct grpc_ssl_pem_key_cert_pair:
+ const char *private_key
+ const char *certificate_chain "cert_chain"
+
+ ctypedef struct grpc_channel_credentials:
+ # We don't care about the internals (and in fact don't know them)
+ pass
+
+ ctypedef struct grpc_call_credentials:
+ # We don't care about the internals (and in fact don't know them)
+ pass
+
+ ctypedef struct grpc_ssl_session_cache:
+ # We don't care about the internals (and in fact don't know them)
+ pass
+
+ ctypedef struct verify_peer_options:
+ # We don't care about the internals (and in fact don't know them)
+ pass
+
+ ctypedef void (*grpc_ssl_roots_override_callback)(char **pem_root_certs)
+
+ grpc_ssl_session_cache *grpc_ssl_session_cache_create_lru(size_t capacity)
+ void grpc_ssl_session_cache_destroy(grpc_ssl_session_cache* cache)
+
+ void grpc_set_ssl_roots_override_callback(
+ grpc_ssl_roots_override_callback cb) nogil
+
+ grpc_channel_credentials *grpc_google_default_credentials_create(grpc_call_credentials* call_credentials) nogil
+ grpc_channel_credentials *grpc_ssl_credentials_create(
+ const char *pem_root_certs, grpc_ssl_pem_key_cert_pair *pem_key_cert_pair,
+ verify_peer_options *verify_options, void *reserved) nogil
+ grpc_channel_credentials *grpc_composite_channel_credentials_create(
+ grpc_channel_credentials *creds1, grpc_call_credentials *creds2,
+ void *reserved) nogil
+ void grpc_channel_credentials_release(grpc_channel_credentials *creds) nogil
+
+ grpc_channel_credentials *grpc_xds_credentials_create(
+ grpc_channel_credentials *fallback_creds) nogil
+
+ grpc_channel_credentials *grpc_insecure_credentials_create() nogil
+
+ grpc_server_credentials *grpc_xds_server_credentials_create(
+ grpc_server_credentials *fallback_creds) nogil
+
+ grpc_server_credentials *grpc_insecure_server_credentials_create() nogil
+
+ grpc_call_credentials *grpc_composite_call_credentials_create(
+ grpc_call_credentials *creds1, grpc_call_credentials *creds2,
+ void *reserved) nogil
+ grpc_call_credentials *grpc_google_compute_engine_credentials_create(
+ void *reserved) nogil
+ grpc_call_credentials *grpc_service_account_jwt_access_credentials_create(
+ const char *json_key,
+ gpr_timespec token_lifetime, void *reserved) nogil
+ grpc_call_credentials *grpc_google_refresh_token_credentials_create(
+ const char *json_refresh_token, void *reserved) nogil
+ grpc_call_credentials *grpc_google_iam_credentials_create(
+ const char *authorization_token, const char *authority_selector,
+ void *reserved) nogil
+ void grpc_call_credentials_release(grpc_call_credentials *creds) nogil
+
+ grpc_channel *grpc_channel_create(
+ const char *target, grpc_channel_credentials *creds,
+ const grpc_channel_args *args) nogil
+
+ ctypedef struct grpc_server_credentials:
+ # We don't care about the internals (and in fact don't know them)
+ pass
+
+ void grpc_server_credentials_release(grpc_server_credentials *creds) nogil
+
+ int grpc_server_add_http2_port(grpc_server *server, const char *addr,
+ grpc_server_credentials *creds) nogil
+
+ grpc_call_error grpc_call_set_credentials(grpc_call *call,
+ grpc_call_credentials *creds) nogil
+
+ ctypedef struct grpc_auth_context:
+ # We don't care about the internals (and in fact don't know them)
+ pass
+
+ ctypedef struct grpc_auth_metadata_context:
+ const char *service_url
+ const char *method_name
+ const grpc_auth_context *channel_auth_context
+
+ ctypedef void (*grpc_credentials_plugin_metadata_cb)(
+ void *user_data, const grpc_metadata *creds_md, size_t num_creds_md,
+ grpc_status_code status, const char *error_details) nogil
+
+ ctypedef struct grpc_metadata_credentials_plugin:
+ int (*get_metadata)(
+ void *state, grpc_auth_metadata_context context,
+ grpc_credentials_plugin_metadata_cb cb, void *user_data,
+ grpc_metadata creds_md[GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX],
+ size_t *num_creds_md, grpc_status_code *status,
+ const char **error_details) except *
+ void (*destroy)(void *state) except *
+ void *state
+ const char *type
+
+ grpc_call_credentials *grpc_metadata_credentials_create_from_plugin(
+ grpc_metadata_credentials_plugin plugin, grpc_security_level min_security_level, void *reserved) nogil
+
+ ctypedef struct grpc_auth_property_iterator:
+ pass
+
+ ctypedef struct grpc_auth_property:
+ char *name
+ char *value
+ size_t value_length
+
+ grpc_auth_property *grpc_auth_property_iterator_next(
+ grpc_auth_property_iterator *it)
+
+ grpc_auth_property_iterator grpc_auth_context_property_iterator(
+ const grpc_auth_context *ctx)
+
+ grpc_auth_property_iterator grpc_auth_context_peer_identity(
+ const grpc_auth_context *ctx)
+
+ char *grpc_auth_context_peer_identity_property_name(
+ const grpc_auth_context *ctx)
+
+ grpc_auth_property_iterator grpc_auth_context_find_properties_by_name(
+ const grpc_auth_context *ctx, const char *name)
+
+ grpc_auth_context_peer_is_authenticated(
+ const grpc_auth_context *ctx)
+
+ grpc_auth_context *grpc_call_auth_context(grpc_call *call)
+
+ void grpc_auth_context_release(grpc_auth_context *context)
+
+ grpc_channel_credentials *grpc_local_credentials_create(
+ grpc_local_connect_type type)
+ grpc_server_credentials *grpc_local_server_credentials_create(
+ grpc_local_connect_type type)
+
+ ctypedef struct grpc_alts_credentials_options:
+ # We don't care about the internals (and in fact don't know them)
+ pass
+
+ grpc_channel_credentials *grpc_alts_credentials_create(
+ const grpc_alts_credentials_options *options)
+ grpc_server_credentials *grpc_alts_server_credentials_create(
+ const grpc_alts_credentials_options *options)
+
+ grpc_alts_credentials_options* grpc_alts_credentials_client_options_create()
+ grpc_alts_credentials_options* grpc_alts_credentials_server_options_create()
+ void grpc_alts_credentials_options_destroy(grpc_alts_credentials_options *options)
+ void grpc_alts_credentials_client_options_add_target_service_account(grpc_alts_credentials_options *options, const char *service_account)
+
+
+
+cdef extern from "grpc/compression.h":
+
+ ctypedef enum grpc_compression_algorithm:
+ GRPC_COMPRESS_NONE
+ GRPC_COMPRESS_DEFLATE
+ GRPC_COMPRESS_GZIP
+ GRPC_COMPRESS_STREAM_GZIP
+ GRPC_COMPRESS_ALGORITHMS_COUNT
+
+ ctypedef enum grpc_compression_level:
+ GRPC_COMPRESS_LEVEL_NONE
+ GRPC_COMPRESS_LEVEL_LOW
+ GRPC_COMPRESS_LEVEL_MED
+ GRPC_COMPRESS_LEVEL_HIGH
+ GRPC_COMPRESS_LEVEL_COUNT
+
+ ctypedef struct grpc_compression_options:
+ uint32_t enabled_algorithms_bitset
+
+ int grpc_compression_algorithm_parse(
+ grpc_slice value, grpc_compression_algorithm *algorithm) nogil
+ int grpc_compression_algorithm_name(grpc_compression_algorithm algorithm,
+ const char **name) nogil
+ grpc_compression_algorithm grpc_compression_algorithm_for_level(
+ grpc_compression_level level, uint32_t accepted_encodings) nogil
+ void grpc_compression_options_init(grpc_compression_options *opts) nogil
+ void grpc_compression_options_enable_algorithm(
+ grpc_compression_options *opts,
+ grpc_compression_algorithm algorithm) nogil
+ void grpc_compression_options_disable_algorithm(
+ grpc_compression_options *opts,
+ grpc_compression_algorithm algorithm) nogil
+ int grpc_compression_options_is_algorithm_enabled(
+ const grpc_compression_options *opts,
+ grpc_compression_algorithm algorithm) nogil
+
+cdef extern from "grpc/impl/codegen/compression_types.h":
+
+ const char *_GRPC_COMPRESSION_REQUEST_ALGORITHM_MD_KEY \
+ "GRPC_COMPRESSION_REQUEST_ALGORITHM_MD_KEY"
+
+
+cdef extern from "grpc/grpc_security_constants.h":
+ ctypedef enum grpc_local_connect_type:
+ UDS
+ LOCAL_TCP
+
+cdef extern from "src/core/lib/config/config_vars.h" namespace "grpc_core":
+ cdef cppclass ConfigVars:
+ @staticmethod
+ void Reset()
diff --git a/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/grpc_gevent.pxd.pxi b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/grpc_gevent.pxd.pxi
new file mode 100644
index 0000000000..baa9fb54a3
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/grpc_gevent.pxd.pxi
@@ -0,0 +1,21 @@
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# distutils: language=c++
+
+g_gevent_threadpool = None
+g_gevent_activated = False
+
+cpdef void gevent_increment_channel_count()
+
+cpdef void gevent_decrement_channel_count()
diff --git a/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/grpc_gevent.pyx.pxi b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/grpc_gevent.pyx.pxi
new file mode 100644
index 0000000000..41d27df594
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/grpc_gevent.pyx.pxi
@@ -0,0 +1,137 @@
+# Copyright 2018 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# distutils: language=c++
+
+from libc cimport string
+from cython.operator cimport dereference
+
+from cpython cimport Py_INCREF, Py_DECREF
+
+import atexit
+import errno
+import sys
+
+gevent_hub = None
+g_gevent_pool = None
+g_gevent_threadpool = None
+g_gevent_activated = False
+
+
+cdef queue[void*] g_greenlets_to_run
+cdef condition_variable g_greenlets_cv
+cdef mutex g_greenlets_mu
+cdef bint g_shutdown_greenlets_to_run_queue = False
+cdef int g_channel_count = 0
+
+
+cdef _submit_to_greenlet_queue(object cb, tuple args):
+    # Callback sink installed via set_async_callback_func() in
+    # init_grpc_gevent(): pack the callable with its args, push the tuple on
+    # the C++ queue, and wake the consumer (await_next_greenlet).
+    cdef tuple to_call = (cb,) + args
+    cdef unique_lock[mutex]* lk
+    # Keep the tuple alive while the queue holds only a raw void* to it; the
+    # matching Py_DECREF happens in await_next_greenlet().
+    Py_INCREF(to_call)
+    with nogil:
+        lk = new unique_lock[mutex](g_greenlets_mu)
+        g_greenlets_to_run.push(<void*>(to_call))
+        # Release the lock before notifying to avoid waking a waiter that
+        # immediately blocks on the still-held mutex.
+        del lk
+        g_greenlets_cv.notify_all()
+
+
+cpdef void gevent_increment_channel_count():
+    # Presumably invoked on channel creation under gevent — TODO(review):
+    # confirm call sites. The 0 -> 1 transition starts the consumer greenlet.
+    global g_channel_count
+    cdef int old_channel_count
+    with nogil:
+        lk = new unique_lock[mutex](g_greenlets_mu)
+        old_channel_count = g_channel_count
+        g_channel_count += 1
+        del lk
+    if old_channel_count == 0:
+        # First channel: kick off the spawn_greenlets() loop.
+        run_spawn_greenlets()
+
+
+cpdef void gevent_decrement_channel_count():
+    # Presumably invoked on channel destruction — TODO(review): confirm call
+    # sites. When the last channel goes away, wake await_next_greenlet() so
+    # the consumer loop can observe g_channel_count == 0 and exit.
+    global g_channel_count
+    with nogil:
+        lk = new unique_lock[mutex](g_greenlets_mu)
+        g_channel_count -= 1
+        if g_channel_count == 0:
+            g_greenlets_cv.notify_all()
+        del lk
+
+
+cdef object await_next_greenlet():
+    # Runs on a native threadpool thread: block until either a queued callback
+    # tuple is available (returned to the caller) or shutdown / last-channel
+    # teardown is signalled (returns None, which ends spawn_greenlets()).
+    cdef unique_lock[mutex]* lk
+    with nogil:
+        # Cython doesn't allow us to do proper stack allocations, so we can't take
+        # advantage of RAII.
+        lk = new unique_lock[mutex](g_greenlets_mu)
+        while not g_shutdown_greenlets_to_run_queue and g_channel_count != 0:
+            if not g_greenlets_to_run.empty():
+                break
+            g_greenlets_cv.wait(dereference(lk))
+    # NOTE: every exit path below must `del lk` to unlock and free the
+    # heap-allocated unique_lock.
+    if g_channel_count == 0:
+        del lk
+        return None
+    if g_shutdown_greenlets_to_run_queue:
+        del lk
+        return None
+    cdef object to_call = <object>g_greenlets_to_run.front()
+    # Balances the Py_INCREF performed in _submit_to_greenlet_queue().
+    Py_DECREF(to_call)
+    g_greenlets_to_run.pop()
+    del lk
+    return to_call
+
+def spawn_greenlets():
+    """Consumer loop: pull queued callbacks and run them as greenlet work.
+
+    Blocks on the native threadpool in await_next_greenlet(); a None result
+    signals shutdown (or last channel closed) and terminates the loop.
+    """
+    while True:
+        job = g_gevent_threadpool.apply(await_next_greenlet, ())
+        if job is None:
+            return
+        callback = job[0]
+        callback(*job[1:])
+
+def run_spawn_greenlets():
+    # Launch the consumer loop as a greenlet in the dedicated gevent group.
+    g_gevent_pool.spawn(spawn_greenlets)
+
+def shutdown_await_next_greenlet():
+    # atexit hook (registered in init_grpc_gevent): raise the shutdown flag
+    # under the queue mutex, then wake any threadpool thread blocked in
+    # await_next_greenlet() so it can return None.
+    global g_shutdown_greenlets_to_run_queue
+    cdef unique_lock[mutex]* lk
+    with nogil:
+        lk = new unique_lock[mutex](g_greenlets_mu)
+        g_shutdown_greenlets_to_run_queue = True
+        del lk
+        g_greenlets_cv.notify_all()
+
+def init_grpc_gevent():
+    """Activate the gevent integration for this process.
+
+    Lazily imports gevent so non-gevent users never pay for the dependency,
+    then reroutes core async callbacks through the greenlet queue.
+    """
+    # Lazily import gevent
+    global gevent_hub
+    global g_gevent_threadpool
+    global g_gevent_activated
+    global g_interrupt_check_period_ms
+    global g_gevent_pool
+
+    import gevent
+    import gevent.pool
+
+    gevent_hub = gevent.hub
+    # Native threadpool on which the blocking await_next_greenlet() runs.
+    g_gevent_threadpool = gevent_hub.get_hub().threadpool
+
+    g_gevent_activated = True
+    g_interrupt_check_period_ms = 2000
+
+    # Greenlet group that owns the spawn_greenlets() consumer.
+    g_gevent_pool = gevent.pool.Group()
+
+    # From here on, core completion callbacks are marshalled through
+    # _submit_to_greenlet_queue instead of being invoked inline.
+    set_async_callback_func(_submit_to_greenlet_queue)
+
+    # TODO: Document how this all works.
+    atexit.register(shutdown_await_next_greenlet)
diff --git a/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/grpc_string.pyx.pxi b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/grpc_string.pyx.pxi
new file mode 100644
index 0000000000..5c1e0679a9
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/grpc_string.pyx.pxi
@@ -0,0 +1,51 @@
+# Copyright 2016 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# This function will ascii encode unicode string inputs if necessary.
+# In Python3, unicode strings are the default str type.
# This function will ascii encode unicode string inputs if necessary.
# In Python3, unicode strings are the default str type.
cdef bytes str_to_bytes(object s):
  """Coerce *s* to bytes; None and bytes pass through, text is ascii-encoded."""
  if s is None:
    return s
  if isinstance(s, bytes):
    return s
  if isinstance(s, unicode):
    return s.encode('ascii')
  raise TypeError('Expected bytes, str, or unicode, not {}'.format(type(s)))
+
+
+# TODO(https://github.com/grpc/grpc/issues/13782): It would be nice for us if
+# the type of metadata that we accept were exactly the same as the type of
+# metadata that we deliver to our users (so "str" for this function's
+# parameter rather than "object"), but would it be nice for our users? Right
+# now we haven't yet heard from enough users to know one way or another.
cdef bytes _encode(object string_or_none):
  """Encode str/bytes/None into bytes; None maps to b'', text is utf8-encoded."""
  if string_or_none is None:
    return b''
  if isinstance(string_or_none, (bytes,)):
    return <bytes>string_or_none
  if isinstance(string_or_none, (unicode,)):
    return string_or_none.encode('utf8')
  raise TypeError('Expected str, not {}'.format(type(string_or_none)))
+
+
cdef str _decode(bytes bytestring):
  """Decode *bytestring* as utf8; on failure log and fall back to latin1."""
  if isinstance(bytestring, (str,)):
    return <str>bytestring
  try:
    return bytestring.decode('utf8')
  except UnicodeDecodeError:
    _LOGGER.exception('Invalid encoding on %s', bytestring)
    return bytestring.decode('latin1')
diff --git a/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/metadata.pxd.pxi b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/metadata.pxd.pxi
new file mode 100644
index 0000000000..fc72ac1576
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/metadata.pxd.pxi
@@ -0,0 +1,26 @@
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
# Convert Python metadata into a freshly allocated grpc_metadata array;
# outputs go through c_metadata/c_count. Pair with _release_c_metadata().
cdef void _store_c_metadata(
    metadata, grpc_metadata **c_metadata, size_t *c_count) except *


# Unref every slice in the array and free the array itself.
cdef void _release_c_metadata(grpc_metadata *c_metadata, int count) except *


# Convert one C metadatum into a Python (key, value) tuple.
cdef tuple _metadatum(grpc_slice key_slice, grpc_slice value_slice)


# Convert a whole grpc_metadata_array into a tuple of (key, value) pairs.
cdef tuple _metadata(grpc_metadata_array *c_metadata_array)
diff --git a/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/metadata.pyx.pxi b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/metadata.pyx.pxi
new file mode 100644
index 0000000000..b2dd1e3380
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/metadata.pyx.pxi
@@ -0,0 +1,73 @@
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import collections
+
+
class InitialMetadataFlags:
  # Mirrors the C-level GRPC_INITIAL_METADATA_* flag constants.
  used_mask = GRPC_INITIAL_METADATA_USED_MASK
  wait_for_ready = GRPC_INITIAL_METADATA_WAIT_FOR_READY
  wait_for_ready_explicitly_set = GRPC_INITIAL_METADATA_WAIT_FOR_READY_EXPLICITLY_SET


# A single (key, value) metadata element as delivered to Python callers.
_Metadatum = collections.namedtuple('_Metadatum', ('key', 'value',))
+
+
cdef void _store_c_metadata(
    metadata, grpc_metadata **c_metadata, size_t *c_count) except *:
  """Convert Python metadata into a gpr_malloc'd grpc_metadata array.

  On return c_metadata[0]/c_count[0] describe the array (NULL/0 when
  *metadata* is None or empty). The caller owns the array and must release
  it with _release_c_metadata().
  """
  if metadata is None:
    c_count[0] = 0
    c_metadata[0] = NULL
  else:
    metadatum_count = len(metadata)
    if metadatum_count == 0:
      c_count[0] = 0
      c_metadata[0] = NULL
    else:
      c_count[0] = metadatum_count
      c_metadata[0] = <grpc_metadata *>gpr_malloc(
          metadatum_count * sizeof(grpc_metadata))
      for index, (key, value) in enumerate(metadata):
        encoded_key = _encode(key)
        # Keys ending in '-bin' carry binary values: pass them through as-is
        # and insist that the caller supplied bytes.
        encoded_value = value if encoded_key[-4:] == b'-bin' else _encode(value)
        if not isinstance(encoded_value, bytes):
          raise TypeError('Binary metadata key="%s" expected bytes, got %s' % (
              key,
              type(encoded_value)
          ))
        c_metadata[0][index].key = _slice_from_bytes(encoded_key)
        c_metadata[0][index].value = _slice_from_bytes(encoded_value)
+
+
cdef void _release_c_metadata(grpc_metadata *c_metadata, int count) except *:
  """Unref every key/value slice in *c_metadata* and free the array."""
  if count > 0:
    for i in range(count):
      grpc_slice_unref(c_metadata[i].key)
      grpc_slice_unref(c_metadata[i].value)
    gpr_free(c_metadata)
+
+
cdef tuple _metadatum(grpc_slice key_slice, grpc_slice value_slice):
  """Convert one C metadatum into a _Metadatum (key, value) tuple."""
  cdef bytes raw_key = _slice_bytes(key_slice)
  cdef bytes raw_value = _slice_bytes(value_slice)
  if raw_key[-4:] == b'-bin':
    # Binary ('-bin') metadata values are delivered as raw bytes.
    return <tuple>_Metadatum(_decode(raw_key), raw_value)
  return <tuple>_Metadatum(_decode(raw_key), _decode(raw_value))
+
+
cdef tuple _metadata(grpc_metadata_array *c_metadata_array):
  """Convert a grpc_metadata_array into a tuple of _Metadatum pairs."""
  items = []
  for i in range(c_metadata_array.count):
    items.append(
        _metadatum(c_metadata_array.metadata[i].key,
                   c_metadata_array.metadata[i].value))
  return tuple(items)
diff --git a/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/operation.pxd.pxi b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/operation.pxd.pxi
new file mode 100644
index 0000000000..c9df32dadf
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/operation.pxd.pxi
@@ -0,0 +1,111 @@
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
# Declarations for the Operation hierarchy. Each subclass wraps a single
# grpc_op batch element: c() fills in self.c_op before the batch starts and
# un_c() harvests results / releases C resources after it completes.
cdef class Operation:

  cdef void c(self) except *
  cdef void un_c(self) except *

  # TODO(https://github.com/grpc/grpc/issues/7950): Eliminate this!
  cdef grpc_op c_op


# Sends the call's initial metadata.
cdef class SendInitialMetadataOperation(Operation):

  cdef readonly object _initial_metadata;
  cdef readonly int _flags
  cdef grpc_metadata *_c_initial_metadata
  cdef size_t _c_initial_metadata_count

  cdef void c(self) except *
  cdef void un_c(self) except *


# Sends one serialized message.
cdef class SendMessageOperation(Operation):

  cdef readonly bytes _message
  cdef readonly int _flags
  cdef grpc_byte_buffer *_c_message_byte_buffer

  cdef void c(self) except *
  cdef void un_c(self) except *


# Half-closes the client side of the call.
cdef class SendCloseFromClientOperation(Operation):

  cdef readonly int _flags

  cdef void c(self) except *
  cdef void un_c(self) except *


# Sends status, details, and trailing metadata from the server.
cdef class SendStatusFromServerOperation(Operation):

  cdef readonly object _trailing_metadata
  cdef readonly object _code
  cdef readonly object _details
  cdef readonly int _flags
  cdef grpc_metadata *_c_trailing_metadata
  cdef size_t _c_trailing_metadata_count
  cdef grpc_slice _c_details

  cdef void c(self) except *
  cdef void un_c(self) except *


# Receives the peer's initial metadata.
cdef class ReceiveInitialMetadataOperation(Operation):

  cdef readonly int _flags
  cdef tuple _initial_metadata
  cdef grpc_metadata_array _c_initial_metadata

  cdef void c(self) except *
  cdef void un_c(self) except *


# Receives one message (None when the stream has ended).
cdef class ReceiveMessageOperation(Operation):

  cdef readonly int _flags
  cdef grpc_byte_buffer *_c_message_byte_buffer
  cdef bytes _message

  cdef void c(self) except *
  cdef void un_c(self) except *


# Receives status, details, trailing metadata, and error string on the client.
cdef class ReceiveStatusOnClientOperation(Operation):

  cdef readonly int _flags
  cdef grpc_metadata_array _c_trailing_metadata
  cdef grpc_status_code _c_code
  cdef grpc_slice _c_details
  cdef const char* _c_error_string
  cdef tuple _trailing_metadata
  cdef object _code
  cdef str _details
  cdef str _error_string

  cdef void c(self) except *
  cdef void un_c(self) except *


# Learns on the server whether the call was cancelled.
cdef class ReceiveCloseOnServerOperation(Operation):

  cdef readonly int _flags
  cdef object _cancelled
  cdef int _c_cancelled

  cdef void c(self) except *
  cdef void un_c(self) except *
diff --git a/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/operation.pyx.pxi b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/operation.pyx.pxi
new file mode 100644
index 0000000000..3f3fd75407
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/operation.pyx.pxi
@@ -0,0 +1,250 @@
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
cdef class Operation:
  """Abstract base for one element of a grpc_op batch."""

  cdef void c(self) except *:
    # Subclasses populate self.c_op before the batch is started.
    raise NotImplementedError()

  cdef void un_c(self) except *:
    # Subclasses harvest results / release resources after the batch completes.
    raise NotImplementedError()
+
+
cdef class SendInitialMetadataOperation(Operation):
  """Sends the call's initial metadata."""

  def __cinit__(self, initial_metadata, flags):
    self._initial_metadata = initial_metadata
    self._flags = flags

  def type(self):
    return GRPC_OP_SEND_INITIAL_METADATA

  cdef void c(self) except *:
    self.c_op.type = GRPC_OP_SEND_INITIAL_METADATA
    self.c_op.flags = self._flags
    # Converts Python metadata into a gpr_malloc'd array; freed in un_c().
    _store_c_metadata(
        self._initial_metadata, &self._c_initial_metadata,
        &self._c_initial_metadata_count)
    self.c_op.data.send_initial_metadata.metadata = self._c_initial_metadata
    self.c_op.data.send_initial_metadata.count = self._c_initial_metadata_count
    self.c_op.data.send_initial_metadata.maybe_compression_level.is_set = 0

  cdef void un_c(self) except *:
    _release_c_metadata(
        self._c_initial_metadata, self._c_initial_metadata_count)
+
+
cdef class SendMessageOperation(Operation):
  """Sends one serialized message; None is normalized to the empty message."""

  def __cinit__(self, bytes message, int flags):
    if message is None:
      self._message = b''
    else:
      self._message = message
    self._flags = flags

  def type(self):
    return GRPC_OP_SEND_MESSAGE

  cdef void c(self) except *:
    self.c_op.type = GRPC_OP_SEND_MESSAGE
    self.c_op.flags = self._flags
    cdef grpc_slice message_slice = grpc_slice_from_copied_buffer(
        self._message, len(self._message))
    self._c_message_byte_buffer = grpc_raw_byte_buffer_create(
        &message_slice, 1)
    # The byte buffer holds its own reference to the slice, so ours can go.
    grpc_slice_unref(message_slice)
    self.c_op.data.send_message.send_message = self._c_message_byte_buffer

  cdef void un_c(self) except *:
    grpc_byte_buffer_destroy(self._c_message_byte_buffer)
+
+
cdef class SendCloseFromClientOperation(Operation):
  """Half-closes the client side of the call; no resources to release."""

  def __cinit__(self, int flags):
    self._flags = flags

  def type(self):
    return GRPC_OP_SEND_CLOSE_FROM_CLIENT

  cdef void c(self) except *:
    self.c_op.type = GRPC_OP_SEND_CLOSE_FROM_CLIENT
    self.c_op.flags = self._flags

  cdef void un_c(self) except *:
    pass
+
+
cdef class SendStatusFromServerOperation(Operation):
  """Sends status code, details, and trailing metadata from the server."""

  def __cinit__(self, trailing_metadata, code, object details, int flags):
    self._trailing_metadata = trailing_metadata
    self._code = code
    self._details = details
    self._flags = flags

  def type(self):
    return GRPC_OP_SEND_STATUS_FROM_SERVER

  cdef void c(self) except *:
    self.c_op.type = GRPC_OP_SEND_STATUS_FROM_SERVER
    self.c_op.flags = self._flags
    # Metadata array and details slice are allocated here and freed in un_c().
    _store_c_metadata(
        self._trailing_metadata, &self._c_trailing_metadata,
        &self._c_trailing_metadata_count)
    self.c_op.data.send_status_from_server.trailing_metadata = (
        self._c_trailing_metadata)
    self.c_op.data.send_status_from_server.trailing_metadata_count = (
        self._c_trailing_metadata_count)
    self.c_op.data.send_status_from_server.status = self._code
    self._c_details = _slice_from_bytes(_encode(self._details))
    self.c_op.data.send_status_from_server.status_details = &self._c_details

  cdef void un_c(self) except *:
    grpc_slice_unref(self._c_details)
    _release_c_metadata(
        self._c_trailing_metadata, self._c_trailing_metadata_count)
+
+
cdef class ReceiveInitialMetadataOperation(Operation):
  """Receives the peer's initial metadata; read it via initial_metadata()."""

  def __cinit__(self, flags):
    self._flags = flags

  def type(self):
    return GRPC_OP_RECV_INITIAL_METADATA

  cdef void c(self) except *:
    self.c_op.type = GRPC_OP_RECV_INITIAL_METADATA
    self.c_op.flags = self._flags
    grpc_metadata_array_init(&self._c_initial_metadata)
    self.c_op.data.receive_initial_metadata.receive_initial_metadata = (
        &self._c_initial_metadata)

  cdef void un_c(self) except *:
    # Snapshot into Python objects before destroying the C array.
    self._initial_metadata = _metadata(&self._c_initial_metadata)
    grpc_metadata_array_destroy(&self._c_initial_metadata)

  def initial_metadata(self):
    return self._initial_metadata
+
+
cdef class ReceiveMessageOperation(Operation):
  """Receives one message; message() is None when no message arrived."""

  def __cinit__(self, flags):
    self._flags = flags

  def type(self):
    return GRPC_OP_RECV_MESSAGE

  cdef void c(self) except *:
    self.c_op.type = GRPC_OP_RECV_MESSAGE
    self.c_op.flags = self._flags
    self.c_op.data.receive_message.receive_message = (
        &self._c_message_byte_buffer)

  cdef void un_c(self) except *:
    cdef grpc_byte_buffer_reader message_reader
    cdef bint message_reader_status
    cdef grpc_slice message_slice
    cdef size_t message_slice_length
    cdef void *message_slice_pointer
    if self._c_message_byte_buffer != NULL:
      message_reader_status = grpc_byte_buffer_reader_init(
          &message_reader, self._c_message_byte_buffer)
      if message_reader_status:
        # Reassemble the (possibly multi-slice) byte buffer into one bytes
        # object, unreffing each slice as it is consumed.
        message = bytearray()
        while grpc_byte_buffer_reader_next(&message_reader, &message_slice):
          message_slice_pointer = grpc_slice_start_ptr(message_slice)
          message_slice_length = grpc_slice_length(message_slice)
          message += (<char *>message_slice_pointer)[:message_slice_length]
          grpc_slice_unref(message_slice)
        grpc_byte_buffer_reader_destroy(&message_reader)
        self._message = bytes(message)
      else:
        # Reader initialization failed; surface "no message" to callers.
        self._message = None
      grpc_byte_buffer_destroy(self._c_message_byte_buffer)
    else:
      self._message = None

  def message(self):
    return self._message
+
+
cdef class ReceiveStatusOnClientOperation(Operation):
  """Receives status, details, trailing metadata, and the core error string."""

  def __cinit__(self, flags):
    self._flags = flags

  def type(self):
    return GRPC_OP_RECV_STATUS_ON_CLIENT

  cdef void c(self) except *:
    self.c_op.type = GRPC_OP_RECV_STATUS_ON_CLIENT
    self.c_op.flags = self._flags
    grpc_metadata_array_init(&self._c_trailing_metadata)
    self.c_op.data.receive_status_on_client.trailing_metadata = (
        &self._c_trailing_metadata)
    self.c_op.data.receive_status_on_client.status = (
        &self._c_code)
    self.c_op.data.receive_status_on_client.status_details = (
        &self._c_details)
    self.c_op.data.receive_status_on_client.error_string = (
        &self._c_error_string)

  cdef void un_c(self) except *:
    # Convert everything to Python objects, then release each C resource:
    # the metadata array, the details slice, and the gpr-allocated string.
    self._trailing_metadata = _metadata(&self._c_trailing_metadata)
    grpc_metadata_array_destroy(&self._c_trailing_metadata)
    self._code = self._c_code
    self._details = _decode(_slice_bytes(self._c_details))
    grpc_slice_unref(self._c_details)
    if self._c_error_string != NULL:
      self._error_string = _decode(self._c_error_string)
      gpr_free(<void*>self._c_error_string)
    else:
      self._error_string = ""

  def trailing_metadata(self):
    return self._trailing_metadata

  def code(self):
    return self._code

  def details(self):
    return self._details

  def error_string(self):
    return self._error_string
+
+
cdef class ReceiveCloseOnServerOperation(Operation):
  """Learns on the server whether the call was cancelled."""

  def __cinit__(self, flags):
    self._flags = flags

  def type(self):
    return GRPC_OP_RECV_CLOSE_ON_SERVER

  cdef void c(self) except *:
    self.c_op.type = GRPC_OP_RECV_CLOSE_ON_SERVER
    self.c_op.flags = self._flags
    self.c_op.data.receive_close_on_server.cancelled = &self._c_cancelled

  cdef void un_c(self) except *:
    # Convert the C int flag into a Python bool.
    self._cancelled = bool(self._c_cancelled)

  def cancelled(self):
    return self._cancelled
diff --git a/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/propagation_bits.pxd.pxi b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/propagation_bits.pxd.pxi
new file mode 100644
index 0000000000..3182aa54de
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/propagation_bits.pxd.pxi
@@ -0,0 +1,20 @@
+# Copyright 2018 The gRPC Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
# Expose the GRPC_PROPAGATE_* context-propagation bitmasks to Cython under
# underscore-prefixed aliases.
cdef extern from "grpc/impl/propagation_bits.h":
  cdef int _GRPC_PROPAGATE_DEADLINE "GRPC_PROPAGATE_DEADLINE"
  cdef int _GRPC_PROPAGATE_CENSUS_STATS_CONTEXT "GRPC_PROPAGATE_CENSUS_STATS_CONTEXT"
  cdef int _GRPC_PROPAGATE_CENSUS_TRACING_CONTEXT "GRPC_PROPAGATE_CENSUS_TRACING_CONTEXT"
  cdef int _GRPC_PROPAGATE_CANCELLATION "GRPC_PROPAGATE_CANCELLATION"
  cdef int _GRPC_PROPAGATE_DEFAULTS "GRPC_PROPAGATE_DEFAULTS"
diff --git a/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/propagation_bits.pyx.pxi b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/propagation_bits.pyx.pxi
new file mode 100644
index 0000000000..2dcc76a2db
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/propagation_bits.pyx.pxi
@@ -0,0 +1,20 @@
+# Copyright 2018 The gRPC Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
class PropagationConstants:
  # Python-visible re-export of the C GRPC_PROPAGATE_* bitmasks.
  GRPC_PROPAGATE_DEADLINE = _GRPC_PROPAGATE_DEADLINE
  GRPC_PROPAGATE_CENSUS_STATS_CONTEXT = _GRPC_PROPAGATE_CENSUS_STATS_CONTEXT
  GRPC_PROPAGATE_CENSUS_TRACING_CONTEXT = _GRPC_PROPAGATE_CENSUS_TRACING_CONTEXT
  GRPC_PROPAGATE_CANCELLATION = _GRPC_PROPAGATE_CANCELLATION
  GRPC_PROPAGATE_DEFAULTS = _GRPC_PROPAGATE_DEFAULTS
diff --git a/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/records.pxd.pxi b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/records.pxd.pxi
new file mode 100644
index 0000000000..35e1bdb0ae
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/records.pxd.pxi
@@ -0,0 +1,34 @@
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
# Slice helpers shared across the extension modules.
cdef bytes _slice_bytes(grpc_slice slice)
cdef grpc_slice _copy_slice(grpc_slice slice) nogil
cdef grpc_slice _slice_from_bytes(bytes value) nogil


# Wraps grpc_call_details (method/host/deadline of an incoming call).
cdef class CallDetails:

  cdef grpc_call_details c_details


# Holds one private key / certificate chain pair for SSL credentials.
cdef class SslPemKeyCertPair:

  cdef grpc_ssl_pem_key_cert_pair c_pair
  # The Python bytes are retained so c_pair's borrowed char* stay valid.
  cdef readonly object private_key, certificate_chain


# Wraps grpc_compression_options.
cdef class CompressionOptions:

  cdef grpc_compression_options c_options
diff --git a/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/records.pyx.pxi b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/records.pyx.pxi
new file mode 100644
index 0000000000..281cf8613f
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/records.pyx.pxi
@@ -0,0 +1,201 @@
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
cdef bytes _slice_bytes(grpc_slice slice):
  # Copy the slice contents into a fresh Python bytes object.
  cdef size_t length = grpc_slice_length(slice)
  cdef void *start = grpc_slice_start_ptr(slice)
  return (<const char *>start)[:length]
+
cdef grpc_slice _copy_slice(grpc_slice slice) nogil:
  # Return a freshly allocated copy of the slice's bytes.
  cdef size_t length = grpc_slice_length(slice)
  cdef void *start = grpc_slice_start_ptr(slice)
  return grpc_slice_from_copied_buffer(<const char *>start, length)
+
cdef grpc_slice _slice_from_bytes(bytes value) nogil:
  # Copy *value* into a new grpc_slice. Declared nogil for callers' sake;
  # the GIL is re-acquired briefly to touch the Python bytes object.
  cdef const char *value_ptr
  cdef size_t length
  with gil:
    value_ptr = <const char *>value
    length = len(value)
  return grpc_slice_from_copied_buffer(value_ptr, length)
+
+
class ConnectivityState:
  # Python aliases for the grpc_connectivity_state enum.
  idle = GRPC_CHANNEL_IDLE
  connecting = GRPC_CHANNEL_CONNECTING
  ready = GRPC_CHANNEL_READY
  transient_failure = GRPC_CHANNEL_TRANSIENT_FAILURE
  shutdown = GRPC_CHANNEL_SHUTDOWN
+
+
class ChannelArgKey:
  # Commonly used channel-argument key strings from gRPC core.
  enable_census = GRPC_ARG_ENABLE_CENSUS
  max_concurrent_streams = GRPC_ARG_MAX_CONCURRENT_STREAMS
  max_receive_message_length = GRPC_ARG_MAX_RECEIVE_MESSAGE_LENGTH
  max_send_message_length = GRPC_ARG_MAX_SEND_MESSAGE_LENGTH
  http2_initial_sequence_number = GRPC_ARG_HTTP2_INITIAL_SEQUENCE_NUMBER
  default_authority = GRPC_ARG_DEFAULT_AUTHORITY
  primary_user_agent_string = GRPC_ARG_PRIMARY_USER_AGENT_STRING
  secondary_user_agent_string = GRPC_ARG_SECONDARY_USER_AGENT_STRING
  ssl_session_cache = GRPC_SSL_SESSION_CACHE_ARG
  ssl_target_name_override = GRPC_SSL_TARGET_NAME_OVERRIDE_ARG
+
+
class WriteFlag:
  # Per-write flags for message send operations.
  buffer_hint = GRPC_WRITE_BUFFER_HINT
  no_compress = GRPC_WRITE_NO_COMPRESS
+
+
class StatusCode:
  # Python aliases for the grpc_status_code enum.
  ok = GRPC_STATUS_OK
  cancelled = GRPC_STATUS_CANCELLED
  unknown = GRPC_STATUS_UNKNOWN
  invalid_argument = GRPC_STATUS_INVALID_ARGUMENT
  deadline_exceeded = GRPC_STATUS_DEADLINE_EXCEEDED
  not_found = GRPC_STATUS_NOT_FOUND
  already_exists = GRPC_STATUS_ALREADY_EXISTS
  permission_denied = GRPC_STATUS_PERMISSION_DENIED
  unauthenticated = GRPC_STATUS_UNAUTHENTICATED
  resource_exhausted = GRPC_STATUS_RESOURCE_EXHAUSTED
  failed_precondition = GRPC_STATUS_FAILED_PRECONDITION
  aborted = GRPC_STATUS_ABORTED
  out_of_range = GRPC_STATUS_OUT_OF_RANGE
  unimplemented = GRPC_STATUS_UNIMPLEMENTED
  internal = GRPC_STATUS_INTERNAL
  unavailable = GRPC_STATUS_UNAVAILABLE
  data_loss = GRPC_STATUS_DATA_LOSS
+
+
class CallError:
  # Python aliases for the grpc_call_error enum.
  ok = GRPC_CALL_OK
  error = GRPC_CALL_ERROR
  not_on_server = GRPC_CALL_ERROR_NOT_ON_SERVER
  not_on_client = GRPC_CALL_ERROR_NOT_ON_CLIENT
  already_accepted = GRPC_CALL_ERROR_ALREADY_ACCEPTED
  already_invoked = GRPC_CALL_ERROR_ALREADY_INVOKED
  not_invoked = GRPC_CALL_ERROR_NOT_INVOKED
  already_finished = GRPC_CALL_ERROR_ALREADY_FINISHED
  too_many_operations = GRPC_CALL_ERROR_TOO_MANY_OPERATIONS
  invalid_flags = GRPC_CALL_ERROR_INVALID_FLAGS
  invalid_metadata = GRPC_CALL_ERROR_INVALID_METADATA
+
+
class CompletionType:
  # Python aliases for the grpc_completion_type enum.
  queue_shutdown = GRPC_QUEUE_SHUTDOWN
  queue_timeout = GRPC_QUEUE_TIMEOUT
  operation_complete = GRPC_OP_COMPLETE
+
+
class OperationType:
  # Python aliases for the grpc_op_type enum.
  send_initial_metadata = GRPC_OP_SEND_INITIAL_METADATA
  send_message = GRPC_OP_SEND_MESSAGE
  send_close_from_client = GRPC_OP_SEND_CLOSE_FROM_CLIENT
  send_status_from_server = GRPC_OP_SEND_STATUS_FROM_SERVER
  receive_initial_metadata = GRPC_OP_RECV_INITIAL_METADATA
  receive_message = GRPC_OP_RECV_MESSAGE
  receive_status_on_client = GRPC_OP_RECV_STATUS_ON_CLIENT
  receive_close_on_server = GRPC_OP_RECV_CLOSE_ON_SERVER
+
# Re-export the compression channel-arg key and request-metadata key.
GRPC_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM = (
    _GRPC_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM)

GRPC_COMPRESSION_REQUEST_ALGORITHM_MD_KEY = (
    _GRPC_COMPRESSION_REQUEST_ALGORITHM_MD_KEY)
+
class CompressionAlgorithm:
  # Python aliases for the grpc_compression_algorithm enum.
  none = GRPC_COMPRESS_NONE
  deflate = GRPC_COMPRESS_DEFLATE
  gzip = GRPC_COMPRESS_GZIP
+
+
class CompressionLevel:
  # Python aliases for the grpc_compression_level enum.
  none = GRPC_COMPRESS_LEVEL_NONE
  low = GRPC_COMPRESS_LEVEL_LOW
  medium = GRPC_COMPRESS_LEVEL_MED
  high = GRPC_COMPRESS_LEVEL_HIGH
+
+
cdef class CallDetails:
  """Wraps grpc_call_details: method, host, and deadline of an incoming call."""

  def __cinit__(self):
    # Keep the library initialized for this object's lifetime; balanced by
    # grpc_shutdown() in __dealloc__.
    fork_handlers_and_grpc_init()
    with nogil:
      grpc_call_details_init(&self.c_details)

  def __dealloc__(self):
    with nogil:
      grpc_call_details_destroy(&self.c_details)
      grpc_shutdown()

  @property
  def method(self):
    # Raw bytes of the fully qualified method name.
    return _slice_bytes(self.c_details.method)

  @property
  def host(self):
    return _slice_bytes(self.c_details.host)

  @property
  def deadline(self):
    # Converted from gpr_timespec into a Python float timestamp.
    return _time_from_timespec(self.c_details.deadline)
+
+
cdef class SslPemKeyCertPair:
  """Holds a PEM private key / certificate chain pair for SSL credentials."""

  def __cinit__(self, bytes private_key, bytes certificate_chain):
    # Keep the Python bytes alive: c_pair stores borrowed char* pointers
    # into these buffers.
    self.private_key = private_key
    self.certificate_chain = certificate_chain
    self.c_pair.private_key = self.private_key
    self.c_pair.certificate_chain = self.certificate_chain
+
+
cdef class CompressionOptions:
  """Wraps grpc_compression_options for enabling/disabling algorithms."""

  def __cinit__(self):
    with nogil:
      grpc_compression_options_init(&self.c_options)

  def enable_algorithm(self, grpc_compression_algorithm algorithm):
    with nogil:
      grpc_compression_options_enable_algorithm(&self.c_options, algorithm)

  def disable_algorithm(self, grpc_compression_algorithm algorithm):
    with nogil:
      grpc_compression_options_disable_algorithm(&self.c_options, algorithm)

  def is_algorithm_enabled(self, grpc_compression_algorithm algorithm):
    # Returns the C int result (nonzero when enabled).
    cdef int result
    with nogil:
      result = grpc_compression_options_is_algorithm_enabled(
          &self.c_options, algorithm)
    return result

  def to_channel_arg(self):
    # (key, value) pair suitable for inclusion in channel arguments.
    return (
        GRPC_COMPRESSION_CHANNEL_ENABLED_ALGORITHMS_BITSET,
        self.c_options.enabled_algorithms_bitset,
    )
+
+
def compression_algorithm_name(grpc_compression_algorithm algorithm):
  """Return the canonical name of *algorithm* as reported by gRPC core."""
  cdef const char* name
  with nogil:
    grpc_compression_algorithm_name(algorithm, &name)
  # Let Cython do the right thing with string casting
  return name
+
+
def reset_grpc_config_vars():
  # Ask gRPC core to reset its cached ConfigVars state.
  # NOTE(review): ConfigVars semantics are defined in core — confirm intent.
  ConfigVars.Reset()
diff --git a/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/security.pxd.pxi b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/security.pxd.pxi
new file mode 100644
index 0000000000..e6e79536bb
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/security.pxd.pxi
@@ -0,0 +1,17 @@
+# Copyright 2016 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
# Callback installed into gRPC core to supply default SSL root certificates.
cdef grpc_ssl_roots_override_result ssl_roots_override_callback(
    char **pem_root_certs) nogil
diff --git a/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/security.pyx.pxi b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/security.pyx.pxi
new file mode 100644
index 0000000000..9cc3fd5a21
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/security.pyx.pxi
@@ -0,0 +1,85 @@
+# Copyright 2016 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from libc.string cimport memcpy
+
cdef grpc_ssl_roots_override_result ssl_roots_override_callback(
    char **pem_root_certs) nogil:
  # Invoked by gRPC core to obtain PEM root certificates. This build hands
  # back a gpr_malloc'd empty NUL-terminated string.
  # NOTE(review): presumably core then falls back to its own default roots
  # handling for an empty override — confirm against core behavior.
  with gil:
    temporary_pem_root_certs = ''
    pem_root_certs[0] = <char *>gpr_malloc(len(temporary_pem_root_certs) + 1)
    memcpy(
        pem_root_certs[0], <char *>temporary_pem_root_certs,
        len(temporary_pem_root_certs))
    pem_root_certs[0][len(temporary_pem_root_certs)] = '\0'

  return GRPC_SSL_ROOTS_OVERRIDE_OK
+
+
def peer_identities(Call call):
  """Return the peer identity property values of *call* as a list of bytes.

  Returns None when the call has no auth context or no identity properties.
  """
  cdef grpc_auth_context* auth_context
  cdef grpc_auth_property_iterator properties
  cdef const grpc_auth_property* property

  auth_context = grpc_call_auth_context(call.c_call)
  if auth_context == NULL:
    return None
  properties = grpc_auth_context_peer_identity(auth_context)
  identities = []
  while True:
    property = grpc_auth_property_iterator_next(&properties)
    if property == NULL:
      break
    if property.value != NULL:
      identities.append(<bytes>(property.value))
  # Balance the reference taken by grpc_call_auth_context().
  grpc_auth_context_release(auth_context)
  return identities if identities else None
+
def peer_identity_key(Call call):
  """Return the auth property name identifying the peer of *call*.

  Args:
    call: The Call whose auth context to inspect.

  Returns:
    The peer identity property name as bytes, or None when the call has no
    auth context or no peer identity property is set.
  """
  cdef grpc_auth_context* auth_context
  cdef const char* c_key
  auth_context = grpc_call_auth_context(call.c_call)
  if auth_context == NULL:
    return None
  c_key = grpc_auth_context_peer_identity_property_name(auth_context)
  if c_key == NULL:
    key = None
  else:
    # Reuse the pointer already fetched above instead of issuing a second,
    # redundant call into gRPC core.
    key = <bytes>c_key
  # Balance the reference taken by grpc_call_auth_context().
  grpc_auth_context_release(auth_context)
  return key
+
def auth_context(Call call):
  """Return the call's auth context as a dict of bytes key -> list of bytes.

  Properties sharing a name are accumulated into the same list; an empty
  dict is returned when the call has no auth context.
  """
  cdef grpc_auth_context* auth_context
  cdef grpc_auth_property_iterator properties
  cdef const grpc_auth_property* property

  auth_context = grpc_call_auth_context(call.c_call)
  if auth_context == NULL:
    return {}
  properties = grpc_auth_context_property_iterator(auth_context)
  py_auth_context = {}
  while True:
    property = grpc_auth_property_iterator_next(&properties)
    if property == NULL:
      break
    if property.name != NULL and property.value != NULL:
      key = <bytes> property.name
      if key in py_auth_context:
        py_auth_context[key].append(<bytes>(property.value))
      else:
        py_auth_context[key] = [<bytes> property.value]
  # Balance the reference taken by grpc_call_auth_context().
  grpc_auth_context_release(auth_context)
  return py_auth_context
+
diff --git a/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/server.pxd.pxi b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/server.pxd.pxi
new file mode 100644
index 0000000000..b89ed99d97
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/server.pxd.pxi
@@ -0,0 +1,29 @@
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+cdef class Server:
+  # Declaration of the Cython wrapper that owns a core grpc_server and
+  # tracks its start/shutdown lifecycle (implementation in server.pyx.pxi).
+
+  cdef grpc_server *c_server
+
+  cdef bint is_started  # start has been called
+  cdef bint is_shutting_down  # shutdown has been called
+  cdef bint is_shutdown  # notification of complete shutdown received
+  # used at dealloc when user forgets to shutdown
+  cdef CompletionQueue backup_shutdown_queue
+  # TODO(https://github.com/grpc/grpc/issues/15662): Elide this.
+  cdef list references
+  cdef list registered_completion_queues
+
+  # Internal shutdown entry point shared by shutdown() and destroy().
+  cdef _c_shutdown(self, CompletionQueue queue, tag)
+  # Invoked by _ServerShutdownTag when core reports shutdown completion.
+  cdef notify_shutdown_complete(self)
diff --git a/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/server.pyx.pxi b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/server.pyx.pxi
new file mode 100644
index 0000000000..29dabec61d
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/server.pyx.pxi
@@ -0,0 +1,165 @@
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+cdef class Server:
+  """Owns a core grpc_server and mediates its start/shutdown lifecycle."""
+
+  def __cinit__(self, object arguments, bint xds):
+    # Pairs with the grpc_shutdown in __dealloc__.
+    fork_handlers_and_grpc_init()
+    self.references = []
+    self.registered_completion_queues = []
+    self.is_started = False
+    self.is_shutting_down = False
+    self.is_shutdown = False
+    self.c_server = NULL
+    cdef _ChannelArgs channel_args = _ChannelArgs(arguments)
+    self.c_server = grpc_server_create(channel_args.c_args(), NULL)
+    cdef grpc_server_xds_status_notifier notifier
+    notifier.on_serving_status_update = NULL
+    notifier.user_data = NULL
+    if xds:
+      grpc_server_set_config_fetcher(self.c_server,
+        grpc_server_config_fetcher_xds_create(notifier, channel_args.c_args()))
+    # Keep `arguments` alive for as long as the core server may read it.
+    self.references.append(arguments)
+
+  def request_call(
+      self, CompletionQueue call_queue not None,
+      CompletionQueue server_queue not None, tag):
+    """Asks core to surface the next incoming RPC as a tag on server_queue."""
+    if not self.is_started or self.is_shutting_down:
+      raise ValueError("server must be started and not shutting down")
+    if server_queue not in self.registered_completion_queues:
+      raise ValueError("server_queue must be a registered completion queue")
+    cdef _RequestCallTag request_call_tag = _RequestCallTag(tag)
+    request_call_tag.prepare()
+    # The INCREF keeps the tag alive while core holds the raw pointer; it is
+    # balanced when the tag emerges from the completion queue.
+    cpython.Py_INCREF(request_call_tag)
+    return grpc_server_request_call(
+        self.c_server, &request_call_tag.call.c_call,
+        &request_call_tag.call_details.c_details,
+        &request_call_tag.c_invocation_metadata,
+        call_queue.c_completion_queue, server_queue.c_completion_queue,
+        <cpython.PyObject *>request_call_tag)
+
+  def register_completion_queue(
+      self, CompletionQueue queue not None):
+    """Registers `queue` with core; only legal before start()."""
+    if self.is_started:
+      raise ValueError("cannot register completion queues after start")
+    with nogil:
+      grpc_server_register_completion_queue(
+          self.c_server, queue.c_completion_queue, NULL)
+    self.registered_completion_queues.append(queue)
+
+  def start(self, backup_queue=True):
+    """Start the Cython gRPC Server.
+
+    Args:
+      backup_queue: a bool indicating whether to spawn a backup completion
+        queue. Without one, if no other CQ is bound to the server, the
+        shutdown of the server becomes un-observable.
+    """
+    if self.is_started:
+      raise ValueError("the server has already started")
+    if backup_queue:
+      self.backup_shutdown_queue = CompletionQueue(shutdown_cq=True)
+      self.register_completion_queue(self.backup_shutdown_queue)
+    self.is_started = True
+    with nogil:
+      grpc_server_start(self.c_server)
+    if backup_queue:
+      # Ensure the core has gotten a chance to do the start-up work
+      self.backup_shutdown_queue.poll(deadline=time.time())
+
+  def add_http2_port(self, bytes address,
+                     ServerCredentials server_credentials=None):
+    """Binds `address`, using insecure credentials when none are supplied.
+
+    Returns the int result from grpc_server_add_http2_port (per the core
+    API, the bound port on success and 0 on failure).
+    """
+    address = str_to_bytes(address)
+    # `address` must outlive the borrowed char* handed to core below.
+    self.references.append(address)
+    cdef int result
+    cdef char *address_c_string = address
+    if server_credentials is not None:
+      self.references.append(server_credentials)
+      with nogil:
+        result = grpc_server_add_http2_port(
+            self.c_server, address_c_string, server_credentials.c_credentials)
+    else:
+      with nogil:
+        creds = grpc_insecure_server_credentials_create()
+        result = grpc_server_add_http2_port(self.c_server,
+                                            address_c_string, creds)
+        grpc_server_credentials_release(creds)
+    return result
+
+  cdef _c_shutdown(self, CompletionQueue queue, tag):
+    # Begins shutdown; the _ServerShutdownTag surfaces on `queue` once core
+    # has finished. The INCREF is balanced when the tag is consumed.
+    self.is_shutting_down = True
+    cdef _ServerShutdownTag server_shutdown_tag = _ServerShutdownTag(tag, self)
+    cpython.Py_INCREF(server_shutdown_tag)
+    with nogil:
+      grpc_server_shutdown_and_notify(
+          self.c_server, queue.c_completion_queue,
+          <cpython.PyObject *>server_shutdown_tag)
+
+  def shutdown(self, CompletionQueue queue not None, tag):
+    """Initiates shutdown, notifying `tag` on `queue`; no-op if in progress."""
+    if queue.is_shutting_down:
+      raise ValueError("queue must be live")
+    elif not self.is_started:
+      raise ValueError("the server hasn't started yet")
+    elif self.is_shutting_down:
+      return
+    elif queue not in self.registered_completion_queues:
+      raise ValueError("expected registered completion queue")
+    else:
+      self._c_shutdown(queue, tag)
+
+  cdef notify_shutdown_complete(self):
+    # called only after our server shutdown tag has emerged from a completion
+    # queue.
+    self.is_shutdown = True
+
+  def cancel_all_calls(self):
+    """Cancels every in-flight call; legal only while shutting down."""
+    if not self.is_shutting_down:
+      raise UsageError("the server must be shutting down to cancel all calls")
+    elif self.is_shutdown:
+      return
+    else:
+      with nogil:
+        grpc_server_cancel_all_calls(self.c_server)
+
+  # TODO(https://github.com/grpc/grpc/issues/17515) Determine what, if any,
+  # portion of this is safe to call from __dealloc__, and potentially remove
+  # backup_shutdown_queue.
+  def destroy(self):
+    """Synchronously drives shutdown (if needed) and destroys the core server."""
+    if self.c_server != NULL:
+      if not self.is_started:
+        pass
+      elif self.is_shutdown:
+        pass
+      elif not self.is_shutting_down:
+        if self.backup_shutdown_queue is None:
+          raise InternalError('Server shutdown failed: no completion queue.')
+        else:
+          # the user didn't call shutdown - use our backup queue
+          self._c_shutdown(self.backup_shutdown_queue, None)
+          # and now we wait
+          while not self.is_shutdown:
+            self.backup_shutdown_queue.poll()
+      else:
+        # We're in the process of shutting down, but have not shutdown; can't do
+        # much but repeatedly release the GIL and wait
+        while not self.is_shutdown:
+          time.sleep(0)
+      with nogil:
+        grpc_server_destroy(self.c_server)
+      self.c_server = NULL
+
+  def __dealloc__(self):
+    # Only balance __cinit__'s grpc_init once the core server is gone
+    # (destroy() set c_server to NULL); otherwise leak rather than crash.
+    if self.c_server == NULL:
+      grpc_shutdown()
diff --git a/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/tag.pxd.pxi b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/tag.pxd.pxi
new file mode 100644
index 0000000000..7af169fa3f
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/tag.pxd.pxi
@@ -0,0 +1,58 @@
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+cdef class _Tag:
+  # Base completion-queue tag: adapts a raw grpc_event to a Python event.
+
+  cdef BaseEvent event(self, grpc_event c_event)
+
+
+cdef class _ConnectivityTag(_Tag):
+  # Tag for channel connectivity-watch completions.
+
+  cdef readonly object _user_tag
+
+  cdef ConnectivityEvent event(self, grpc_event c_event)
+
+
+cdef class _RequestCallTag(_Tag):
+  # Tag for grpc_server_request_call completions; holds the out-parameters
+  # core fills in for the incoming RPC.
+
+  cdef readonly object _user_tag
+  cdef Call call
+  cdef CallDetails call_details
+  cdef grpc_metadata_array c_invocation_metadata
+
+  cdef void prepare(self) except *
+  cdef RequestCallEvent event(self, grpc_event c_event)
+
+
+cdef class _BatchOperationTag(_Tag):
+  # Tag for a batch of call operations; owns the C grpc_op array between
+  # prepare() and event().
+
+  cdef object _user_tag
+  cdef readonly object _operations
+  cdef readonly object _retained_call
+  cdef grpc_op *c_ops
+  cdef size_t c_nops
+
+  cdef void prepare(self) except *
+  cdef BatchOperationEvent event(self, grpc_event c_event)
+
+
+cdef class _ServerShutdownTag(_Tag):
+
+  cdef readonly object _user_tag
+  # This allows CompletionQueue to notify the Python Server object that the
+  # underlying GRPC core server has shutdown
+  cdef readonly Server _shutting_down_server
+
+  cdef ServerShutdownEvent event(self, grpc_event c_event)
diff --git a/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/tag.pyx.pxi b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/tag.pyx.pxi
new file mode 100644
index 0000000000..7e62ff6389
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/tag.pyx.pxi
@@ -0,0 +1,88 @@
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+cdef class _Tag:
+  # Base class: subclasses convert a raw grpc_event into a Python event.
+
+  cdef BaseEvent event(self, grpc_event c_event):
+    raise NotImplementedError()
+
+
+cdef class _ConnectivityTag(_Tag):
+  # Tag for channel connectivity-watch completions.
+
+  def __cinit__(self, user_tag):
+    self._user_tag = user_tag
+
+  cdef ConnectivityEvent event(self, grpc_event c_event):
+    return ConnectivityEvent(c_event.type, c_event.success, self._user_tag)
+
+
+cdef class _RequestCallTag(_Tag):
+  # Tag for grpc_server_request_call completions.
+
+  def __cinit__(self, user_tag):
+    self._user_tag = user_tag
+    self.call = None
+    self.call_details = None
+
+  cdef void prepare(self) except *:
+    # Allocate the out-parameters core will fill in for the incoming RPC.
+    self.call = Call()
+    self.call_details = CallDetails()
+    grpc_metadata_array_init(&self.c_invocation_metadata)
+
+  cdef RequestCallEvent event(self, grpc_event c_event):
+    # Copy the invocation metadata out before releasing the core array.
+    cdef tuple invocation_metadata = _metadata(&self.c_invocation_metadata)
+    grpc_metadata_array_destroy(&self.c_invocation_metadata)
+    return RequestCallEvent(
+        c_event.type, c_event.success, self._user_tag, self.call,
+        self.call_details, invocation_metadata)
+
+
+# NOTE(review): unlike the sibling classes, no explicit `_Tag` base is
+# spelled here; the base comes from the declaration in tag.pxd.pxi —
+# confirm this is intentional and matches upstream.
+cdef class _BatchOperationTag:
+
+  def __cinit__(self, user_tag, operations, call):
+    self._user_tag = user_tag
+    self._operations = operations
+    # Keeps the call alive while the batch is in flight.
+    self._retained_call = call
+
+  cdef void prepare(self) except *:
+    # Marshal each Operation into the C grpc_op array handed to core.
+    cdef Operation operation
+    self.c_nops = 0 if self._operations is None else len(self._operations)
+    if 0 < self.c_nops:
+      self.c_ops = <grpc_op *>gpr_malloc(sizeof(grpc_op) * self.c_nops)
+      for index, operation in enumerate(self._operations):
+        operation.c()
+        self.c_ops[index] = operation.c_op
+
+  cdef BatchOperationEvent event(self, grpc_event c_event):
+    # Un-marshal results and free the grpc_op array allocated in prepare().
+    cdef Operation operation
+    if 0 < self.c_nops:
+      for operation in self._operations:
+        operation.un_c()
+      gpr_free(self.c_ops)
+      return BatchOperationEvent(
+          c_event.type, c_event.success, self._user_tag, self._operations)
+    else:
+      return BatchOperationEvent(
+          c_event.type, c_event.success, self._user_tag, ())
+
+
+cdef class _ServerShutdownTag(_Tag):
+  # Tag for server shutdown completions; also flips the Server's state.
+
+  def __cinit__(self, user_tag, shutting_down_server):
+    self._user_tag = user_tag
+    self._shutting_down_server = shutting_down_server
+
+  cdef ServerShutdownEvent event(self, grpc_event c_event):
+    self._shutting_down_server.notify_shutdown_complete()
+    return ServerShutdownEvent(c_event.type, c_event.success, self._user_tag)
diff --git a/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/thread.pyx.pxi b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/thread.pyx.pxi
new file mode 100644
index 0000000000..be4cb8b9a8
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/thread.pyx.pxi
@@ -0,0 +1,59 @@
+# Copyright 2020 The gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+def _contextvars_supported():
+  """Determines if the contextvars module is supported.
+
+  We use a 'try it and see if it works approach' here rather than predicting
+  based on interpreter version in order to support older interpreters that
+  may have a backported module based on, e.g. `threading.local`.
+
+  Returns:
+    A bool indicating whether `contextvars` are supported in the current
+    environment.
+  """
+  # Probe by importing; cheap, and cached in sys.modules on success.
+  try:
+    import contextvars
+    return True
+  except ImportError:
+    return False
+
+
+def _run_with_context(target):
+  """Runs a callable with contextvars propagated.
+
+  If contextvars are supported, the calling thread's context will be copied
+  and propagated. If they are not supported, this function is equivalent
+  to the identity function.
+
+  Args:
+    target: A callable object to wrap.
+  Returns:
+    A callable object with the same signature as `target` but with
+    contextvars propagated.
+  """
+  # Intentionally bodyless: this def exists to carry the docstring; the
+  # working implementation is selected just below at import time.
+
+
+if _contextvars_supported():
+  import contextvars
+  # Capture the caller's context at wrap time; run `target` inside it.
+  def _run_with_context(target):
+    ctx = contextvars.copy_context()
+    def _run(*args):
+      ctx.run(target, *args)
+    return _run
+else:
+  # No contextvars available: plain pass-through wrapper.
+  def _run_with_context(target):
+    def _run(*args):
+      target(*args)
+    return _run
diff --git a/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/time.pxd.pxi b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/time.pxd.pxi
new file mode 100644
index 0000000000..c46e8a98b0
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/time.pxd.pxi
@@ -0,0 +1,19 @@
+# Copyright 2018 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+cdef gpr_timespec _timespec_from_time(object time) except *
+
+
+cdef double _time_from_timespec(gpr_timespec timespec) except *
diff --git a/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/time.pyx.pxi b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/time.pyx.pxi
new file mode 100644
index 0000000000..6d181bb1d6
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/time.pyx.pxi
@@ -0,0 +1,29 @@
+# Copyright 2018 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+cdef gpr_timespec _timespec_from_time(object time) except *:
+  """Converts a Python timestamp (seconds as float, or None meaning the
+  infinite future) into a realtime-clock gpr_timespec."""
+  if time is None:
+    return gpr_inf_future(GPR_CLOCK_REALTIME)
+  else:
+    # Scale seconds to nanoseconds before handing off to core.
+    return gpr_time_from_nanos(
+        <int64_t>(<double>time * GPR_NS_PER_SEC),
+        GPR_CLOCK_REALTIME,
+    )
+
+
+cdef double _time_from_timespec(gpr_timespec timespec) except *:
+  """Converts a gpr_timespec (any clock type) to realtime seconds as float."""
+  cdef gpr_timespec real_timespec = gpr_convert_clock_type(
+      timespec, GPR_CLOCK_REALTIME)
+  # Core exposes micros; divide back down to seconds.
+  return gpr_timespec_to_micros(real_timespec) / GPR_US_PER_SEC
diff --git a/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/vtable.pxd.pxi b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/vtable.pxd.pxi
new file mode 100644
index 0000000000..c96e5cb669
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/vtable.pxd.pxi
@@ -0,0 +1,23 @@
+# Copyright 2019 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+cdef void* _copy_pointer(void* pointer)
+
+cdef void _destroy_pointer(void* pointer)
+
+cdef int _compare_pointer(void* first_pointer, void* second_pointer)
+
+
+cdef grpc_arg_pointer_vtable default_vtable
diff --git a/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/vtable.pyx.pxi b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/vtable.pyx.pxi
new file mode 100644
index 0000000000..da4b81bd97
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/_cython/_cygrpc/vtable.pyx.pxi
@@ -0,0 +1,36 @@
+# Copyright 2019 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# TODO(https://github.com/grpc/grpc/issues/15662): Reform this.
+cdef void* _copy_pointer(void* pointer):
+  # "Copy" is identity: the pointer is passed through, never duplicated.
+  return pointer
+
+
+# TODO(https://github.com/grpc/grpc/issues/15662): Reform this.
+cdef void _destroy_pointer(void* pointer):
+  # Matching no-op destroy: _copy_pointer allocates nothing.
+  pass
+
+
+cdef int _compare_pointer(void* first_pointer, void* second_pointer):
+  # Total order on the raw addresses; returns -1/0/1.
+  if first_pointer < second_pointer:
+    return -1
+  elif first_pointer > second_pointer:
+    return 1
+  else:
+    return 0
+
+# Module-level vtable wired to the helpers above, for pointer channel args.
+cdef grpc_arg_pointer_vtable default_vtable
+default_vtable.copy = &_copy_pointer
+default_vtable.destroy = &_destroy_pointer
+default_vtable.cmp = &_compare_pointer
diff --git a/contrib/python/grpcio/py3/grpc/_cython/cygrpc.pxd b/contrib/python/grpcio/py3/grpc/_cython/cygrpc.pxd
new file mode 100644
index 0000000000..ed04119143
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/_cython/cygrpc.pxd
@@ -0,0 +1,50 @@
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# distutils: language=c++
+
+cimport cpython
+
+include "_cygrpc/grpc.pxi"
+
+include "_cygrpc/arguments.pxd.pxi"
+include "_cygrpc/call.pxd.pxi"
+include "_cygrpc/channel.pxd.pxi"
+include "_cygrpc/credentials.pxd.pxi"
+include "_cygrpc/completion_queue.pxd.pxi"
+include "_cygrpc/event.pxd.pxi"
+include "_cygrpc/metadata.pxd.pxi"
+include "_cygrpc/operation.pxd.pxi"
+include "_cygrpc/propagation_bits.pxd.pxi"
+include "_cygrpc/records.pxd.pxi"
+include "_cygrpc/security.pxd.pxi"
+include "_cygrpc/server.pxd.pxi"
+include "_cygrpc/tag.pxd.pxi"
+include "_cygrpc/time.pxd.pxi"
+include "_cygrpc/vtable.pxd.pxi"
+include "_cygrpc/_hooks.pxd.pxi"
+
+
+include "_cygrpc/grpc_gevent.pxd.pxi"
+
+IF UNAME_SYSNAME != "Windows":
+ include "_cygrpc/fork_posix.pxd.pxi"
+
+# Following pxi files are part of the Aio module
+include "_cygrpc/aio/completion_queue.pxd.pxi"
+include "_cygrpc/aio/rpc_status.pxd.pxi"
+include "_cygrpc/aio/grpc_aio.pxd.pxi"
+include "_cygrpc/aio/callback_common.pxd.pxi"
+include "_cygrpc/aio/call.pxd.pxi"
+include "_cygrpc/aio/channel.pxd.pxi"
+include "_cygrpc/aio/server.pxd.pxi"
diff --git a/contrib/python/grpcio/py3/grpc/_cython/cygrpc.pyx b/contrib/python/grpcio/py3/grpc/_cython/cygrpc.pyx
new file mode 100644
index 0000000000..c7925676c3
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/_cython/cygrpc.pyx
@@ -0,0 +1,94 @@
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# distutils: language=c++
+
+cimport cpython
+
+import logging
+import os
+import sys
+import threading
+import time
+
+import grpc
+
+try:
+ import asyncio
+except ImportError:
+ # TODO(https://github.com/grpc/grpc/issues/19728) Improve how Aio Cython is
+ # distributed without breaking none compatible Python versions. For now, if
+ # Asyncio package is not available we just skip it.
+ pass
+
+# The only copy of Python logger for the Cython extension
+_LOGGER = logging.getLogger(__name__)
+
+# TODO(atash): figure out why the coverage tool gets confused about the Cython
+# coverage plugin when the following files don't have a '.pxi' suffix.
+include "_cygrpc/grpc_string.pyx.pxi"
+include "_cygrpc/arguments.pyx.pxi"
+include "_cygrpc/call.pyx.pxi"
+include "_cygrpc/channel.pyx.pxi"
+include "_cygrpc/channelz.pyx.pxi"
+include "_cygrpc/csds.pyx.pxi"
+include "_cygrpc/credentials.pyx.pxi"
+include "_cygrpc/completion_queue.pyx.pxi"
+include "_cygrpc/event.pyx.pxi"
+include "_cygrpc/metadata.pyx.pxi"
+include "_cygrpc/operation.pyx.pxi"
+include "_cygrpc/propagation_bits.pyx.pxi"
+include "_cygrpc/records.pyx.pxi"
+include "_cygrpc/security.pyx.pxi"
+include "_cygrpc/server.pyx.pxi"
+include "_cygrpc/tag.pyx.pxi"
+include "_cygrpc/time.pyx.pxi"
+include "_cygrpc/vtable.pyx.pxi"
+include "_cygrpc/_hooks.pyx.pxi"
+
+include "_cygrpc/grpc_gevent.pyx.pxi"
+
+include "_cygrpc/thread.pyx.pxi"
+
+IF UNAME_SYSNAME == "Windows":
+ include "_cygrpc/fork_windows.pyx.pxi"
+ELSE:
+ include "_cygrpc/fork_posix.pyx.pxi"
+
+# Following pxi files are part of the Aio module
+include "_cygrpc/aio/common.pyx.pxi"
+include "_cygrpc/aio/rpc_status.pyx.pxi"
+include "_cygrpc/aio/completion_queue.pyx.pxi"
+include "_cygrpc/aio/callback_common.pyx.pxi"
+include "_cygrpc/aio/grpc_aio.pyx.pxi"
+include "_cygrpc/aio/call.pyx.pxi"
+include "_cygrpc/aio/channel.pyx.pxi"
+include "_cygrpc/aio/server.pyx.pxi"
+
+
+#
+# initialize gRPC
+#
+cdef extern from "Python.h":
+
+  int PyEval_InitThreads()
+
+cdef _initialize():
+  """One-time module initialization: GIL/thread setup and SSL policy."""
+  # We have Python callbacks called by c-core threads, this ensures the GIL
+  # is initialized.
+  # NOTE(review): PyEval_InitThreads is a deprecated no-op on CPython 3.9+;
+  # harmless here, but droppable once older runtimes are gone — confirm.
+  PyEval_InitThreads()
+  # Imported for its side effect — presumably to initialize CPython's own
+  # OpenSSL bindings before c-core opts out below; confirm against upstream.
+  import ssl
+  grpc_dont_init_openssl()
+  # Load Arcadia certs in ComputePemRootCerts and do not override here.
+
+_initialize()
diff --git a/contrib/python/grpcio/py3/grpc/_grpcio_metadata.py b/contrib/python/grpcio/py3/grpc/_grpcio_metadata.py
new file mode 100644
index 0000000000..99395f78b2
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/_grpcio_metadata.py
@@ -0,0 +1 @@
+__version__ = """1.54.2""" \ No newline at end of file
diff --git a/contrib/python/grpcio/py3/grpc/_interceptor.py b/contrib/python/grpcio/py3/grpc/_interceptor.py
new file mode 100644
index 0000000000..865ff17d35
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/_interceptor.py
@@ -0,0 +1,638 @@
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Implementation of gRPC Python interceptors."""
+
+import collections
+import sys
+import types
+from typing import Any, Callable, Optional, Sequence, Tuple, Union
+
+import grpc
+
+from ._typing import DeserializingFunction
+from ._typing import DoneCallbackType
+from ._typing import MetadataType
+from ._typing import RequestIterableType
+from ._typing import SerializingFunction
+
+
+class _ServicePipeline(object):
+    """Chains server interceptors into a single intercept_service pipeline."""
+
+    interceptors: Tuple[grpc.ServerInterceptor]
+
+    def __init__(self, interceptors: Sequence[grpc.ServerInterceptor]):
+        self.interceptors = tuple(interceptors)
+
+    def _continuation(self, thunk: Callable, index: int) -> Callable:
+        # Returns a callable that resumes interception at `index` when the
+        # previous interceptor invokes its continuation.
+        return lambda context: self._intercept_at(thunk, index, context)
+
+    def _intercept_at(
+            self, thunk: Callable, index: int,
+            context: grpc.HandlerCallDetails) -> grpc.RpcMethodHandler:
+        # Run interceptor `index`, handing it a continuation for the rest of
+        # the chain; past the last interceptor, fall through to `thunk`.
+        if index < len(self.interceptors):
+            interceptor = self.interceptors[index]
+            thunk = self._continuation(thunk, index + 1)
+            return interceptor.intercept_service(thunk, context)
+        else:
+            return thunk(context)
+
+    def execute(self, thunk: Callable,
+                context: grpc.HandlerCallDetails) -> grpc.RpcMethodHandler:
+        """Runs the full interceptor chain, terminating at `thunk`."""
+        return self._intercept_at(thunk, 0, context)
+
+
+def service_pipeline(
+    interceptors: Optional[Sequence[grpc.ServerInterceptor]]
+) -> Optional[_ServicePipeline]:
+    """Builds a _ServicePipeline, or None when `interceptors` is empty/None."""
+    return _ServicePipeline(interceptors) if interceptors else None
+
+
+class _ClientCallDetails(
+        collections.namedtuple('_ClientCallDetails',
+                               ('method', 'timeout', 'metadata', 'credentials',
+                                'wait_for_ready', 'compression')),
+        grpc.ClientCallDetails):
+    """Concrete, immutable namedtuple implementation of grpc.ClientCallDetails."""
+    pass
+
+
+def _unwrap_client_call_details(
+    call_details: grpc.ClientCallDetails,
+    default_details: grpc.ClientCallDetails
+) -> Tuple[str, float, MetadataType, grpc.CallCredentials, bool,
+           grpc.Compression]:
+    """Reads each ClientCallDetails field, falling back to default_details.
+
+    Interceptors may supply partial ClientCallDetails objects; any attribute
+    missing from `call_details` is taken from `default_details` instead.
+    """
+    try:
+        method = call_details.method  # pytype: disable=attribute-error
+    except AttributeError:
+        method = default_details.method  # pytype: disable=attribute-error
+
+    try:
+        timeout = call_details.timeout  # pytype: disable=attribute-error
+    except AttributeError:
+        timeout = default_details.timeout  # pytype: disable=attribute-error
+
+    try:
+        metadata = call_details.metadata  # pytype: disable=attribute-error
+    except AttributeError:
+        metadata = default_details.metadata  # pytype: disable=attribute-error
+
+    try:
+        credentials = call_details.credentials  # pytype: disable=attribute-error
+    except AttributeError:
+        credentials = default_details.credentials  # pytype: disable=attribute-error
+
+    try:
+        wait_for_ready = call_details.wait_for_ready  # pytype: disable=attribute-error
+    except AttributeError:
+        wait_for_ready = default_details.wait_for_ready  # pytype: disable=attribute-error
+
+    try:
+        compression = call_details.compression  # pytype: disable=attribute-error
+    except AttributeError:
+        compression = default_details.compression  # pytype: disable=attribute-error
+
+    return method, timeout, metadata, credentials, wait_for_ready, compression
+
+
+class _FailureOutcome(grpc.RpcError, grpc.Future, grpc.Call):  # pylint: disable=too-many-ancestors
+    """Outcome returned when an interceptor raises.
+
+    Presents the captured exception as an already-completed, failed RPC:
+    simultaneously an RpcError, a done Future whose result() re-raises, and
+    a Call reporting INTERNAL status.
+    """
+
+    _exception: Exception
+    _traceback: types.TracebackType
+
+    def __init__(self, exception: Exception, traceback: types.TracebackType):
+        super(_FailureOutcome, self).__init__()
+        self._exception = exception
+        self._traceback = traceback
+
+    def initial_metadata(self) -> Optional[MetadataType]:
+        return None
+
+    def trailing_metadata(self) -> Optional[MetadataType]:
+        return None
+
+    def code(self) -> Optional[grpc.StatusCode]:
+        return grpc.StatusCode.INTERNAL
+
+    def details(self) -> Optional[str]:
+        return 'Exception raised while intercepting the RPC'
+
+    def cancel(self) -> bool:
+        # Already terminated; cancellation can never succeed.
+        return False
+
+    def cancelled(self) -> bool:
+        return False
+
+    def is_active(self) -> bool:
+        return False
+
+    def time_remaining(self) -> Optional[float]:
+        return None
+
+    def running(self) -> bool:
+        return False
+
+    def done(self) -> bool:
+        return True
+
+    def result(self, ignored_timeout: Optional[float] = None):
+        # Future protocol: a failed future's result() raises its exception.
+        raise self._exception
+
+    def exception(
+            self,
+            ignored_timeout: Optional[float] = None) -> Optional[Exception]:
+        return self._exception
+
+    def traceback(
+        self,
+        ignored_timeout: Optional[float] = None
+    ) -> Optional[types.TracebackType]:
+        return self._traceback
+
+    def add_callback(self, unused_callback) -> bool:
+        return False
+
+    def add_done_callback(self, fn: DoneCallbackType) -> None:
+        # Already done, so invoke the callback immediately.
+        fn(self)
+
+    def __iter__(self):
+        return self
+
+    def __next__(self):
+        # Iterating a failed streaming response re-raises the failure.
+        raise self._exception
+
+    def next(self):
+        return self.__next__()
+
+
+class _UnaryOutcome(grpc.Call, grpc.Future):
+    """Wraps a unary response and its grpc.Call as a completed Future.
+
+    Call-protocol methods delegate to the underlying call; Future-protocol
+    methods report an already-successful completion yielding the response.
+    """
+
+    _response: Any
+    _call: grpc.Call
+
+    def __init__(self, response: Any, call: grpc.Call):
+        self._response = response
+        self._call = call
+
+    def initial_metadata(self) -> Optional[MetadataType]:
+        return self._call.initial_metadata()
+
+    def trailing_metadata(self) -> Optional[MetadataType]:
+        return self._call.trailing_metadata()
+
+    def code(self) -> Optional[grpc.StatusCode]:
+        return self._call.code()
+
+    def details(self) -> Optional[str]:
+        return self._call.details()
+
+    def is_active(self) -> bool:
+        return self._call.is_active()
+
+    def time_remaining(self) -> Optional[float]:
+        return self._call.time_remaining()
+
+    def cancel(self) -> bool:
+        return self._call.cancel()
+
+    def add_callback(self, callback) -> bool:
+        return self._call.add_callback(callback)
+
+    def cancelled(self) -> bool:
+        return False
+
+    def running(self) -> bool:
+        return False
+
+    def done(self) -> bool:
+        return True
+
+    def result(self, ignored_timeout: Optional[float] = None):
+        # Successful completion: result() returns the unary response.
+        return self._response
+
+    def exception(self, ignored_timeout: Optional[float] = None):
+        return None
+
+    def traceback(self, ignored_timeout: Optional[float] = None):
+        return None
+
+    def add_done_callback(self, fn: DoneCallbackType) -> None:
+        # Already done, so invoke the callback immediately.
+        fn(self)
+
+
+class _UnaryUnaryMultiCallable(grpc.UnaryUnaryMultiCallable):
+    """Unary-unary callable that routes invocations through an interceptor.
+
+    `_thunk` maps a (possibly interceptor-rewritten) method name to the
+    underlying multi-callable on the wrapped channel.
+    """
+    _thunk: Callable
+    _method: str
+    _interceptor: grpc.UnaryUnaryClientInterceptor
+
+    def __init__(self, thunk: Callable, method: str,
+                 interceptor: grpc.UnaryUnaryClientInterceptor):
+        self._thunk = thunk
+        self._method = method
+        self._interceptor = interceptor
+
+    def __call__(self,
+                 request: Any,
+                 timeout: Optional[float] = None,
+                 metadata: Optional[MetadataType] = None,
+                 credentials: Optional[grpc.CallCredentials] = None,
+                 wait_for_ready: Optional[bool] = None,
+                 compression: Optional[grpc.Compression] = None) -> Any:
+        # Blocking invocation: drop the call object, return just the response.
+        response, ignored_call = self._with_call(request,
+                                                 timeout=timeout,
+                                                 metadata=metadata,
+                                                 credentials=credentials,
+                                                 wait_for_ready=wait_for_ready,
+                                                 compression=compression)
+        return response
+
+    def _with_call(
+            self,
+            request: Any,
+            timeout: Optional[float] = None,
+            metadata: Optional[MetadataType] = None,
+            credentials: Optional[grpc.CallCredentials] = None,
+            wait_for_ready: Optional[bool] = None,
+            compression: Optional[grpc.Compression] = None
+    ) -> Tuple[Any, grpc.Call]:
+        client_call_details = _ClientCallDetails(self._method, timeout,
+                                                 metadata, credentials,
+                                                 wait_for_ready, compression)
+
+        def continuation(new_details, request):
+            # The interceptor may replace any call detail; None fields fall
+            # back to the original values via _unwrap_client_call_details.
+            (new_method, new_timeout, new_metadata, new_credentials,
+             new_wait_for_ready,
+             new_compression) = (_unwrap_client_call_details(
+                 new_details, client_call_details))
+            try:
+                response, call = self._thunk(new_method).with_call(
+                    request,
+                    timeout=new_timeout,
+                    metadata=new_metadata,
+                    credentials=new_credentials,
+                    wait_for_ready=new_wait_for_ready,
+                    compression=new_compression)
+                return _UnaryOutcome(response, call)
+            except grpc.RpcError as rpc_error:
+                # An RpcError doubles as a terminal grpc.Future: returning it
+                # lets call.result() below re-raise it for the caller.
+                return rpc_error
+            except Exception as exception: # pylint:disable=broad-except
+                return _FailureOutcome(exception, sys.exc_info()[2])
+
+        call = self._interceptor.intercept_unary_unary(continuation,
+                                                       client_call_details,
+                                                       request)
+        # call is Future-like; result() raises if the RPC (or an
+        # interceptor) failed.
+        return call.result(), call
+
+    def with_call(
+            self,
+            request: Any,
+            timeout: Optional[float] = None,
+            metadata: Optional[MetadataType] = None,
+            credentials: Optional[grpc.CallCredentials] = None,
+            wait_for_ready: Optional[bool] = None,
+            compression: Optional[grpc.Compression] = None
+    ) -> Tuple[Any, grpc.Call]:
+        return self._with_call(request,
+                               timeout=timeout,
+                               metadata=metadata,
+                               credentials=credentials,
+                               wait_for_ready=wait_for_ready,
+                               compression=compression)
+
+    def future(self,
+               request: Any,
+               timeout: Optional[float] = None,
+               metadata: Optional[MetadataType] = None,
+               credentials: Optional[grpc.CallCredentials] = None,
+               wait_for_ready: Optional[bool] = None,
+               compression: Optional[grpc.Compression] = None) -> Any:
+        client_call_details = _ClientCallDetails(self._method, timeout,
+                                                 metadata, credentials,
+                                                 wait_for_ready, compression)
+
+        def continuation(new_details, request):
+            (new_method, new_timeout, new_metadata, new_credentials,
+             new_wait_for_ready,
+             new_compression) = (_unwrap_client_call_details(
+                 new_details, client_call_details))
+            return self._thunk(new_method).future(
+                request,
+                timeout=new_timeout,
+                metadata=new_metadata,
+                credentials=new_credentials,
+                wait_for_ready=new_wait_for_ready,
+                compression=new_compression)
+
+        try:
+            return self._interceptor.intercept_unary_unary(
+                continuation, client_call_details, request)
+        except Exception as exception: # pylint:disable=broad-except
+            # Surface interceptor failures as an already-failed Future.
+            return _FailureOutcome(exception, sys.exc_info()[2])
+
+
+class _UnaryStreamMultiCallable(grpc.UnaryStreamMultiCallable):
+    """Unary-stream callable that routes invocations through an interceptor."""
+    _thunk: Callable
+    _method: str
+    _interceptor: grpc.UnaryStreamClientInterceptor
+
+    def __init__(self, thunk: Callable, method: str,
+                 interceptor: grpc.UnaryStreamClientInterceptor):
+        self._thunk = thunk
+        self._method = method
+        self._interceptor = interceptor
+
+    def __call__(self,
+                 request: Any,
+                 timeout: Optional[float] = None,
+                 metadata: Optional[MetadataType] = None,
+                 credentials: Optional[grpc.CallCredentials] = None,
+                 wait_for_ready: Optional[bool] = None,
+                 compression: Optional[grpc.Compression] = None):
+        client_call_details = _ClientCallDetails(self._method, timeout,
+                                                 metadata, credentials,
+                                                 wait_for_ready, compression)
+
+        def continuation(new_details, request):
+            # None fields in new_details fall back to the original values.
+            (new_method, new_timeout, new_metadata, new_credentials,
+             new_wait_for_ready,
+             new_compression) = (_unwrap_client_call_details(
+                 new_details, client_call_details))
+            return self._thunk(new_method)(request,
+                                           timeout=new_timeout,
+                                           metadata=new_metadata,
+                                           credentials=new_credentials,
+                                           wait_for_ready=new_wait_for_ready,
+                                           compression=new_compression)
+
+        try:
+            return self._interceptor.intercept_unary_stream(
+                continuation, client_call_details, request)
+        except Exception as exception: # pylint:disable=broad-except
+            # Surface interceptor failures as an already-failed outcome.
+            return _FailureOutcome(exception, sys.exc_info()[2])
+
+
+class _StreamUnaryMultiCallable(grpc.StreamUnaryMultiCallable):
+    """Stream-unary callable that routes invocations through an interceptor.
+
+    Mirrors _UnaryUnaryMultiCallable but takes a request iterator.
+    """
+    _thunk: Callable
+    _method: str
+    _interceptor: grpc.StreamUnaryClientInterceptor
+
+    def __init__(self, thunk: Callable, method: str,
+                 interceptor: grpc.StreamUnaryClientInterceptor):
+        self._thunk = thunk
+        self._method = method
+        self._interceptor = interceptor
+
+    def __call__(self,
+                 request_iterator: RequestIterableType,
+                 timeout: Optional[float] = None,
+                 metadata: Optional[MetadataType] = None,
+                 credentials: Optional[grpc.CallCredentials] = None,
+                 wait_for_ready: Optional[bool] = None,
+                 compression: Optional[grpc.Compression] = None) -> Any:
+        # Blocking invocation: drop the call object, return just the response.
+        response, ignored_call = self._with_call(request_iterator,
+                                                 timeout=timeout,
+                                                 metadata=metadata,
+                                                 credentials=credentials,
+                                                 wait_for_ready=wait_for_ready,
+                                                 compression=compression)
+        return response
+
+    def _with_call(
+            self,
+            request_iterator: RequestIterableType,
+            timeout: Optional[float] = None,
+            metadata: Optional[MetadataType] = None,
+            credentials: Optional[grpc.CallCredentials] = None,
+            wait_for_ready: Optional[bool] = None,
+            compression: Optional[grpc.Compression] = None
+    ) -> Tuple[Any, grpc.Call]:
+        client_call_details = _ClientCallDetails(self._method, timeout,
+                                                 metadata, credentials,
+                                                 wait_for_ready, compression)
+
+        def continuation(new_details, request_iterator):
+            # None fields in new_details fall back to the original values.
+            (new_method, new_timeout, new_metadata, new_credentials,
+             new_wait_for_ready,
+             new_compression) = (_unwrap_client_call_details(
+                 new_details, client_call_details))
+            try:
+                response, call = self._thunk(new_method).with_call(
+                    request_iterator,
+                    timeout=new_timeout,
+                    metadata=new_metadata,
+                    credentials=new_credentials,
+                    wait_for_ready=new_wait_for_ready,
+                    compression=new_compression)
+                return _UnaryOutcome(response, call)
+            except grpc.RpcError as rpc_error:
+                # RpcError doubles as a terminal grpc.Future; result() below
+                # re-raises it for the caller.
+                return rpc_error
+            except Exception as exception: # pylint:disable=broad-except
+                return _FailureOutcome(exception, sys.exc_info()[2])
+
+        call = self._interceptor.intercept_stream_unary(continuation,
+                                                        client_call_details,
+                                                        request_iterator)
+        return call.result(), call
+
+    def with_call(
+            self,
+            request_iterator: RequestIterableType,
+            timeout: Optional[float] = None,
+            metadata: Optional[MetadataType] = None,
+            credentials: Optional[grpc.CallCredentials] = None,
+            wait_for_ready: Optional[bool] = None,
+            compression: Optional[grpc.Compression] = None
+    ) -> Tuple[Any, grpc.Call]:
+        return self._with_call(request_iterator,
+                               timeout=timeout,
+                               metadata=metadata,
+                               credentials=credentials,
+                               wait_for_ready=wait_for_ready,
+                               compression=compression)
+
+    def future(self,
+               request_iterator: RequestIterableType,
+               timeout: Optional[float] = None,
+               metadata: Optional[MetadataType] = None,
+               credentials: Optional[grpc.CallCredentials] = None,
+               wait_for_ready: Optional[bool] = None,
+               compression: Optional[grpc.Compression] = None) -> Any:
+        client_call_details = _ClientCallDetails(self._method, timeout,
+                                                 metadata, credentials,
+                                                 wait_for_ready, compression)
+
+        def continuation(new_details, request_iterator):
+            (new_method, new_timeout, new_metadata, new_credentials,
+             new_wait_for_ready,
+             new_compression) = (_unwrap_client_call_details(
+                 new_details, client_call_details))
+            return self._thunk(new_method).future(
+                request_iterator,
+                timeout=new_timeout,
+                metadata=new_metadata,
+                credentials=new_credentials,
+                wait_for_ready=new_wait_for_ready,
+                compression=new_compression)
+
+        try:
+            return self._interceptor.intercept_stream_unary(
+                continuation, client_call_details, request_iterator)
+        except Exception as exception: # pylint:disable=broad-except
+            # Surface interceptor failures as an already-failed Future.
+            return _FailureOutcome(exception, sys.exc_info()[2])
+
+
+class _StreamStreamMultiCallable(grpc.StreamStreamMultiCallable):
+    """Stream-stream callable that routes invocations through an interceptor."""
+    _thunk: Callable
+    _method: str
+    _interceptor: grpc.StreamStreamClientInterceptor
+
+    def __init__(self, thunk: Callable, method: str,
+                 interceptor: grpc.StreamStreamClientInterceptor):
+        self._thunk = thunk
+        self._method = method
+        self._interceptor = interceptor
+
+    def __call__(self,
+                 request_iterator: RequestIterableType,
+                 timeout: Optional[float] = None,
+                 metadata: Optional[MetadataType] = None,
+                 credentials: Optional[grpc.CallCredentials] = None,
+                 wait_for_ready: Optional[bool] = None,
+                 compression: Optional[grpc.Compression] = None):
+        client_call_details = _ClientCallDetails(self._method, timeout,
+                                                 metadata, credentials,
+                                                 wait_for_ready, compression)
+
+        def continuation(new_details, request_iterator):
+            # None fields in new_details fall back to the original values.
+            (new_method, new_timeout, new_metadata, new_credentials,
+             new_wait_for_ready,
+             new_compression) = (_unwrap_client_call_details(
+                 new_details, client_call_details))
+            return self._thunk(new_method)(request_iterator,
+                                           timeout=new_timeout,
+                                           metadata=new_metadata,
+                                           credentials=new_credentials,
+                                           wait_for_ready=new_wait_for_ready,
+                                           compression=new_compression)
+
+        try:
+            return self._interceptor.intercept_stream_stream(
+                continuation, client_call_details, request_iterator)
+        except Exception as exception: # pylint:disable=broad-except
+            # Surface interceptor failures as an already-failed outcome.
+            return _FailureOutcome(exception, sys.exc_info()[2])
+
+
+class _Channel(grpc.Channel):
+    """Channel wrapper that applies a single client interceptor.
+
+    Each multi-callable factory below wraps the underlying channel's
+    callable only when this channel's interceptor matches that RPC arity;
+    otherwise the plain callable is returned unchanged.
+    """
+    _channel: grpc.Channel
+    _interceptor: Union[grpc.UnaryUnaryClientInterceptor,
+                        grpc.UnaryStreamClientInterceptor,
+                        grpc.StreamStreamClientInterceptor,
+                        grpc.StreamUnaryClientInterceptor]
+
+    def __init__(self, channel: grpc.Channel,
+                 interceptor: Union[grpc.UnaryUnaryClientInterceptor,
+                                    grpc.UnaryStreamClientInterceptor,
+                                    grpc.StreamStreamClientInterceptor,
+                                    grpc.StreamUnaryClientInterceptor]):
+        self._channel = channel
+        self._interceptor = interceptor
+
+    def subscribe(self,
+                  callback: Callable,
+                  try_to_connect: Optional[bool] = False):
+        # Connectivity subscriptions pass straight through to the wrapped
+        # channel; interceptors play no role here.
+        self._channel.subscribe(callback, try_to_connect=try_to_connect)
+
+    def unsubscribe(self, callback: Callable):
+        self._channel.unsubscribe(callback)
+
+    def unary_unary(
+            self,
+            method: str,
+            request_serializer: Optional[SerializingFunction] = None,
+            response_deserializer: Optional[DeserializingFunction] = None
+    ) -> grpc.UnaryUnaryMultiCallable:
+        # The thunk defers creation so interceptors may rewrite the method.
+        thunk = lambda m: self._channel.unary_unary(m, request_serializer,
+                                                    response_deserializer)
+        if isinstance(self._interceptor, grpc.UnaryUnaryClientInterceptor):
+            return _UnaryUnaryMultiCallable(thunk, method, self._interceptor)
+        else:
+            return thunk(method)
+
+    def unary_stream(
+            self,
+            method: str,
+            request_serializer: Optional[SerializingFunction] = None,
+            response_deserializer: Optional[DeserializingFunction] = None
+    ) -> grpc.UnaryStreamMultiCallable:
+        thunk = lambda m: self._channel.unary_stream(m, request_serializer,
+                                                     response_deserializer)
+        if isinstance(self._interceptor, grpc.UnaryStreamClientInterceptor):
+            return _UnaryStreamMultiCallable(thunk, method, self._interceptor)
+        else:
+            return thunk(method)
+
+    def stream_unary(
+            self,
+            method: str,
+            request_serializer: Optional[SerializingFunction] = None,
+            response_deserializer: Optional[DeserializingFunction] = None
+    ) -> grpc.StreamUnaryMultiCallable:
+        thunk = lambda m: self._channel.stream_unary(m, request_serializer,
+                                                     response_deserializer)
+        if isinstance(self._interceptor, grpc.StreamUnaryClientInterceptor):
+            return _StreamUnaryMultiCallable(thunk, method, self._interceptor)
+        else:
+            return thunk(method)
+
+    def stream_stream(
+            self,
+            method: str,
+            request_serializer: Optional[SerializingFunction] = None,
+            response_deserializer: Optional[DeserializingFunction] = None
+    ) -> grpc.StreamStreamMultiCallable:
+        thunk = lambda m: self._channel.stream_stream(m, request_serializer,
+                                                      response_deserializer)
+        if isinstance(self._interceptor, grpc.StreamStreamClientInterceptor):
+            return _StreamStreamMultiCallable(thunk, method, self._interceptor)
+        else:
+            return thunk(method)
+
+    def _close(self):
+        self._channel.close()
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        # Returning False propagates any in-flight exception.
+        self._close()
+        return False
+
+    def close(self):
+        # NOTE(review): duplicates _close(); both just close the wrapped
+        # channel, so behavior is identical either way.
+        self._channel.close()
+
+
+def intercept_channel(
+ channel: grpc.Channel,
+ *interceptors: Optional[Sequence[Union[grpc.UnaryUnaryClientInterceptor,
+ grpc.UnaryStreamClientInterceptor,
+ grpc.StreamStreamClientInterceptor,
+ grpc.StreamUnaryClientInterceptor]]]
+) -> grpc.Channel:
+ for interceptor in reversed(list(interceptors)):
+ if not isinstance(interceptor, grpc.UnaryUnaryClientInterceptor) and \
+ not isinstance(interceptor, grpc.UnaryStreamClientInterceptor) and \
+ not isinstance(interceptor, grpc.StreamUnaryClientInterceptor) and \
+ not isinstance(interceptor, grpc.StreamStreamClientInterceptor):
+ raise TypeError('interceptor must be '
+ 'grpc.UnaryUnaryClientInterceptor or '
+ 'grpc.UnaryStreamClientInterceptor or '
+ 'grpc.StreamUnaryClientInterceptor or '
+ 'grpc.StreamStreamClientInterceptor or ')
+ channel = _Channel(channel, interceptor)
+ return channel
diff --git a/contrib/python/grpcio/py3/grpc/_plugin_wrapping.py b/contrib/python/grpcio/py3/grpc/_plugin_wrapping.py
new file mode 100644
index 0000000000..942264cdae
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/_plugin_wrapping.py
@@ -0,0 +1,121 @@
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import collections
+import logging
+import threading
+from typing import Callable, Optional, Type
+
+import grpc
+from grpc import _common
+from grpc._cython import cygrpc
+from grpc._typing import MetadataType
+
+_LOGGER = logging.getLogger(__name__)
+
+
+class _AuthMetadataContext(
+        collections.namedtuple('AuthMetadataContext', (
+            'service_url',
+            'method_name',
+        )), grpc.AuthMetadataContext):
+    """Immutable (service_url, method_name) pair handed to auth plugins."""
+    pass
+
+
+class _CallbackState(object):
+    """Lock-guarded record of whether the plugin callback already ran."""
+
+    def __init__(self):
+        self.lock = threading.Lock()
+        # called: set once the callback has been invoked.
+        self.called = False
+        # exception: set if the plugin itself raised; blocks later callbacks.
+        self.exception = None
+
+
+class _AuthMetadataPluginCallback(grpc.AuthMetadataPluginCallback):
+    """Callback handed to user auth plugins; enforces single invocation.
+
+    Translates the plugin's (metadata, error) result into the cygrpc
+    credentials-callback convention.
+    """
+    _state: _CallbackState
+    _callback: Callable
+
+    def __init__(self, state: _CallbackState, callback: Callable):
+        self._state = state
+        self._callback = callback
+
+    def __call__(self, metadata: MetadataType,
+                 error: Optional[Type[BaseException]]):
+        # Guard against misbehaving plugins: a second invocation, or an
+        # invocation after the plugin already raised, is a RuntimeError.
+        with self._state.lock:
+            if self._state.exception is None:
+                if self._state.called:
+                    raise RuntimeError(
+                        'AuthMetadataPluginCallback invoked more than once!')
+                else:
+                    self._state.called = True
+            else:
+                raise RuntimeError(
+                    'AuthMetadataPluginCallback raised exception "{}"!'.format(
+                        self._state.exception))
+        if error is None:
+            self._callback(metadata, cygrpc.StatusCode.ok, None)
+        else:
+            # Plugin reported an error: no metadata, INTERNAL status.
+            self._callback(None, cygrpc.StatusCode.internal,
+                           _common.encode(str(error)))
+
+
+class _Plugin(object):
+    """Adapter invoked by Core to run a user grpc.AuthMetadataPlugin."""
+    _metadata_plugin: grpc.AuthMetadataPlugin
+
+    def __init__(self, metadata_plugin: grpc.AuthMetadataPlugin):
+        self._metadata_plugin = metadata_plugin
+        self._stored_ctx = None
+
+        try:
+            import contextvars  # pylint: disable=wrong-import-position
+
+            # The plugin may be invoked on a thread created by Core, which will not
+            # have the context propagated. This context is stored and installed in
+            # the thread invoking the plugin.
+            # NOTE(review): _stored_ctx is captured here but not visibly used
+            # in __call__ below -- presumably consumed by the Cython layer;
+            # confirm before relying on context propagation.
+            self._stored_ctx = contextvars.copy_context()
+        except ImportError:
+            # Support versions predating contextvars.
+            pass
+
+    def __call__(self, service_url: str, method_name: str, callback: Callable):
+        # Core hands us bytes; decode before exposing to Python plugin code.
+        context = _AuthMetadataContext(_common.decode(service_url),
+                                       _common.decode(method_name))
+        callback_state = _CallbackState()
+        try:
+            self._metadata_plugin(
+                context, _AuthMetadataPluginCallback(callback_state, callback))
+        except Exception as exception:  # pylint: disable=broad-except
+            _LOGGER.exception(
+                'AuthMetadataPluginCallback "%s" raised exception!',
+                self._metadata_plugin)
+            with callback_state.lock:
+                callback_state.exception = exception
+                if callback_state.called:
+                    # Plugin already completed the callback before raising;
+                    # nothing more to report to Core.
+                    return
+            callback(None, cygrpc.StatusCode.internal,
+                     _common.encode(str(exception)))
+
+
+def metadata_plugin_call_credentials(
+        metadata_plugin: grpc.AuthMetadataPlugin,
+        name: Optional[str]) -> grpc.CallCredentials:
+    """Wraps an AuthMetadataPlugin into grpc.CallCredentials.
+
+    If no name is given, falls back to the plugin's __name__ and then to
+    its class name.
+    """
+    if name is None:
+        try:
+            effective_name = metadata_plugin.__name__
+        except AttributeError:
+            effective_name = metadata_plugin.__class__.__name__
+    else:
+        effective_name = name
+    return grpc.CallCredentials(
+        cygrpc.MetadataPluginCallCredentials(_Plugin(metadata_plugin),
+                                             _common.encode(effective_name)))
diff --git a/contrib/python/grpcio/py3/grpc/_runtime_protos.py b/contrib/python/grpcio/py3/grpc/_runtime_protos.py
new file mode 100644
index 0000000000..fcc37038da
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/_runtime_protos.py
@@ -0,0 +1,159 @@
+# Copyright 2020 The gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+import types
+from typing import Tuple, Union
+
+_REQUIRED_SYMBOLS = ("_protos", "_services", "_protos_and_services")
+_MINIMUM_VERSION = (3, 5, 0)
+
+_UNINSTALLED_TEMPLATE = "Install the grpcio-tools package (1.32.0+) to use the {} function."
+_VERSION_ERROR_TEMPLATE = "The {} function is only on available on Python 3.X interpreters."
+
+
+def _has_runtime_proto_symbols(mod: types.ModuleType) -> bool:
+    """True if `mod` exposes every symbol in _REQUIRED_SYMBOLS."""
+    return all(hasattr(mod, sym) for sym in _REQUIRED_SYMBOLS)
+
+
+def _is_grpc_tools_importable() -> bool:
+    """True if grpc_tools can be imported; re-raises transitive failures."""
+    try:
+        import grpc_tools  # pylint: disable=unused-import # pytype: disable=import-error
+        return True
+    except ImportError as e:
+        # NOTE: It's possible that we're encountering a transitive ImportError, so
+        # we check for that and re-raise if so.
+        if "grpc_tools" not in e.args[0]:
+            raise
+        return False
+
+
+def _call_with_lazy_import(
+        fn_name: str, protobuf_path: str
+) -> Union[types.ModuleType, Tuple[types.ModuleType, types.ModuleType]]:
+    """Calls one of the three functions, lazily importing grpc_tools.
+
+    Args:
+      fn_name: The name of the function to import from grpc_tools.protoc.
+      protobuf_path: The path to import.
+
+    Returns:
+      The appropriate module object.
+    """
+    if sys.version_info < _MINIMUM_VERSION:
+        raise NotImplementedError(_VERSION_ERROR_TEMPLATE.format(fn_name))
+    else:
+        if not _is_grpc_tools_importable():
+            raise NotImplementedError(_UNINSTALLED_TEMPLATE.format(fn_name))
+        import grpc_tools.protoc  # pytype: disable=import-error
+        if _has_runtime_proto_symbols(grpc_tools.protoc):
+            # Private names in grpc_tools.protoc are underscore-prefixed
+            # versions of the public fn_name.
+            fn = getattr(grpc_tools.protoc, '_' + fn_name)
+            return fn(protobuf_path)
+        else:
+            # grpc_tools is present but too old to carry the symbols.
+            raise NotImplementedError(_UNINSTALLED_TEMPLATE.format(fn_name))
+
+
+def protos(protobuf_path):  # pylint: disable=unused-argument
+    """Returns a module generated by the indicated .proto file.
+
+    THIS IS AN EXPERIMENTAL API.
+
+    Use this function to retrieve classes corresponding to message
+    definitions in the .proto file.
+
+    To inspect the contents of the returned module, use the dir function.
+    For example:
+
+    ```
+    protos = grpc.protos("foo.proto")
+    print(dir(protos))
+    ```
+
+    The returned module object corresponds to the _pb2.py file generated
+    by protoc. The path is expected to be relative to an entry on sys.path
+    and all transitive dependencies of the file should also be resolveable
+    from an entry on sys.path.
+
+    To completely disable the machinery behind this function, set the
+    GRPC_PYTHON_DISABLE_DYNAMIC_STUBS environment variable to "true".
+
+    Args:
+      protobuf_path: The path to the .proto file on the filesystem. This path
+      must be resolveable from an entry on sys.path and so must all of its
+      transitive dependencies.
+
+    Returns:
+      A module object corresponding to the message code for the indicated
+      .proto file. Equivalent to a generated _pb2.py file.
+    """
+    # Delegates so grpcio-tools is imported only if/when actually used.
+    return _call_with_lazy_import("protos", protobuf_path)
+
+
+def services(protobuf_path):  # pylint: disable=unused-argument
+    """Returns a module generated by the indicated .proto file.
+
+    THIS IS AN EXPERIMENTAL API.
+
+    Use this function to retrieve classes and functions corresponding to
+    service definitions in the .proto file, including both stub and servicer
+    definitions.
+
+    To inspect the contents of the returned module, use the dir function.
+    For example:
+
+    ```
+    services = grpc.services("foo.proto")
+    print(dir(services))
+    ```
+
+    The returned module object corresponds to the _pb2_grpc.py file generated
+    by protoc. The path is expected to be relative to an entry on sys.path
+    and all transitive dependencies of the file should also be resolveable
+    from an entry on sys.path.
+
+    To completely disable the machinery behind this function, set the
+    GRPC_PYTHON_DISABLE_DYNAMIC_STUBS environment variable to "true".
+
+    Args:
+      protobuf_path: The path to the .proto file on the filesystem. This path
+      must be resolveable from an entry on sys.path and so must all of its
+      transitive dependencies.
+
+    Returns:
+      A module object corresponding to the stub/service code for the indicated
+      .proto file. Equivalent to a generated _pb2_grpc.py file.
+    """
+    # Delegates so grpcio-tools is imported only if/when actually used.
+    return _call_with_lazy_import("services", protobuf_path)
+
+
+def protos_and_services(protobuf_path):  # pylint: disable=unused-argument
+    """Returns a 2-tuple of modules corresponding to protos and services.
+
+    THIS IS AN EXPERIMENTAL API.
+
+    The return value of this function is equivalent to a call to protos and a
+    call to services.
+
+    To completely disable the machinery behind this function, set the
+    GRPC_PYTHON_DISABLE_DYNAMIC_STUBS environment variable to "true".
+
+    Args:
+      protobuf_path: The path to the .proto file on the filesystem. This path
+      must be resolveable from an entry on sys.path and so must all of its
+      transitive dependencies.
+
+    Returns:
+      A 2-tuple of module objects corresponding to (protos(path), services(path)).
+    """
+    # Delegates so grpcio-tools is imported only if/when actually used.
+    return _call_with_lazy_import("protos_and_services", protobuf_path)
diff --git a/contrib/python/grpcio/py3/grpc/_server.py b/contrib/python/grpcio/py3/grpc/_server.py
new file mode 100644
index 0000000000..49ad0d2f5e
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/_server.py
@@ -0,0 +1,1141 @@
+# Copyright 2016 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Service-side implementation of gRPC Python."""
+
+from __future__ import annotations
+
+import collections
+from concurrent import futures
+import enum
+import logging
+import threading
+import time
+import traceback
+from typing import (Any, Callable, Iterable, Iterator, List, Mapping, Optional,
+ Sequence, Set, Tuple, Union)
+
+import grpc # pytype: disable=pyi-error
+from grpc import _common # pytype: disable=pyi-error
+from grpc import _compression # pytype: disable=pyi-error
+from grpc import _interceptor # pytype: disable=pyi-error
+from grpc._cython import cygrpc
+from grpc._typing import ArityAgnosticMethodHandler
+from grpc._typing import ChannelArgumentType
+from grpc._typing import DeserializingFunction
+from grpc._typing import MetadataType
+from grpc._typing import NullaryCallbackType
+from grpc._typing import ResponseType
+from grpc._typing import SerializingFunction
+from grpc._typing import ServerCallbackTag
+from grpc._typing import ServerTagCallbackType
+
+_LOGGER = logging.getLogger(__name__)
+
+# Completion-queue tags for server lifecycle events.
+_SHUTDOWN_TAG = 'shutdown'
+_REQUEST_CALL_TAG = 'request_call'
+
+# Tokens recorded in _RPCState.due while the corresponding batch
+# operation's completion callback is outstanding.
+_RECEIVE_CLOSE_ON_SERVER_TOKEN = 'receive_close_on_server'
+_SEND_INITIAL_METADATA_TOKEN = 'send_initial_metadata'
+_RECEIVE_MESSAGE_TOKEN = 'receive_message'
+_SEND_MESSAGE_TOKEN = 'send_message'
+_SEND_INITIAL_METADATA_AND_SEND_MESSAGE_TOKEN = (
+    'send_initial_metadata * send_message')
+_SEND_STATUS_FROM_SERVER_TOKEN = 'send_status_from_server'
+_SEND_INITIAL_METADATA_AND_SEND_STATUS_FROM_SERVER_TOKEN = (
+    'send_initial_metadata * send_status_from_server')
+
+# Client-connection states stored in _RPCState.client.
+_OPEN = 'open'
+_CLOSED = 'closed'
+_CANCELLED = 'cancelled'
+
+_EMPTY_FLAGS = 0
+
+_DEALLOCATED_SERVER_CHECK_PERIOD_S = 1.0
+_INF_TIMEOUT = 1e9
+
+
+def _serialized_request(request_event: cygrpc.BaseEvent) -> bytes:
+    """Extracts the raw message bytes from a receive-message event."""
+    return request_event.batch_operations[0].message()
+
+
+def _application_code(code: grpc.StatusCode) -> cygrpc.StatusCode:
+    """Maps a public status code to cygrpc, defaulting to `unknown`."""
+    cygrpc_code = _common.STATUS_CODE_TO_CYGRPC_STATUS_CODE.get(code)
+    return cygrpc.StatusCode.unknown if cygrpc_code is None else cygrpc_code
+
+
+def _completion_code(state: _RPCState) -> cygrpc.StatusCode:
+    """Status for normal completion: OK unless the handler set a code."""
+    if state.code is None:
+        return cygrpc.StatusCode.ok
+    else:
+        return _application_code(state.code)
+
+
+def _abortion_code(state: _RPCState,
+                   code: cygrpc.StatusCode) -> cygrpc.StatusCode:
+    """Status for abortion: a handler-set code overrides the given one."""
+    if state.code is None:
+        return code
+    else:
+        return _application_code(state.code)
+
+
+def _details(state: _RPCState) -> bytes:
+    """Status details, defaulting to empty bytes when unset."""
+    return b'' if state.details is None else state.details
+
+
+class _HandlerCallDetails(
+        collections.namedtuple('_HandlerCallDetails', (
+            'method',
+            'invocation_metadata',
+        )), grpc.HandlerCallDetails):
+    """Immutable (method, invocation_metadata) pair given to service handlers."""
+    pass
+
+
+class _RPCState(object):
+ condition: threading.Condition
+ due = Set[str]
+ request: Any
+ client: str
+ initial_metadata_allowed: bool
+ compression_algorithm: Optional[grpc.Compression]
+ disable_next_compression: bool
+ trailing_metadata: Optional[MetadataType]
+ code: Optional[grpc.StatusCode]
+ details: Optional[bytes]
+ statused: bool
+ rpc_errors: List[Exception]
+ callbacks: Optional[List[NullaryCallbackType]]
+ aborted: bool
+
+ def __init__(self):
+ self.condition = threading.Condition()
+ self.due = set()
+ self.request = None
+ self.client = _OPEN
+ self.initial_metadata_allowed = True
+ self.compression_algorithm = None
+ self.disable_next_compression = False
+ self.trailing_metadata = None
+ self.code = None
+ self.details = None
+ self.statused = False
+ self.rpc_errors = []
+ self.callbacks = []
+ self.aborted = False
+
+
+def _raise_rpc_error(state: _RPCState) -> None:
+    """Records a fresh RpcError on the state and raises it."""
+    rpc_error = grpc.RpcError()
+    state.rpc_errors.append(rpc_error)
+    raise rpc_error
+
+
+def _possibly_finish_call(state: _RPCState,
+                          token: str) -> ServerTagCallbackType:
+    """Retires `token`; returns (state, callbacks) once the RPC is done.
+
+    Caller must hold state.condition. Returns (None, ()) while the RPC is
+    still active or other operations remain outstanding.
+    """
+    state.due.remove(token)
+    if not _is_rpc_state_active(state) and not state.due:
+        # Take ownership of the callbacks so they run exactly once.
+        callbacks = state.callbacks
+        state.callbacks = None
+        return state, callbacks
+    else:
+        return None, ()
+
+
+def _send_status_from_server(state: _RPCState, token: str) -> ServerCallbackTag:
+    """Completion callback for the send-status batch operation."""
+
+    def send_status_from_server(unused_send_status_from_server_event):
+        with state.condition:
+            return _possibly_finish_call(state, token)
+
+    return send_status_from_server
+
+
+def _get_initial_metadata(
+        state: _RPCState,
+        metadata: Optional[MetadataType]) -> Optional[MetadataType]:
+    """Prefixes compression metadata when a compression algorithm is set."""
+    with state.condition:
+        if state.compression_algorithm:
+            compression_metadata = (
+                _compression.compression_algorithm_to_metadata(
+                    state.compression_algorithm),)
+            if metadata is None:
+                return compression_metadata
+            else:
+                return compression_metadata + tuple(metadata)
+        else:
+            return metadata
+
+
+def _get_initial_metadata_operation(
+        state: _RPCState, metadata: Optional[MetadataType]) -> cygrpc.Operation:
+    """Builds the send-initial-metadata batch operation for this RPC."""
+    operation = cygrpc.SendInitialMetadataOperation(
+        _get_initial_metadata(state, metadata), _EMPTY_FLAGS)
+    return operation
+
+
+def _abort(state: _RPCState, call: cygrpc.Call, code: cygrpc.StatusCode,
+           details: bytes) -> None:
+    """Sends terminal status for a failed RPC (no-op if client cancelled).
+
+    Caller must hold state.condition. Handler-set code/details override the
+    defaults passed in. Initial metadata is bundled into the same batch if
+    it has not been sent yet.
+    """
+    if state.client is not _CANCELLED:
+        effective_code = _abortion_code(state, code)
+        effective_details = details if state.details is None else state.details
+        if state.initial_metadata_allowed:
+            operations = (
+                _get_initial_metadata_operation(state, None),
+                cygrpc.SendStatusFromServerOperation(state.trailing_metadata,
+                                                     effective_code,
+                                                     effective_details,
+                                                     _EMPTY_FLAGS),
+            )
+            token = _SEND_INITIAL_METADATA_AND_SEND_STATUS_FROM_SERVER_TOKEN
+        else:
+            operations = (cygrpc.SendStatusFromServerOperation(
+                state.trailing_metadata, effective_code, effective_details,
+                _EMPTY_FLAGS),)
+            token = _SEND_STATUS_FROM_SERVER_TOKEN
+        call.start_server_batch(operations,
+                                _send_status_from_server(state, token))
+        state.statused = True
+        state.due.add(token)
+
+
+def _receive_close_on_server(state: _RPCState) -> ServerCallbackTag:
+    """Completion callback tracking client-side close or cancellation."""
+
+    def receive_close_on_server(receive_close_on_server_event):
+        with state.condition:
+            if receive_close_on_server_event.batch_operations[0].cancelled():
+                state.client = _CANCELLED
+            elif state.client is _OPEN:
+                state.client = _CLOSED
+            state.condition.notify_all()
+            return _possibly_finish_call(state, _RECEIVE_CLOSE_ON_SERVER_TOKEN)
+
+    return receive_close_on_server
+
+
+def _receive_message(
+        state: _RPCState, call: cygrpc.Call,
+        request_deserializer: Optional[DeserializingFunction]
+) -> ServerCallbackTag:
+    """Completion callback that deserializes an incoming request message.
+
+    A None serialized payload means the client finished sending; a payload
+    that fails deserialization aborts the RPC with INTERNAL.
+    """
+
+    def receive_message(receive_message_event):
+        serialized_request = _serialized_request(receive_message_event)
+        if serialized_request is None:
+            with state.condition:
+                if state.client is _OPEN:
+                    state.client = _CLOSED
+                state.condition.notify_all()
+                return _possibly_finish_call(state, _RECEIVE_MESSAGE_TOKEN)
+        else:
+            request = _common.deserialize(serialized_request,
+                                          request_deserializer)
+            with state.condition:
+                if request is None:
+                    _abort(state, call, cygrpc.StatusCode.internal,
+                           b'Exception deserializing request!')
+                else:
+                    state.request = request
+                state.condition.notify_all()
+                return _possibly_finish_call(state, _RECEIVE_MESSAGE_TOKEN)
+
+    return receive_message
+
+
+def _send_initial_metadata(state: _RPCState) -> ServerCallbackTag:
+    """Completion callback for the send-initial-metadata batch operation."""
+
+    def send_initial_metadata(unused_send_initial_metadata_event):
+        with state.condition:
+            return _possibly_finish_call(state, _SEND_INITIAL_METADATA_TOKEN)
+
+    return send_initial_metadata
+
+
+def _send_message(state: _RPCState, token: str) -> ServerCallbackTag:
+    """Completion callback for a send-message batch operation.
+
+    Notifies waiters so the handler can enqueue its next response.
+    """
+
+    def send_message(unused_send_message_event):
+        with state.condition:
+            state.condition.notify_all()
+            return _possibly_finish_call(state, token)
+
+    return send_message
+
+
+class _Context(grpc.ServicerContext):
+    """ServicerContext implementation backed by a cygrpc event and _RPCState."""
+    _rpc_event: cygrpc.BaseEvent
+    _state: _RPCState
+    # NOTE(review): annotation name lacks the underscore used by __init__,
+    # which assigns self._request_deserializer -- confirm against upstream.
+    request_deserializer: Optional[DeserializingFunction]
+
+    def __init__(self, rpc_event: cygrpc.BaseEvent, state: _RPCState,
+                 request_deserializer: Optional[DeserializingFunction]):
+        self._rpc_event = rpc_event
+        self._state = state
+        self._request_deserializer = request_deserializer
+
+    def is_active(self) -> bool:
+        with self._state.condition:
+            return _is_rpc_state_active(self._state)
+
+    def time_remaining(self) -> float:
+        # Seconds until the deadline, clamped at zero once it has passed.
+        return max(self._rpc_event.call_details.deadline - time.time(), 0)
+
+    def cancel(self) -> None:
+        self._rpc_event.call.cancel()
+
+    def add_callback(self, callback: NullaryCallbackType) -> bool:
+        with self._state.condition:
+            if self._state.callbacks is None:
+                # RPC already terminated; callback will never run.
+                return False
+            else:
+                self._state.callbacks.append(callback)
+                return True
+
+    def disable_next_message_compression(self) -> None:
+        with self._state.condition:
+            self._state.disable_next_compression = True
+
+    def invocation_metadata(self) -> Optional[MetadataType]:
+        return self._rpc_event.invocation_metadata
+
+    def peer(self) -> str:
+        return _common.decode(self._rpc_event.call.peer())
+
+    def peer_identities(self) -> Optional[Sequence[bytes]]:
+        return cygrpc.peer_identities(self._rpc_event.call)
+
+    def peer_identity_key(self) -> Optional[str]:
+        id_key = cygrpc.peer_identity_key(self._rpc_event.call)
+        return id_key if id_key is None else _common.decode(id_key)
+
+    def auth_context(self) -> Mapping[str, Sequence[bytes]]:
+        auth_context = cygrpc.auth_context(self._rpc_event.call)
+        auth_context_dict = {} if auth_context is None else auth_context
+        # Keys arrive as bytes from the Cython layer; expose them as str.
+        return {
+            _common.decode(key): value
+            for key, value in auth_context_dict.items()
+        }
+
+    def set_compression(self, compression: grpc.Compression) -> None:
+        with self._state.condition:
+            self._state.compression_algorithm = compression
+
+    def send_initial_metadata(self, initial_metadata: MetadataType) -> None:
+        with self._state.condition:
+            if self._state.client is _CANCELLED:
+                _raise_rpc_error(self._state)
+            else:
+                if self._state.initial_metadata_allowed:
+                    operation = _get_initial_metadata_operation(
+                        self._state, initial_metadata)
+                    self._rpc_event.call.start_server_batch(
+                        (operation,), _send_initial_metadata(self._state))
+                    self._state.initial_metadata_allowed = False
+                    self._state.due.add(_SEND_INITIAL_METADATA_TOKEN)
+                else:
+                    # Initial metadata may only be sent once, before any
+                    # response message or status.
+                    raise ValueError('Initial metadata no longer allowed!')
+
+    def set_trailing_metadata(self, trailing_metadata: MetadataType) -> None:
+        with self._state.condition:
+            self._state.trailing_metadata = trailing_metadata
+
+    def trailing_metadata(self) -> Optional[MetadataType]:
+        return self._state.trailing_metadata
+
+    def abort(self, code: grpc.StatusCode, details: str) -> None:
+        # treat OK like other invalid arguments: fail the RPC
+        if code == grpc.StatusCode.OK:
+            _LOGGER.error(
+                'abort() called with StatusCode.OK; returning UNKNOWN')
+            code = grpc.StatusCode.UNKNOWN
+            details = ''
+        with self._state.condition:
+            self._state.code = code
+            self._state.details = _common.encode(details)
+            self._state.aborted = True
+        # The bare Exception unwinds the handler; presumably the server
+        # machinery checks state.aborted and sends the recorded status --
+        # confirm against the rest of _server.py.
+        raise Exception()
+
+ def abort_with_status(self, status: grpc.Status) -> None:
+ self._state.trailing_metadata = status.trailing_metadata
+ self.abort(status.code, status.details)
+
+ def set_code(self, code: grpc.StatusCode) -> None:
+ with self._state.condition:
+ self._state.code = code
+
+ def code(self) -> grpc.StatusCode:
+ return self._state.code
+
+ def set_details(self, details: str) -> None:
+ with self._state.condition:
+ self._state.details = _common.encode(details)
+
+ def details(self) -> bytes:
+ return self._state.details
+
+ def _finalize_state(self) -> None:
+ pass
+
+
class _RequestIterator(object):
    """Blocking iterator over the request messages of a streaming RPC.

    Each ``__next__`` starts a receive-message batch on the underlying call
    and waits on the RPC condition variable until the serving thread
    delivers a deserialized request, the client cancels (RpcError), or the
    stream ends (StopIteration).
    """

    _state: _RPCState
    _call: cygrpc.Call
    _request_deserializer: Optional[DeserializingFunction]

    def __init__(self, state: _RPCState, call: cygrpc.Call,
                 request_deserializer: Optional[DeserializingFunction]):
        self._state = state
        self._call = call
        self._request_deserializer = request_deserializer

    def _raise_or_start_receive_message(self) -> None:
        """Starts the next receive batch; must hold self._state.condition."""
        if self._state.client is _CANCELLED:
            _raise_rpc_error(self._state)
        elif not _is_rpc_state_active(self._state):
            raise StopIteration()
        else:
            self._call.start_server_batch(
                (cygrpc.ReceiveMessageOperation(_EMPTY_FLAGS),),
                _receive_message(self._state, self._call,
                                 self._request_deserializer))
            self._state.due.add(_RECEIVE_MESSAGE_TOKEN)

    def _look_for_request(self) -> Any:
        """Returns a delivered request, or None if none has arrived yet.

        Raises RpcError on cancellation and StopIteration once no message is
        pending and none is due. Must hold self._state.condition.
        """
        # Every branch below returns or raises, so no trailing fallthrough
        # guard is needed (the old unreachable AssertionError was removed).
        if self._state.client is _CANCELLED:
            _raise_rpc_error(self._state)
        elif (self._state.request is None and
              _RECEIVE_MESSAGE_TOKEN not in self._state.due):
            raise StopIteration()
        else:
            request = self._state.request
            self._state.request = None
            return request

    def _next(self) -> Any:
        with self._state.condition:
            self._raise_or_start_receive_message()
            while True:
                self._state.condition.wait()
                request = self._look_for_request()
                if request is not None:
                    return request

    def __iter__(self) -> _RequestIterator:
        return self

    def __next__(self) -> Any:
        return self._next()

    # Python 2-era alias kept for backward compatibility.
    def next(self) -> Any:
        return self._next()
+
+
def _unary_request(
        rpc_event: cygrpc.BaseEvent, state: _RPCState,
        request_deserializer: Optional[DeserializingFunction]
) -> Callable[[], Any]:
    """Returns a thunk producing the single request of a unary-request RPC.

    The thunk blocks on the RPC condition variable until the request is
    delivered, the client closes without sending one (the RPC is aborted
    with UNIMPLEMENTED), or the client cancels.
    """

    def unary_request():
        with state.condition:
            if not _is_rpc_state_active(state):
                return None
            else:
                rpc_event.call.start_server_batch(
                    (cygrpc.ReceiveMessageOperation(_EMPTY_FLAGS),),
                    _receive_message(state, rpc_event.call,
                                     request_deserializer))
                state.due.add(_RECEIVE_MESSAGE_TOKEN)
                while True:
                    state.condition.wait()
                    if state.request is None:
                        if state.client is _CLOSED:
                            # Client half-closed without sending a message.
                            details = '"{}" requires exactly one request message.'.format(
                                rpc_event.call_details.method)
                            _abort(state, rpc_event.call,
                                   cygrpc.StatusCode.unimplemented,
                                   _common.encode(details))
                            return None
                        elif state.client is _CANCELLED:
                            return None
                        # Otherwise: spurious wakeup; keep waiting.
                    else:
                        request = state.request
                        state.request = None
                        return request

    return unary_request
+
+
def _call_behavior(
        rpc_event: cygrpc.BaseEvent,
        state: _RPCState,
        behavior: ArityAgnosticMethodHandler,
        argument: Any,
        request_deserializer: Optional[DeserializingFunction],
        send_response_callback: Optional[Callable[[ResponseType], None]] = None
) -> Tuple[Union[ResponseType, Iterator[ResponseType]], bool]:
    """Invokes the application handler inside a servicer context.

    Returns ``(result, True)`` on success — *result* is the handler's
    return value (a response or a response iterator) — or ``(None, False)``
    after aborting the RPC because the handler raised.
    """
    from grpc import _create_servicer_context  # pytype: disable=pyi-error
    with _create_servicer_context(rpc_event, state,
                                  request_deserializer) as context:
        try:
            response_or_iterator = None
            if send_response_callback is not None:
                # Experimental non-blocking handlers receive the callback.
                response_or_iterator = behavior(argument, context,
                                                send_response_callback)
            else:
                response_or_iterator = behavior(argument, context)
            return response_or_iterator, True
        except Exception as exception:  # pylint: disable=broad-except
            with state.condition:
                if state.aborted:
                    # Exception raised by context.abort(); status was set there.
                    _abort(state, rpc_event.call, cygrpc.StatusCode.unknown,
                           b'RPC Aborted')
                elif exception not in state.rpc_errors:
                    # Only report exceptions that are not our own RpcErrors.
                    try:
                        details = 'Exception calling application: {}'.format(
                            exception)
                    except Exception:  # pylint: disable=broad-except
                        # str(exception) itself raised.
                        details = 'Calling application raised unprintable Exception!'
                        traceback.print_exc()
                    _LOGGER.exception(details)
                    _abort(state, rpc_event.call, cygrpc.StatusCode.unknown,
                           _common.encode(details))
            return None, False
+
+
def _take_response_from_response_iterator(
        rpc_event: cygrpc.BaseEvent, state: _RPCState,
        response_iterator: Iterator[ResponseType]) -> Tuple[ResponseType, bool]:
    """Pulls one response from the application's response iterator.

    Returns ``(response, True)`` on success, ``(None, True)`` on normal
    exhaustion, and ``(None, False)`` after aborting the RPC because the
    iterator raised.
    """
    try:
        response = next(response_iterator)
    except StopIteration:
        return None, True
    except Exception as exception:  # pylint: disable=broad-except
        with state.condition:
            if state.aborted:
                _abort(state, rpc_event.call, cygrpc.StatusCode.unknown,
                       b'RPC Aborted')
            elif exception not in state.rpc_errors:
                details = 'Exception iterating responses: {}'.format(exception)
                _LOGGER.exception(details)
                _abort(state, rpc_event.call, cygrpc.StatusCode.unknown,
                       _common.encode(details))
        return None, False
    else:
        return response, True
+
+
def _serialize_response(
        rpc_event: cygrpc.BaseEvent, state: _RPCState, response: Any,
        response_serializer: Optional[SerializingFunction]) -> Optional[bytes]:
    """Serializes *response*; aborts the RPC with INTERNAL on failure."""
    serialized = _common.serialize(response, response_serializer)
    if serialized is not None:
        return serialized
    with state.condition:
        _abort(state, rpc_event.call, cygrpc.StatusCode.internal,
               b'Failed to serialize response!')
    return None
+
+
def _get_send_message_op_flags_from_state(
        state: _RPCState) -> Union[int, cygrpc.WriteFlag]:
    """Returns write flags for the next outbound message.

    Honors the one-shot flag set by disable_next_message_compression().
    """
    return (cygrpc.WriteFlag.no_compress
            if state.disable_next_compression else _EMPTY_FLAGS)
+
+
+def _reset_per_message_state(state: _RPCState) -> None:
+ with state.condition:
+ state.disable_next_compression = False
+
+
def _send_response(rpc_event: cygrpc.BaseEvent, state: _RPCState,
                   serialized_response: bytes) -> bool:
    """Sends one serialized message, blocking until the write completes.

    Piggybacks initial metadata on the first message if it has not yet been
    sent. Returns True if the RPC is still active after the write.
    """
    with state.condition:
        if not _is_rpc_state_active(state):
            return False
        else:
            if state.initial_metadata_allowed:
                # First write on this RPC: bundle initial metadata with it.
                operations = (
                    _get_initial_metadata_operation(state, None),
                    cygrpc.SendMessageOperation(
                        serialized_response,
                        _get_send_message_op_flags_from_state(state)),
                )
                state.initial_metadata_allowed = False
                token = _SEND_INITIAL_METADATA_AND_SEND_MESSAGE_TOKEN
            else:
                operations = (cygrpc.SendMessageOperation(
                    serialized_response,
                    _get_send_message_op_flags_from_state(state)),)
                token = _SEND_MESSAGE_TOKEN
            rpc_event.call.start_server_batch(operations,
                                              _send_message(state, token))
            state.due.add(token)
            _reset_per_message_state(state)
            # Block until the _send_message callback retires our token.
            while True:
                state.condition.wait()
                if token not in state.due:
                    return _is_rpc_state_active(state)
+
+
def _status(rpc_event: cygrpc.BaseEvent, state: _RPCState,
            serialized_response: Optional[bytes]) -> None:
    """Starts the terminal status batch for the RPC (no-op if cancelled).

    Bundles initial metadata (if never sent) and an optional final response
    message into the same batch as the status.
    """
    with state.condition:
        if state.client is not _CANCELLED:
            code = _completion_code(state)
            details = _details(state)
            operations = [
                cygrpc.SendStatusFromServerOperation(state.trailing_metadata,
                                                     code, details,
                                                     _EMPTY_FLAGS),
            ]
            if state.initial_metadata_allowed:
                # Initial metadata was never sent; send (empty) now.
                operations.append(_get_initial_metadata_operation(state, None))
            if serialized_response is not None:
                operations.append(
                    cygrpc.SendMessageOperation(
                        serialized_response,
                        _get_send_message_op_flags_from_state(state)))
            rpc_event.call.start_server_batch(
                operations,
                _send_status_from_server(state, _SEND_STATUS_FROM_SERVER_TOKEN))
            state.statused = True
            _reset_per_message_state(state)
            state.due.add(_SEND_STATUS_FROM_SERVER_TOKEN)
+
+
def _unary_response_in_pool(
        rpc_event: cygrpc.BaseEvent, state: _RPCState,
        behavior: ArityAgnosticMethodHandler, argument_thunk: Callable[[], Any],
        request_deserializer: Optional[DeserializingFunction],
        response_serializer: Optional[SerializingFunction]) -> None:
    """Runs a single-response RPC to completion on a thread-pool thread.

    NOTE: request_deserializer was previously annotated as
    Optional[SerializingFunction]; corrected to DeserializingFunction to
    match _stream_response_in_pool and the value actually passed in.
    """
    cygrpc.install_context_from_request_call_event(rpc_event)
    try:
        argument = argument_thunk()
        # argument_thunk returns None when the RPC already terminated.
        if argument is not None:
            response, proceed = _call_behavior(rpc_event, state, behavior,
                                               argument, request_deserializer)
            if proceed:
                serialized_response = _serialize_response(
                    rpc_event, state, response, response_serializer)
                if serialized_response is not None:
                    _status(rpc_event, state, serialized_response)
    except Exception:  # pylint: disable=broad-except
        traceback.print_exc()
    finally:
        cygrpc.uninstall_context()
+
+
def _stream_response_in_pool(
        rpc_event: cygrpc.BaseEvent, state: _RPCState,
        behavior: ArityAgnosticMethodHandler, argument_thunk: Callable[[], Any],
        request_deserializer: Optional[DeserializingFunction],
        response_serializer: Optional[SerializingFunction]) -> None:
    """Runs a response-streaming RPC to completion on a thread-pool thread."""
    cygrpc.install_context_from_request_call_event(rpc_event)

    def send_response(response: Any) -> None:
        # None signals end-of-stream: send the terminal status instead.
        if response is None:
            _status(rpc_event, state, None)
        else:
            serialized_response = _serialize_response(rpc_event, state,
                                                      response,
                                                      response_serializer)
            if serialized_response is not None:
                _send_response(rpc_event, state, serialized_response)

    try:
        argument = argument_thunk()
        # argument_thunk returns None when the RPC already terminated.
        if argument is not None:
            if hasattr(behavior, 'experimental_non_blocking'
                      ) and behavior.experimental_non_blocking:
                # Experimental API: the handler drives sends via callback.
                _call_behavior(rpc_event,
                               state,
                               behavior,
                               argument,
                               request_deserializer,
                               send_response_callback=send_response)
            else:
                response_iterator, proceed = _call_behavior(
                    rpc_event, state, behavior, argument, request_deserializer)
                if proceed:
                    _send_message_callback_to_blocking_iterator_adapter(
                        rpc_event, state, send_response, response_iterator)
    except Exception:  # pylint: disable=broad-except
        traceback.print_exc()
    finally:
        cygrpc.uninstall_context()
+
+
def _is_rpc_state_active(state: _RPCState) -> bool:
    """True while the RPC is neither client-cancelled nor already statused."""
    if state.client is _CANCELLED:
        return False
    return not state.statused
+
+
def _send_message_callback_to_blocking_iterator_adapter(
        rpc_event: cygrpc.BaseEvent, state: _RPCState,
        send_response_callback: Callable[[ResponseType], None],
        response_iterator: Iterator[ResponseType]) -> None:
    """Drains a blocking response iterator through the send callback.

    Stops when the iterator faults (the RPC has already been aborted by
    _take_response_from_response_iterator) or the RPC becomes inactive.
    A None response from normal exhaustion is forwarded to the callback,
    which interprets it as end-of-stream.
    """
    proceed = True
    while proceed:
        response, proceed = _take_response_from_response_iterator(
            rpc_event, state, response_iterator)
        if proceed:
            send_response_callback(response)
            proceed = _is_rpc_state_active(state)
+
+
+def _select_thread_pool_for_behavior(
+ behavior: ArityAgnosticMethodHandler,
+ default_thread_pool: futures.ThreadPoolExecutor
+) -> futures.ThreadPoolExecutor:
+ if hasattr(behavior, 'experimental_thread_pool') and isinstance(
+ behavior.experimental_thread_pool, futures.ThreadPoolExecutor):
+ return behavior.experimental_thread_pool
+ else:
+ return default_thread_pool
+
+
def _handle_unary_unary(
        rpc_event: cygrpc.BaseEvent, state: _RPCState,
        method_handler: grpc.RpcMethodHandler,
        default_thread_pool: futures.ThreadPoolExecutor) -> futures.Future:
    """Schedules a unary-unary RPC on the appropriate thread pool."""
    behavior = method_handler.unary_unary
    pool = _select_thread_pool_for_behavior(behavior, default_thread_pool)
    argument_thunk = _unary_request(rpc_event, state,
                                    method_handler.request_deserializer)
    return pool.submit(_unary_response_in_pool, rpc_event, state, behavior,
                       argument_thunk, method_handler.request_deserializer,
                       method_handler.response_serializer)
+
+
def _handle_unary_stream(
        rpc_event: cygrpc.BaseEvent, state: _RPCState,
        method_handler: grpc.RpcMethodHandler,
        default_thread_pool: futures.ThreadPoolExecutor) -> futures.Future:
    """Schedules a unary-stream RPC on the appropriate thread pool."""
    behavior = method_handler.unary_stream
    pool = _select_thread_pool_for_behavior(behavior, default_thread_pool)
    argument_thunk = _unary_request(rpc_event, state,
                                    method_handler.request_deserializer)
    return pool.submit(_stream_response_in_pool, rpc_event, state, behavior,
                       argument_thunk, method_handler.request_deserializer,
                       method_handler.response_serializer)
+
+
def _handle_stream_unary(
        rpc_event: cygrpc.BaseEvent, state: _RPCState,
        method_handler: grpc.RpcMethodHandler,
        default_thread_pool: futures.ThreadPoolExecutor) -> futures.Future:
    """Schedules a stream-unary RPC on the appropriate thread pool."""
    behavior = method_handler.stream_unary
    pool = _select_thread_pool_for_behavior(behavior, default_thread_pool)
    request_iterator = _RequestIterator(state, rpc_event.call,
                                        method_handler.request_deserializer)
    return pool.submit(_unary_response_in_pool, rpc_event, state, behavior,
                       lambda: request_iterator,
                       method_handler.request_deserializer,
                       method_handler.response_serializer)
+
+
def _handle_stream_stream(
        rpc_event: cygrpc.BaseEvent, state: _RPCState,
        method_handler: grpc.RpcMethodHandler,
        default_thread_pool: futures.ThreadPoolExecutor) -> futures.Future:
    """Schedules a stream-stream RPC on the appropriate thread pool."""
    behavior = method_handler.stream_stream
    pool = _select_thread_pool_for_behavior(behavior, default_thread_pool)
    request_iterator = _RequestIterator(state, rpc_event.call,
                                        method_handler.request_deserializer)
    return pool.submit(_stream_response_in_pool, rpc_event, state, behavior,
                       lambda: request_iterator,
                       method_handler.request_deserializer,
                       method_handler.response_serializer)
+
+
def _find_method_handler(
        rpc_event: cygrpc.BaseEvent,
        generic_handlers: List[grpc.GenericRpcHandler],
        interceptor_pipeline: Optional[_interceptor._ServicePipeline]
) -> Optional[grpc.RpcMethodHandler]:
    """Resolves the method handler for an incoming call.

    Runs the lookup through the interceptor pipeline when one is installed;
    the first registered handler to recognize the method wins.
    """

    def query_handlers(
            handler_call_details: _HandlerCallDetails
    ) -> Optional[grpc.RpcMethodHandler]:
        candidates = (generic_handler.service(handler_call_details)
                      for generic_handler in generic_handlers)
        return next(
            (handler for handler in candidates if handler is not None), None)

    handler_call_details = _HandlerCallDetails(
        _common.decode(rpc_event.call_details.method),
        rpc_event.invocation_metadata)
    if interceptor_pipeline is None:
        return query_handlers(handler_call_details)
    return interceptor_pipeline.execute(query_handlers, handler_call_details)
+
+
def _reject_rpc(rpc_event: cygrpc.BaseEvent, status: cygrpc.StatusCode,
                details: bytes) -> _RPCState:
    """Refuses an incoming RPC with *status* without invoking any handler."""
    rpc_state = _RPCState()
    operations = (
        _get_initial_metadata_operation(rpc_state, None),
        cygrpc.ReceiveCloseOnServerOperation(_EMPTY_FLAGS),
        cygrpc.SendStatusFromServerOperation(None, status, details,
                                             _EMPTY_FLAGS),
    )

    def on_rejection_complete(ignored_event):
        # Mimics the (rpc_state, callbacks) tag protocol used elsewhere.
        return rpc_state, ()

    rpc_event.call.start_server_batch(operations, on_rejection_complete)
    return rpc_state
+
+
def _handle_with_method_handler(
        rpc_event: cygrpc.BaseEvent, method_handler: grpc.RpcMethodHandler,
        thread_pool: futures.ThreadPoolExecutor
) -> Tuple[_RPCState, futures.Future]:
    """Begins servicing an accepted RPC with its resolved method handler."""
    state = _RPCState()
    with state.condition:
        # Arrange for client-close notification on this new RPC.
        rpc_event.call.start_server_batch(
            (cygrpc.ReceiveCloseOnServerOperation(_EMPTY_FLAGS),),
            _receive_close_on_server(state))
        state.due.add(_RECEIVE_CLOSE_ON_SERVER_TOKEN)
    # Pick the scheduler matching the handler's streaming arity.
    if method_handler.request_streaming:
        handle = (_handle_stream_stream if method_handler.response_streaming
                  else _handle_stream_unary)
    else:
        handle = (_handle_unary_stream if method_handler.response_streaming
                  else _handle_unary_unary)
    return state, handle(rpc_event, state, method_handler, thread_pool)
+
+
def _handle_call(
        rpc_event: cygrpc.BaseEvent,
        generic_handlers: List[grpc.GenericRpcHandler],
        interceptor_pipeline: Optional[_interceptor._ServicePipeline],
        thread_pool: futures.ThreadPoolExecutor, concurrency_exceeded: bool
) -> Tuple[Optional[_RPCState], Optional[futures.Future]]:
    """Dispatches one request-call event, rejecting it when appropriate.

    Returns (rpc_state, future) for a serviced RPC, (rpc_state, None) for a
    rejected one, and (None, None) when there is nothing to service.
    """
    if not rpc_event.success or rpc_event.call_details.method is None:
        return None, None
    try:
        method_handler = _find_method_handler(rpc_event, generic_handlers,
                                              interceptor_pipeline)
    except Exception as exception:  # pylint: disable=broad-except
        details = 'Exception servicing handler: {}'.format(exception)
        _LOGGER.exception(details)
        return _reject_rpc(rpc_event, cygrpc.StatusCode.unknown,
                           b'Error in service handler!'), None
    if method_handler is None:
        return _reject_rpc(rpc_event, cygrpc.StatusCode.unimplemented,
                           b'Method not found!'), None
    if concurrency_exceeded:
        return _reject_rpc(rpc_event, cygrpc.StatusCode.resource_exhausted,
                           b'Concurrent RPC limit exceeded!'), None
    return _handle_with_method_handler(rpc_event, method_handler, thread_pool)
+
+
@enum.unique
class _ServerStage(enum.Enum):
    """Lifecycle stage of the server's serving loop."""
    # Not serving; also the terminal state after shutdown completes.
    STOPPED = 'stopped'
    # Actively accepting and servicing RPCs.
    STARTED = 'started'
    # Shutting down: no new RPCs; draining in-flight ones.
    GRACE = 'grace'
+
+
class _ServerState(object):
    """Mutable state shared between the server API and its serving thread.

    All fields other than server_deallocated are guarded by ``lock``.
    """

    lock: threading.RLock
    completion_queue: cygrpc.CompletionQueue
    server: cygrpc.Server
    generic_handlers: List[grpc.GenericRpcHandler]
    interceptor_pipeline: Optional[_interceptor._ServicePipeline]
    thread_pool: futures.ThreadPoolExecutor
    stage: _ServerStage
    termination_event: threading.Event
    shutdown_events: List[threading.Event]
    maximum_concurrent_rpcs: Optional[int]
    active_rpc_count: int
    rpc_states: Set[_RPCState]
    due: Set[str]
    server_deallocated: bool

    # pylint: disable=too-many-arguments
    def __init__(self, completion_queue: cygrpc.CompletionQueue,
                 server: cygrpc.Server,
                 generic_handlers: Sequence[grpc.GenericRpcHandler],
                 interceptor_pipeline: Optional[_interceptor._ServicePipeline],
                 thread_pool: futures.ThreadPoolExecutor,
                 maximum_concurrent_rpcs: Optional[int]):
        self.lock = threading.RLock()
        self.completion_queue = completion_queue
        self.server = server
        self.generic_handlers = list(generic_handlers)
        self.interceptor_pipeline = interceptor_pipeline
        self.thread_pool = thread_pool
        self.stage = _ServerStage.STOPPED
        # termination_event is always signaled on shutdown completion.
        self.termination_event = threading.Event()
        self.shutdown_events = [self.termination_event]
        self.maximum_concurrent_rpcs = maximum_concurrent_rpcs
        self.active_rpc_count = 0

        # TODO(https://github.com/grpc/grpc/issues/6597): eliminate these fields.
        self.rpc_states = set()
        self.due = set()

        # A "volatile" flag to interrupt the daemon serving thread
        self.server_deallocated = False
+
+
+def _add_generic_handlers(
+ state: _ServerState,
+ generic_handlers: Iterable[grpc.GenericRpcHandler]) -> None:
+ with state.lock:
+ state.generic_handlers.extend(generic_handlers)
+
+
+def _add_insecure_port(state: _ServerState, address: bytes) -> int:
+ with state.lock:
+ return state.server.add_http2_port(address)
+
+
+def _add_secure_port(state: _ServerState, address: bytes,
+ server_credentials: grpc.ServerCredentials) -> int:
+ with state.lock:
+ return state.server.add_http2_port(address,
+ server_credentials._credentials)
+
+
def _request_call(state: _ServerState) -> None:
    """Asks gRPC core for the next incoming RPC and tracks the pending tag.

    Callers hold state.lock.
    """
    state.server.request_call(state.completion_queue, state.completion_queue,
                              _REQUEST_CALL_TAG)
    state.due.add(_REQUEST_CALL_TAG)
+
+
# TODO(https://github.com/grpc/grpc/issues/6597): delete this function.
def _stop_serving(state: _ServerState) -> bool:
    """Destroys the server once nothing remains outstanding.

    Returns True (and signals all shutdown events) only when there are no
    live RPC states and no pending completion-queue tags. Callers hold
    state.lock.
    """
    if state.rpc_states or state.due:
        return False
    state.server.destroy()
    for shutdown_event in state.shutdown_events:
        shutdown_event.set()
    state.stage = _ServerStage.STOPPED
    return True
+
+
+def _on_call_completed(state: _ServerState) -> None:
+ with state.lock:
+ state.active_rpc_count -= 1
+
+
def _process_event_and_continue(state: _ServerState,
                                event: cygrpc.BaseEvent) -> bool:
    """Handles one completion-queue event; returns False to stop serving."""
    should_continue = True
    if event.tag is _SHUTDOWN_TAG:
        with state.lock:
            state.due.remove(_SHUTDOWN_TAG)
            if _stop_serving(state):
                should_continue = False
    elif event.tag is _REQUEST_CALL_TAG:
        # A new incoming RPC.
        with state.lock:
            state.due.remove(_REQUEST_CALL_TAG)
            concurrency_exceeded = (
                state.maximum_concurrent_rpcs is not None and
                state.active_rpc_count >= state.maximum_concurrent_rpcs)
            rpc_state, rpc_future = _handle_call(event, state.generic_handlers,
                                                 state.interceptor_pipeline,
                                                 state.thread_pool,
                                                 concurrency_exceeded)
            if rpc_state is not None:
                state.rpc_states.add(rpc_state)
            if rpc_future is not None:
                # Only serviced (non-rejected) calls count toward concurrency.
                state.active_rpc_count += 1
                rpc_future.add_done_callback(
                    lambda unused_future: _on_call_completed(state))
            if state.stage is _ServerStage.STARTED:
                # Keep a request-call outstanding while serving.
                _request_call(state)
            elif _stop_serving(state):
                should_continue = False
    else:
        # Any other tag is a per-RPC callback installed by this module.
        rpc_state, callbacks = event.tag(event)
        for callback in callbacks:
            try:
                callback()
            except Exception:  # pylint: disable=broad-except
                _LOGGER.exception('Exception calling callback!')
        if rpc_state is not None:
            # A non-None rpc_state here means the RPC has fully terminated.
            with state.lock:
                state.rpc_states.remove(rpc_state)
                if _stop_serving(state):
                    should_continue = False
    return should_continue
+
+
def _serve(state: _ServerState) -> None:
    """Daemon serving loop: polls the completion queue until shutdown.

    Wakes up periodically to notice server deallocation (__del__ sets the
    flag without holding the lock).
    """
    while True:
        timeout = time.time() + _DEALLOCATED_SERVER_CHECK_PERIOD_S
        event = state.completion_queue.poll(timeout)
        if state.server_deallocated:
            _begin_shutdown_once(state)
        if event.completion_type != cygrpc.CompletionType.queue_timeout:
            if not _process_event_and_continue(state, event):
                return
        # We want to force the deletion of the previous event
        # ~before~ we poll again; if the event has a reference
        # to a shutdown Call object, this can induce spinlock.
        event = None
+
+
def _begin_shutdown_once(state: _ServerState) -> None:
    """Initiates core shutdown exactly once; later calls are no-ops."""
    with state.lock:
        if state.stage is not _ServerStage.STARTED:
            return
        state.server.shutdown(state.completion_queue, _SHUTDOWN_TAG)
        state.stage = _ServerStage.GRACE
        state.due.add(_SHUTDOWN_TAG)
+
+
def _stop(state: _ServerState, grace: Optional[float]) -> threading.Event:
    """Begins server shutdown and returns an event signaled on completion.

    With no grace period all calls are cancelled immediately and this
    blocks until shutdown completes; otherwise a helper thread cancels any
    remaining calls once the grace period elapses.
    """
    with state.lock:
        if state.stage is _ServerStage.STOPPED:
            # Already stopped: return an event that is already set.
            shutdown_event = threading.Event()
            shutdown_event.set()
            return shutdown_event
        else:
            _begin_shutdown_once(state)
            shutdown_event = threading.Event()
            state.shutdown_events.append(shutdown_event)
            if grace is None:
                state.server.cancel_all_calls()
            else:

                def cancel_all_calls_after_grace():
                    # Wait for normal completion first; cancel stragglers.
                    shutdown_event.wait(timeout=grace)
                    with state.lock:
                        state.server.cancel_all_calls()

                thread = threading.Thread(target=cancel_all_calls_after_grace)
                thread.start()
                return shutdown_event
    # Reached only on the grace is None path (lock released above).
    shutdown_event.wait()
    return shutdown_event
+
+
def _start(state: _ServerState) -> None:
    """Starts the core server and spawns the daemon serving thread."""
    with state.lock:
        if state.stage is not _ServerStage.STOPPED:
            raise ValueError('Cannot start already-started server!')
        state.server.start()
        state.stage = _ServerStage.STARTED
        _request_call(state)

        serving_thread = threading.Thread(target=_serve,
                                          args=(state,),
                                          daemon=True)
        serving_thread.start()
+
+
+def _validate_generic_rpc_handlers(
+ generic_rpc_handlers: Iterable[grpc.GenericRpcHandler]) -> None:
+ for generic_rpc_handler in generic_rpc_handlers:
+ service_attribute = getattr(generic_rpc_handler, 'service', None)
+ if service_attribute is None:
+ raise AttributeError(
+ '"{}" must conform to grpc.GenericRpcHandler type but does '
+ 'not have "service" method!'.format(generic_rpc_handler))
+
+
def _augment_options(
        base_options: Sequence[ChannelArgumentType],
        compression: Optional[grpc.Compression]
) -> Sequence[ChannelArgumentType]:
    """Returns *base_options* extended with the compression channel option."""
    extra = _compression.create_channel_option(compression)
    return tuple(base_options) + extra
+
+
class _Server(grpc.Server):
    """Synchronous (thread-pool based) implementation of grpc.Server."""

    _state: _ServerState

    # pylint: disable=too-many-arguments
    def __init__(self, thread_pool: futures.ThreadPoolExecutor,
                 generic_handlers: Sequence[grpc.GenericRpcHandler],
                 interceptors: Sequence[grpc.ServerInterceptor],
                 options: Sequence[ChannelArgumentType],
                 maximum_concurrent_rpcs: Optional[int],
                 compression: Optional[grpc.Compression], xds: bool):
        completion_queue = cygrpc.CompletionQueue()
        server = cygrpc.Server(_augment_options(options, compression), xds)
        server.register_completion_queue(completion_queue)
        self._state = _ServerState(completion_queue, server, generic_handlers,
                                   _interceptor.service_pipeline(interceptors),
                                   thread_pool, maximum_concurrent_rpcs)

    def add_generic_rpc_handlers(
            self,
            generic_rpc_handlers: Iterable[grpc.GenericRpcHandler]) -> None:
        """Validates then registers additional generic handlers."""
        _validate_generic_rpc_handlers(generic_rpc_handlers)
        _add_generic_handlers(self._state, generic_rpc_handlers)

    def add_insecure_port(self, address: str) -> int:
        return _common.validate_port_binding_result(
            address, _add_insecure_port(self._state, _common.encode(address)))

    def add_secure_port(self, address: str,
                        server_credentials: grpc.ServerCredentials) -> int:
        return _common.validate_port_binding_result(
            address,
            _add_secure_port(self._state, _common.encode(address),
                             server_credentials))

    def start(self) -> None:
        _start(self._state)

    def wait_for_termination(self, timeout: Optional[float] = None) -> bool:
        """Blocks until the server terminates or *timeout* elapses."""
        # NOTE(https://bugs.python.org/issue35935)
        # Remove this workaround once threading.Event.wait() is working with
        # CTRL+C across platforms.
        return _common.wait(self._state.termination_event.wait,
                            self._state.termination_event.is_set,
                            timeout=timeout)

    def stop(self, grace: Optional[float]) -> threading.Event:
        return _stop(self._state, grace)

    def __del__(self):
        if hasattr(self, '_state'):
            # We can not grab a lock in __del__(), so set a flag to signal the
            # serving daemon thread (if it exists) to initiate shutdown.
            self._state.server_deallocated = True
+
+
def create_server(thread_pool: futures.ThreadPoolExecutor,
                  generic_rpc_handlers: Sequence[grpc.GenericRpcHandler],
                  interceptors: Sequence[grpc.ServerInterceptor],
                  options: Sequence[ChannelArgumentType],
                  maximum_concurrent_rpcs: Optional[int],
                  compression: Optional[grpc.Compression],
                  xds: bool) -> _Server:
    """Validates the supplied handlers and constructs a synchronous server."""
    _validate_generic_rpc_handlers(generic_rpc_handlers)
    return _Server(thread_pool, generic_rpc_handlers, interceptors, options,
                   maximum_concurrent_rpcs, compression, xds)
diff --git a/contrib/python/grpcio/py3/grpc/_simple_stubs.py b/contrib/python/grpcio/py3/grpc/_simple_stubs.py
new file mode 100644
index 0000000000..54c2a2d5db
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/_simple_stubs.py
@@ -0,0 +1,486 @@
+# Copyright 2020 The gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Functions that obviate explicit stubs and explicit channels."""
+
+import collections
+import datetime
+import logging
+import os
+import threading
+from typing import (Any, AnyStr, Callable, Dict, Iterator, Optional, Sequence,
+ Tuple, TypeVar, Union)
+
+import grpc
+from grpc.experimental import experimental_api
+
RequestType = TypeVar('RequestType')
ResponseType = TypeVar('ResponseType')

# A channel is cached per unique (target, options, credentials, compression)
# combination.
OptionsType = Sequence[Tuple[str, str]]
CacheKey = Tuple[str, OptionsType, Optional[grpc.ChannelCredentials],
                 Optional[grpc.Compression]]

_LOGGER = logging.getLogger(__name__)

# How long a cached channel lives before the eviction thread closes it;
# overridable via the environment variable below.
_EVICTION_PERIOD_KEY = "GRPC_PYTHON_MANAGED_CHANNEL_EVICTION_SECONDS"
if _EVICTION_PERIOD_KEY in os.environ:
    _EVICTION_PERIOD = datetime.timedelta(
        seconds=float(os.environ[_EVICTION_PERIOD_KEY]))
    _LOGGER.debug("Setting managed channel eviction period to %s",
                  _EVICTION_PERIOD)
else:
    _EVICTION_PERIOD = datetime.timedelta(minutes=10)

# Cap on the number of channels held in the cache at once; overridable via
# the environment variable below.
_MAXIMUM_CHANNELS_KEY = "GRPC_PYTHON_MANAGED_CHANNEL_MAXIMUM"
if _MAXIMUM_CHANNELS_KEY in os.environ:
    _MAXIMUM_CHANNELS = int(os.environ[_MAXIMUM_CHANNELS_KEY])
    _LOGGER.debug("Setting maximum managed channels to %d", _MAXIMUM_CHANNELS)
else:
    _MAXIMUM_CHANNELS = 2**8

# Default per-RPC timeout (seconds) used when the caller passes none;
# overridable via the environment variable below.
_DEFAULT_TIMEOUT_KEY = "GRPC_PYTHON_DEFAULT_TIMEOUT_SECONDS"
if _DEFAULT_TIMEOUT_KEY in os.environ:
    _DEFAULT_TIMEOUT = float(os.environ[_DEFAULT_TIMEOUT_KEY])
    _LOGGER.debug("Setting default timeout seconds to %f", _DEFAULT_TIMEOUT)
else:
    _DEFAULT_TIMEOUT = 60.0
+
+
def _create_channel(target: str, options: Sequence[Tuple[str, str]],
                    channel_credentials: Optional[grpc.ChannelCredentials],
                    compression: Optional[grpc.Compression]) -> grpc.Channel:
    """Creates a secure channel to `target` with the given configuration.

    Args:
      target: The server address.
      options: Channel arguments forwarded to gRPC Core.
      channel_credentials: Credentials for the whole channel, or None.
      compression: Channel-wide compression setting, or None.

    Returns:
      A newly created grpc.Channel.
    """
    # Use lazy %-style logging arguments so the interpolation only happens
    # when DEBUG logging is actually enabled (the f-string version paid the
    # formatting cost unconditionally).
    _LOGGER.debug(
        "Creating secure channel with credentials '%s', "
        "options '%s' and compression '%s'", channel_credentials, options,
        compression)
    return grpc.secure_channel(target,
                               credentials=channel_credentials,
                               options=options,
                               compression=compression)
+
+
class ChannelCache:
    """Process-wide singleton cache of channels keyed by configuration.

    Entries are evicted by a background daemon thread once they exceed
    _EVICTION_PERIOD in age or once the cache grows past _MAXIMUM_CHANNELS.
    """
    # NOTE(rbellevi): Untyped due to reference cycle.
    _singleton = None
    _lock: threading.RLock = threading.RLock()
    _condition: threading.Condition = threading.Condition(lock=_lock)
    _eviction_ready: threading.Event = threading.Event()

    # Insertion-ordered mapping; the first entry is always the next one due
    # for eviction.
    _mapping: Dict[CacheKey, Tuple[grpc.Channel, datetime.datetime]]
    _eviction_thread: threading.Thread

    def __init__(self):
        self._mapping = collections.OrderedDict()
        self._eviction_thread = threading.Thread(
            target=ChannelCache._perform_evictions, daemon=True)
        self._eviction_thread.start()

    @staticmethod
    def get():
        """Returns the singleton cache, constructing it on first use."""
        with ChannelCache._lock:
            if ChannelCache._singleton is None:
                ChannelCache._singleton = ChannelCache()
        # Wait outside the lock: the eviction thread must acquire the lock
        # before it sets _eviction_ready.
        ChannelCache._eviction_ready.wait()
        return ChannelCache._singleton

    def _evict_locked(self, key: CacheKey):
        # Precondition: the caller holds _lock.
        channel, _ = self._mapping.pop(key)
        _LOGGER.debug("Evicting channel %s with configuration %s.", channel,
                      key)
        channel.close()
        del channel

    @staticmethod
    def _perform_evictions():
        # Daemon loop: sleeps on the condition variable until there is
        # eviction work, then closes oversize or overdue channels.
        while True:
            with ChannelCache._lock:
                ChannelCache._eviction_ready.set()
                if not ChannelCache._singleton._mapping:
                    ChannelCache._condition.wait()
                elif len(ChannelCache._singleton._mapping) > _MAXIMUM_CHANNELS:
                    key = next(iter(ChannelCache._singleton._mapping.keys()))
                    ChannelCache._singleton._evict_locked(key)
                    # And immediately reevaluate.
                else:
                    key, (_, eviction_time) = next(
                        iter(ChannelCache._singleton._mapping.items()))
                    now = datetime.datetime.now()
                    if eviction_time <= now:
                        ChannelCache._singleton._evict_locked(key)
                        continue
                    else:
                        time_to_eviction = (eviction_time - now).total_seconds()
                        # NOTE: We aim to *eventually* coalesce to a state in
                        # which no overdue channels are in the cache and the
                        # length of the cache is no longer than
                        # _MAXIMUM_CHANNELS. We tolerate momentary states in
                        # which these two criteria are not met.
                        ChannelCache._condition.wait(timeout=time_to_eviction)

    def get_channel(self, target: str, options: Sequence[Tuple[str, str]],
                    channel_credentials: Optional[grpc.ChannelCredentials],
                    insecure: bool,
                    compression: Optional[grpc.Compression]) -> grpc.Channel:
        """Returns a cached channel for the configuration, creating one if absent.

        Raises:
          ValueError: If both `insecure` and `channel_credentials` are given.
        """
        if insecure and channel_credentials:
            raise ValueError("The insecure option is mutually exclusive with " +
                             "the channel_credentials option. Please use one " +
                             "or the other.")
        if insecure:
            channel_credentials = grpc.experimental.insecure_channel_credentials()
        elif channel_credentials is None:
            _LOGGER.debug("Defaulting to SSL channel credentials.")
            channel_credentials = grpc.ssl_channel_credentials()
        key = (target, options, channel_credentials, compression)
        with self._lock:
            channel_data = self._mapping.get(key, None)
            if channel_data is not None:
                # Cache hit: refresh the eviction deadline by reinserting the
                # entry at the back of the ordered mapping.
                channel = channel_data[0]
                self._mapping.pop(key)
                self._mapping[key] = (channel, datetime.datetime.now() +
                                      _EVICTION_PERIOD)
                return channel
            else:
                channel = _create_channel(target, options, channel_credentials,
                                          compression)
                self._mapping[key] = (channel, datetime.datetime.now() +
                                      _EVICTION_PERIOD)
                # Wake the eviction thread: it may be sleeping on an empty
                # cache, or the size cap may now be reached.
                if len(self._mapping) == 1 or len(
                        self._mapping) >= _MAXIMUM_CHANNELS:
                    self._condition.notify()
                return channel

    def _test_only_channel_count(self) -> int:
        # Test hook: current number of cached channels.
        with self._lock:
            return len(self._mapping)
+
+
@experimental_api
def unary_unary(
        request: RequestType,
        target: str,
        method: str,
        request_serializer: Optional[Callable[[Any], bytes]] = None,
        response_deserializer: Optional[Callable[[bytes], Any]] = None,
        options: Sequence[Tuple[AnyStr, AnyStr]] = (),
        channel_credentials: Optional[grpc.ChannelCredentials] = None,
        insecure: bool = False,
        call_credentials: Optional[grpc.CallCredentials] = None,
        compression: Optional[grpc.Compression] = None,
        wait_for_ready: Optional[bool] = None,
        timeout: Optional[float] = _DEFAULT_TIMEOUT,
        metadata: Optional[Sequence[Tuple[str, Union[str, bytes]]]] = None
) -> ResponseType:
    """Invokes a unary-unary RPC without an explicitly specified channel.

    THIS IS AN EXPERIMENTAL API.

    This is backed by a per-process cache of channels. Channels are evicted
    from the cache after a fixed period by a background thread. Channels will
    also be evicted if more than a configured maximum accumulate.

    The default eviction period is 10 minutes. One may set the environment
    variable "GRPC_PYTHON_MANAGED_CHANNEL_EVICTION_SECONDS" to configure this.

    The default maximum number of channels is 256. One may set the
    environment variable "GRPC_PYTHON_MANAGED_CHANNEL_MAXIMUM" to configure
    this.

    Args:
      request: The request value for the RPC.
      target: The server address.
      method: The name of the RPC method.
      request_serializer: Optional :term:`serializer` for serializing the request
        message. Request goes unserialized in case None is passed.
      response_deserializer: Optional :term:`deserializer` for deserializing the response
        message. Response goes undeserialized in case None is passed.
      options: An optional list of key-value pairs (:term:`channel_arguments` in gRPC Core
        runtime) to configure the channel.
      channel_credentials: A credential applied to the whole channel, e.g. the
        return value of grpc.ssl_channel_credentials() or
        grpc.insecure_channel_credentials().
      insecure: If True, specifies channel_credentials as
        :term:`grpc.insecure_channel_credentials()`. This option is mutually
        exclusive with the `channel_credentials` option.
      call_credentials: A call credential applied to each call individually,
        e.g. the output of grpc.metadata_call_credentials() or
        grpc.access_token_call_credentials().
      compression: An optional value indicating the compression method to be
        used over the lifetime of the channel, e.g. grpc.Compression.Gzip.
      wait_for_ready: An optional flag indicating whether the RPC should fail
        immediately if the connection is not ready at the time the RPC is
        invoked, or if it should wait until the connection to the server
        becomes ready. When using this option, the user will likely also want
        to set a timeout. Defaults to True.
      timeout: An optional duration of time in seconds to allow for the RPC,
        after which an exception will be raised. If timeout is unspecified,
        defaults to a timeout controlled by the
        GRPC_PYTHON_DEFAULT_TIMEOUT_SECONDS environment variable. If that is
        unset, defaults to 60 seconds. Supply a value of None to indicate that
        no timeout should be enforced.
      metadata: Optional metadata to send to the server.

    Returns:
      The response to the RPC.
    """
    channel = ChannelCache.get().get_channel(target, options,
                                             channel_credentials, insecure,
                                             compression)
    multicallable = channel.unary_unary(method, request_serializer,
                                        response_deserializer)
    # An unset wait_for_ready defaults to True for these convenience stubs.
    wait_for_ready = wait_for_ready if wait_for_ready is not None else True
    return multicallable(request,
                         metadata=metadata,
                         wait_for_ready=wait_for_ready,
                         credentials=call_credentials,
                         timeout=timeout)
+
+
@experimental_api
def unary_stream(
        request: RequestType,
        target: str,
        method: str,
        request_serializer: Optional[Callable[[Any], bytes]] = None,
        response_deserializer: Optional[Callable[[bytes], Any]] = None,
        options: Sequence[Tuple[AnyStr, AnyStr]] = (),
        channel_credentials: Optional[grpc.ChannelCredentials] = None,
        insecure: bool = False,
        call_credentials: Optional[grpc.CallCredentials] = None,
        compression: Optional[grpc.Compression] = None,
        wait_for_ready: Optional[bool] = None,
        timeout: Optional[float] = _DEFAULT_TIMEOUT,
        metadata: Optional[Sequence[Tuple[str, Union[str, bytes]]]] = None
) -> Iterator[ResponseType]:
    """Invokes a unary-stream RPC without an explicitly specified channel.

    THIS IS AN EXPERIMENTAL API.

    This is backed by a per-process cache of channels. Channels are evicted
    from the cache after a fixed period by a background thread. Channels will
    also be evicted if more than a configured maximum accumulate.

    The default eviction period is 10 minutes. One may set the environment
    variable "GRPC_PYTHON_MANAGED_CHANNEL_EVICTION_SECONDS" to configure this.

    The default maximum number of channels is 256. One may set the
    environment variable "GRPC_PYTHON_MANAGED_CHANNEL_MAXIMUM" to configure
    this.

    Args:
      request: The request value for the RPC.
      target: The server address.
      method: The name of the RPC method.
      request_serializer: Optional :term:`serializer` for serializing the request
        message. Request goes unserialized in case None is passed.
      response_deserializer: Optional :term:`deserializer` for deserializing the response
        message. Response goes undeserialized in case None is passed.
      options: An optional list of key-value pairs (:term:`channel_arguments` in gRPC Core
        runtime) to configure the channel.
      channel_credentials: A credential applied to the whole channel, e.g. the
        return value of grpc.ssl_channel_credentials().
      insecure: If True, specifies channel_credentials as
        :term:`grpc.insecure_channel_credentials()`. This option is mutually
        exclusive with the `channel_credentials` option.
      call_credentials: A call credential applied to each call individually,
        e.g. the output of grpc.metadata_call_credentials() or
        grpc.access_token_call_credentials().
      compression: An optional value indicating the compression method to be
        used over the lifetime of the channel, e.g. grpc.Compression.Gzip.
      wait_for_ready: An optional flag indicating whether the RPC should fail
        immediately if the connection is not ready at the time the RPC is
        invoked, or if it should wait until the connection to the server
        becomes ready. When using this option, the user will likely also want
        to set a timeout. Defaults to True.
      timeout: An optional duration of time in seconds to allow for the RPC,
        after which an exception will be raised. If timeout is unspecified,
        defaults to a timeout controlled by the
        GRPC_PYTHON_DEFAULT_TIMEOUT_SECONDS environment variable. If that is
        unset, defaults to 60 seconds. Supply a value of None to indicate that
        no timeout should be enforced.
      metadata: Optional metadata to send to the server.

    Returns:
      An iterator of responses.
    """
    channel = ChannelCache.get().get_channel(target, options,
                                             channel_credentials, insecure,
                                             compression)
    multicallable = channel.unary_stream(method, request_serializer,
                                         response_deserializer)
    # An unset wait_for_ready defaults to True for these convenience stubs.
    wait_for_ready = wait_for_ready if wait_for_ready is not None else True
    return multicallable(request,
                         metadata=metadata,
                         wait_for_ready=wait_for_ready,
                         credentials=call_credentials,
                         timeout=timeout)
+
+
@experimental_api
def stream_unary(
        request_iterator: Iterator[RequestType],
        target: str,
        method: str,
        request_serializer: Optional[Callable[[Any], bytes]] = None,
        response_deserializer: Optional[Callable[[bytes], Any]] = None,
        options: Sequence[Tuple[AnyStr, AnyStr]] = (),
        channel_credentials: Optional[grpc.ChannelCredentials] = None,
        insecure: bool = False,
        call_credentials: Optional[grpc.CallCredentials] = None,
        compression: Optional[grpc.Compression] = None,
        wait_for_ready: Optional[bool] = None,
        timeout: Optional[float] = _DEFAULT_TIMEOUT,
        metadata: Optional[Sequence[Tuple[str, Union[str, bytes]]]] = None
) -> ResponseType:
    """Invokes a stream-unary RPC without an explicitly specified channel.

    THIS IS AN EXPERIMENTAL API.

    This is backed by a per-process cache of channels. Channels are evicted
    from the cache after a fixed period by a background thread. Channels will
    also be evicted if more than a configured maximum accumulate.

    The default eviction period is 10 minutes. One may set the environment
    variable "GRPC_PYTHON_MANAGED_CHANNEL_EVICTION_SECONDS" to configure this.

    The default maximum number of channels is 256. One may set the
    environment variable "GRPC_PYTHON_MANAGED_CHANNEL_MAXIMUM" to configure
    this.

    Args:
      request_iterator: An iterator that yields request values for the RPC.
      target: The server address.
      method: The name of the RPC method.
      request_serializer: Optional :term:`serializer` for serializing the request
        message. Request goes unserialized in case None is passed.
      response_deserializer: Optional :term:`deserializer` for deserializing the response
        message. Response goes undeserialized in case None is passed.
      options: An optional list of key-value pairs (:term:`channel_arguments` in gRPC Core
        runtime) to configure the channel.
      channel_credentials: A credential applied to the whole channel, e.g. the
        return value of grpc.ssl_channel_credentials().
      insecure: If True, specifies channel_credentials as
        :term:`grpc.insecure_channel_credentials()`. This option is mutually
        exclusive with the `channel_credentials` option.
      call_credentials: A call credential applied to each call individually,
        e.g. the output of grpc.metadata_call_credentials() or
        grpc.access_token_call_credentials().
      compression: An optional value indicating the compression method to be
        used over the lifetime of the channel, e.g. grpc.Compression.Gzip.
      wait_for_ready: An optional flag indicating whether the RPC should fail
        immediately if the connection is not ready at the time the RPC is
        invoked, or if it should wait until the connection to the server
        becomes ready. When using this option, the user will likely also want
        to set a timeout. Defaults to True.
      timeout: An optional duration of time in seconds to allow for the RPC,
        after which an exception will be raised. If timeout is unspecified,
        defaults to a timeout controlled by the
        GRPC_PYTHON_DEFAULT_TIMEOUT_SECONDS environment variable. If that is
        unset, defaults to 60 seconds. Supply a value of None to indicate that
        no timeout should be enforced.
      metadata: Optional metadata to send to the server.

    Returns:
      The response to the RPC.
    """
    channel = ChannelCache.get().get_channel(target, options,
                                             channel_credentials, insecure,
                                             compression)
    multicallable = channel.stream_unary(method, request_serializer,
                                         response_deserializer)
    # An unset wait_for_ready defaults to True for these convenience stubs.
    wait_for_ready = wait_for_ready if wait_for_ready is not None else True
    return multicallable(request_iterator,
                         metadata=metadata,
                         wait_for_ready=wait_for_ready,
                         credentials=call_credentials,
                         timeout=timeout)
+
+
@experimental_api
def stream_stream(
        request_iterator: Iterator[RequestType],
        target: str,
        method: str,
        request_serializer: Optional[Callable[[Any], bytes]] = None,
        response_deserializer: Optional[Callable[[bytes], Any]] = None,
        options: Sequence[Tuple[AnyStr, AnyStr]] = (),
        channel_credentials: Optional[grpc.ChannelCredentials] = None,
        insecure: bool = False,
        call_credentials: Optional[grpc.CallCredentials] = None,
        compression: Optional[grpc.Compression] = None,
        wait_for_ready: Optional[bool] = None,
        timeout: Optional[float] = _DEFAULT_TIMEOUT,
        metadata: Optional[Sequence[Tuple[str, Union[str, bytes]]]] = None
) -> Iterator[ResponseType]:
    """Invokes a stream-stream RPC without an explicitly specified channel.

    THIS IS AN EXPERIMENTAL API.

    This is backed by a per-process cache of channels. Channels are evicted
    from the cache after a fixed period by a background thread. Channels will
    also be evicted if more than a configured maximum accumulate.

    The default eviction period is 10 minutes. One may set the environment
    variable "GRPC_PYTHON_MANAGED_CHANNEL_EVICTION_SECONDS" to configure this.

    The default maximum number of channels is 256. One may set the
    environment variable "GRPC_PYTHON_MANAGED_CHANNEL_MAXIMUM" to configure
    this.

    Args:
      request_iterator: An iterator that yields request values for the RPC.
      target: The server address.
      method: The name of the RPC method.
      request_serializer: Optional :term:`serializer` for serializing the request
        message. Request goes unserialized in case None is passed.
      response_deserializer: Optional :term:`deserializer` for deserializing the response
        message. Response goes undeserialized in case None is passed.
      options: An optional list of key-value pairs (:term:`channel_arguments` in gRPC Core
        runtime) to configure the channel.
      channel_credentials: A credential applied to the whole channel, e.g. the
        return value of grpc.ssl_channel_credentials().
      insecure: If True, specifies channel_credentials as
        :term:`grpc.insecure_channel_credentials()`. This option is mutually
        exclusive with the `channel_credentials` option.
      call_credentials: A call credential applied to each call individually,
        e.g. the output of grpc.metadata_call_credentials() or
        grpc.access_token_call_credentials().
      compression: An optional value indicating the compression method to be
        used over the lifetime of the channel, e.g. grpc.Compression.Gzip.
      wait_for_ready: An optional flag indicating whether the RPC should fail
        immediately if the connection is not ready at the time the RPC is
        invoked, or if it should wait until the connection to the server
        becomes ready. When using this option, the user will likely also want
        to set a timeout. Defaults to True.
      timeout: An optional duration of time in seconds to allow for the RPC,
        after which an exception will be raised. If timeout is unspecified,
        defaults to a timeout controlled by the
        GRPC_PYTHON_DEFAULT_TIMEOUT_SECONDS environment variable. If that is
        unset, defaults to 60 seconds. Supply a value of None to indicate that
        no timeout should be enforced.
      metadata: Optional metadata to send to the server.

    Returns:
      An iterator of responses.
    """
    channel = ChannelCache.get().get_channel(target, options,
                                             channel_credentials, insecure,
                                             compression)
    multicallable = channel.stream_stream(method, request_serializer,
                                          response_deserializer)
    # An unset wait_for_ready defaults to True for these convenience stubs.
    wait_for_ready = wait_for_ready if wait_for_ready is not None else True
    return multicallable(request_iterator,
                         metadata=metadata,
                         wait_for_ready=wait_for_ready,
                         credentials=call_credentials,
                         timeout=timeout)
diff --git a/contrib/python/grpcio/py3/grpc/_typing.py b/contrib/python/grpcio/py3/grpc/_typing.py
new file mode 100644
index 0000000000..d2a0b47215
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/_typing.py
@@ -0,0 +1,58 @@
+# Copyright 2022 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Common types for gRPC Sync API"""
+
+from typing import (TYPE_CHECKING, Any, Callable, Iterable, Iterator, Optional,
+ Sequence, Tuple, TypeVar, Union)
+
+from grpc._cython import cygrpc
+
+if TYPE_CHECKING:
+ from grpc import ServicerContext
+ from grpc._server import _RPCState
+
# Type variables and aliases shared across the synchronous gRPC stack.
RequestType = TypeVar('RequestType')
ResponseType = TypeVar('ResponseType')
SerializingFunction = Callable[[Any], bytes]
DeserializingFunction = Callable[[bytes], Any]
MetadataType = Sequence[Tuple[str, Union[str, bytes]]]
ChannelArgumentType = Tuple[str, Any]
DoneCallbackType = Callable[[Any], None]
NullaryCallbackType = Callable[[], None]
RequestIterableType = Iterable[Any]
ResponseIterableType = Iterable[Any]
# Callback invoked with a completion-queue event; returns a bool.
# NOTE(review): exact consumption semantics are defined by the caller in
# _channel — confirm there.
UserTag = Callable[[cygrpc.BaseEvent], bool]
IntegratedCallFactory = Callable[
    [int, bytes, None, Optional[float], Optional[MetadataType],
     Optional[cygrpc.CallCredentials], Sequence[Sequence[cygrpc.Operation]],
     UserTag, Any], cygrpc.IntegratedCall]
ServerTagCallbackType = Tuple[Optional['_RPCState'],
                              Sequence[NullaryCallbackType]]
ServerCallbackTag = Callable[[cygrpc.BaseEvent], ServerTagCallbackType]
# All eight server-handler arities:
# (request | request_iterator, context[, response_callback])
#     -> (response | response_iterator).
ArityAgnosticMethodHandler = Union[
    Callable[[RequestType, 'ServicerContext', Callable[[ResponseType], None]],
             ResponseType],
    Callable[[RequestType, 'ServicerContext', Callable[[ResponseType], None]],
             Iterator[ResponseType]],
    Callable[[Iterator[RequestType], 'ServicerContext',
              Callable[[ResponseType], None]], ResponseType],
    Callable[[Iterator[RequestType], 'ServicerContext',
              Callable[[ResponseType], None]], Iterator[ResponseType]],
    Callable[[RequestType, 'ServicerContext'], ResponseType],
    Callable[[RequestType, 'ServicerContext'], Iterator[ResponseType]],
    Callable[[Iterator[RequestType], 'ServicerContext'], ResponseType],
    Callable[[Iterator[RequestType], 'ServicerContext'],
             Iterator[ResponseType]]]
diff --git a/contrib/python/grpcio/py3/grpc/_utilities.py b/contrib/python/grpcio/py3/grpc/_utilities.py
new file mode 100644
index 0000000000..3dafa7a03d
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/_utilities.py
@@ -0,0 +1,180 @@
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Internal utilities for gRPC Python."""
+
+import collections
+import logging
+import threading
+import time
+from typing import Callable, Dict, Optional, Sequence
+
+import grpc # pytype: disable=pyi-error
+from grpc import _common # pytype: disable=pyi-error
+from grpc._typing import DoneCallbackType
+
+_LOGGER = logging.getLogger(__name__)
+
+_DONE_CALLBACK_EXCEPTION_LOG_MESSAGE = (
+ 'Exception calling connectivity future "done" callback!')
+
+
class RpcMethodHandler(
        collections.namedtuple('_RpcMethodHandler', (
            'request_streaming',
            'response_streaming',
            'request_deserializer',
            'response_serializer',
            'unary_unary',
            'unary_stream',
            'stream_unary',
            'stream_stream',
        )), grpc.RpcMethodHandler):
    """Immutable namedtuple implementation of the grpc.RpcMethodHandler ABC."""
    pass
+
+
class DictionaryGenericHandler(grpc.ServiceRpcHandler):
    """ServiceRpcHandler backed by a dict keyed by fully-qualified method."""
    _name: str
    _method_handlers: Dict[str, grpc.RpcMethodHandler]

    def __init__(self, service: str,
                 method_handlers: Dict[str, grpc.RpcMethodHandler]):
        self._name = service
        # Re-key each handler by its fully-qualified "/service/method" name.
        self._method_handlers = {
            _common.fully_qualified_method(service, name): handler
            for name, handler in method_handlers.items()
        }

    def service_name(self) -> str:
        return self._name

    def service(
        self, handler_call_details: grpc.HandlerCallDetails
    ) -> Optional[grpc.RpcMethodHandler]:
        # Returns None when no handler is registered for the method.
        return self._method_handlers.get(
            handler_call_details.method)  # pytype: disable=attribute-error
+
+
class _ChannelReadyFuture(grpc.Future):
    """A Future that matures once its channel reports READY connectivity.

    start() subscribes to connectivity updates; the future unsubscribes
    itself when it matures or is cancelled.
    """
    _condition: threading.Condition
    _channel: grpc.Channel
    _matured: bool
    _cancelled: bool
    # Drained and replaced by None once the future matures or is cancelled.
    _done_callbacks: Sequence[Callable]

    def __init__(self, channel: grpc.Channel):
        self._condition = threading.Condition()
        self._channel = channel

        self._matured = False
        self._cancelled = False
        self._done_callbacks = []

    def _block(self, timeout: Optional[float]) -> None:
        """Waits until done; raises on cancellation or timeout expiry."""
        until = None if timeout is None else time.time() + timeout
        with self._condition:
            while True:
                if self._cancelled:
                    raise grpc.FutureCancelledError()
                elif self._matured:
                    return
                else:
                    if until is None:
                        self._condition.wait()
                    else:
                        remaining = until - time.time()
                        if remaining < 0:
                            raise grpc.FutureTimeoutError()
                        else:
                            self._condition.wait(timeout=remaining)

    def _update(self, connectivity: Optional[grpc.ChannelConnectivity]) -> None:
        """Connectivity-subscription callback: matures the future on READY."""
        with self._condition:
            if (not self._cancelled and
                    connectivity is grpc.ChannelConnectivity.READY):
                self._matured = True
                self._channel.unsubscribe(self._update)
                self._condition.notify_all()
                done_callbacks = tuple(self._done_callbacks)
                self._done_callbacks = None
            else:
                return

        # Callbacks are invoked outside the lock.
        for done_callback in done_callbacks:
            try:
                done_callback(self)
            except Exception:  # pylint: disable=broad-except
                _LOGGER.exception(_DONE_CALLBACK_EXCEPTION_LOG_MESSAGE)

    def cancel(self) -> bool:
        """Cancels the future; returns False if it already matured."""
        with self._condition:
            if not self._matured:
                self._cancelled = True
                self._channel.unsubscribe(self._update)
                self._condition.notify_all()
                done_callbacks = tuple(self._done_callbacks)
                self._done_callbacks = None
            else:
                return False

        # Callbacks are invoked outside the lock.
        for done_callback in done_callbacks:
            try:
                done_callback(self)
            except Exception:  # pylint: disable=broad-except
                _LOGGER.exception(_DONE_CALLBACK_EXCEPTION_LOG_MESSAGE)

        return True

    def cancelled(self) -> bool:
        with self._condition:
            return self._cancelled

    def running(self) -> bool:
        with self._condition:
            return not self._cancelled and not self._matured

    def done(self) -> bool:
        with self._condition:
            return self._cancelled or self._matured

    def result(self, timeout: Optional[float] = None) -> None:
        # A channel-ready future carries no value; blocking is the result.
        self._block(timeout)

    def exception(self, timeout: Optional[float] = None) -> None:
        # Implicitly returns None: maturation stores no exception object.
        self._block(timeout)

    def traceback(self, timeout: Optional[float] = None) -> None:
        # Implicitly returns None: maturation stores no traceback.
        self._block(timeout)

    def add_done_callback(self, fn: DoneCallbackType):
        with self._condition:
            if not self._cancelled and not self._matured:
                self._done_callbacks.append(fn)
                return

        # Already done: run the callback immediately, outside the lock.
        fn(self)

    def start(self):
        """Subscribes to connectivity updates, attempting to connect."""
        with self._condition:
            self._channel.subscribe(self._update, try_to_connect=True)

    def __del__(self):
        # Best-effort unsubscribe so the channel does not keep calling back
        # into a collected future.
        with self._condition:
            if not self._cancelled and not self._matured:
                self._channel.unsubscribe(self._update)
+
+
def channel_ready_future(channel: grpc.Channel) -> _ChannelReadyFuture:
    """Returns a started Future that matures when `channel` becomes READY."""
    future = _ChannelReadyFuture(channel)
    future.start()
    return future
diff --git a/contrib/python/grpcio/py3/grpc/aio/__init__.py b/contrib/python/grpcio/py3/grpc/aio/__init__.py
new file mode 100644
index 0000000000..3436d2ef98
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/aio/__init__.py
@@ -0,0 +1,95 @@
+# Copyright 2019 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""gRPC's Asynchronous Python API.
+
+gRPC Async API objects may only be used on the thread on which they were
+created. AsyncIO doesn't provide thread safety for most of its APIs.
+"""
+
+from typing import Any, Optional, Sequence, Tuple
+
+import grpc
+from grpc._cython.cygrpc import AbortError
+from grpc._cython.cygrpc import BaseError
+from grpc._cython.cygrpc import EOF
+from grpc._cython.cygrpc import InternalError
+from grpc._cython.cygrpc import UsageError
+from grpc._cython.cygrpc import init_grpc_aio
+from grpc._cython.cygrpc import shutdown_grpc_aio
+
+from ._base_call import Call
+from ._base_call import RpcContext
+from ._base_call import StreamStreamCall
+from ._base_call import StreamUnaryCall
+from ._base_call import UnaryStreamCall
+from ._base_call import UnaryUnaryCall
+from ._base_channel import Channel
+from ._base_channel import StreamStreamMultiCallable
+from ._base_channel import StreamUnaryMultiCallable
+from ._base_channel import UnaryStreamMultiCallable
+from ._base_channel import UnaryUnaryMultiCallable
+from ._base_server import Server
+from ._base_server import ServicerContext
+from ._call import AioRpcError
+from ._channel import insecure_channel
+from ._channel import secure_channel
+from ._interceptor import ClientCallDetails
+from ._interceptor import ClientInterceptor
+from ._interceptor import InterceptedUnaryUnaryCall
+from ._interceptor import ServerInterceptor
+from ._interceptor import StreamStreamClientInterceptor
+from ._interceptor import StreamUnaryClientInterceptor
+from ._interceptor import UnaryStreamClientInterceptor
+from ._interceptor import UnaryUnaryClientInterceptor
+from ._metadata import Metadata
+from ._server import server
+from ._typing import ChannelArgumentType
+
+################################### __all__ #################################
+
+__all__ = (
+ 'init_grpc_aio',
+ 'shutdown_grpc_aio',
+ 'AioRpcError',
+ 'RpcContext',
+ 'Call',
+ 'UnaryUnaryCall',
+ 'UnaryStreamCall',
+ 'StreamUnaryCall',
+ 'StreamStreamCall',
+ 'Channel',
+ 'UnaryUnaryMultiCallable',
+ 'UnaryStreamMultiCallable',
+ 'StreamUnaryMultiCallable',
+ 'StreamStreamMultiCallable',
+ 'ClientCallDetails',
+ 'ClientInterceptor',
+ 'UnaryStreamClientInterceptor',
+ 'UnaryUnaryClientInterceptor',
+ 'StreamUnaryClientInterceptor',
+ 'StreamStreamClientInterceptor',
+ 'InterceptedUnaryUnaryCall',
+ 'ServerInterceptor',
+ 'insecure_channel',
+ 'server',
+ 'Server',
+ 'ServicerContext',
+ 'EOF',
+ 'secure_channel',
+ 'AbortError',
+ 'BaseError',
+ 'UsageError',
+ 'InternalError',
+ 'Metadata',
+)
diff --git a/contrib/python/grpcio/py3/grpc/aio/_base_call.py b/contrib/python/grpcio/py3/grpc/aio/_base_call.py
new file mode 100644
index 0000000000..029584e94a
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/aio/_base_call.py
@@ -0,0 +1,248 @@
+# Copyright 2019 The gRPC Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Abstract base classes for client-side Call objects.
+
+Call objects represents the RPC itself, and offer methods to access / modify
+its information. They also offer methods to manipulate the life-cycle of the
+RPC, e.g. cancellation.
+"""
+
+from abc import ABCMeta
+from abc import abstractmethod
+from typing import AsyncIterable, Awaitable, Generic, Optional, Union
+
+import grpc
+
+from ._metadata import Metadata
+from ._typing import DoneCallbackType
+from ._typing import EOFType
+from ._typing import RequestType
+from ._typing import ResponseType
+
+__all__ = 'RpcContext', 'Call', 'UnaryUnaryCall', 'UnaryStreamCall'
+
+
+class RpcContext(metaclass=ABCMeta):
+ """Provides RPC-related information and action."""
+
+ @abstractmethod
+ def cancelled(self) -> bool:
+ """Return True if the RPC is cancelled.
+
+ The RPC is cancelled when the cancellation was requested with cancel().
+
+ Returns:
+ A bool indicates whether the RPC is cancelled or not.
+ """
+
+ @abstractmethod
+ def done(self) -> bool:
+ """Return True if the RPC is done.
+
+ An RPC is done if the RPC is completed, cancelled or aborted.
+
+ Returns:
+ A bool indicates if the RPC is done.
+ """
+
+ @abstractmethod
+ def time_remaining(self) -> Optional[float]:
+ """Describes the length of allowed time remaining for the RPC.
+
+ Returns:
+ A nonnegative float indicating the length of allowed time in seconds
+ remaining for the RPC to complete before it is considered to have
+ timed out, or None if no deadline was specified for the RPC.
+ """
+
+ @abstractmethod
+ def cancel(self) -> bool:
+ """Cancels the RPC.
+
+ Idempotent and has no effect if the RPC has already terminated.
+
+ Returns:
+ A bool indicates if the cancellation is performed or not.
+ """
+
+ @abstractmethod
+ def add_done_callback(self, callback: DoneCallbackType) -> None:
+ """Registers a callback to be called on RPC termination.
+
+ Args:
+ callback: A callable object will be called with the call object as
+ its only argument.
+ """
+
+
+class Call(RpcContext, metaclass=ABCMeta):
+ """The abstract base class of an RPC on the client-side."""
+
+ @abstractmethod
+ async def initial_metadata(self) -> Metadata:
+ """Accesses the initial metadata sent by the server.
+
+ Returns:
+ The initial :term:`metadata`.
+ """
+
+ @abstractmethod
+ async def trailing_metadata(self) -> Metadata:
+ """Accesses the trailing metadata sent by the server.
+
+ Returns:
+ The trailing :term:`metadata`.
+ """
+
+ @abstractmethod
+ async def code(self) -> grpc.StatusCode:
+ """Accesses the status code sent by the server.
+
+ Returns:
+ The StatusCode value for the RPC.
+ """
+
+ @abstractmethod
+ async def details(self) -> str:
+ """Accesses the details sent by the server.
+
+ Returns:
+ The details string of the RPC.
+ """
+
+ @abstractmethod
+ async def wait_for_connection(self) -> None:
+ """Waits until connected to peer and raises aio.AioRpcError if failed.
+
+ This is an EXPERIMENTAL method.
+
+ This method ensures the RPC has been successfully connected. Otherwise,
+ an AioRpcError will be raised to explain the reason of the connection
+ failure.
+
+ This method is recommended for building retry mechanisms.
+ """
+
+
+class UnaryUnaryCall(Generic[RequestType, ResponseType],
+ Call,
+ metaclass=ABCMeta):
+ """The abstract base class of an unary-unary RPC on the client-side."""
+
+ @abstractmethod
+ def __await__(self) -> Awaitable[ResponseType]:
+ """Await the response message to be ready.
+
+ Returns:
+ The response message of the RPC.
+ """
+
+
+class UnaryStreamCall(Generic[RequestType, ResponseType],
+ Call,
+ metaclass=ABCMeta):
+
+ @abstractmethod
+ def __aiter__(self) -> AsyncIterable[ResponseType]:
+ """Returns the async iterable representation that yields messages.
+
+ Under the hood, it is calling the "read" method.
+
+ Returns:
+ An async iterable object that yields messages.
+ """
+
+ @abstractmethod
+ async def read(self) -> Union[EOFType, ResponseType]:
+ """Reads one message from the stream.
+
+ Read operations must be serialized when called from multiple
+ coroutines.
+
+ Returns:
+          A response message, or a `grpc.aio.EOF` to indicate the end of the
+ stream.
+ """
+
+
+class StreamUnaryCall(Generic[RequestType, ResponseType],
+ Call,
+ metaclass=ABCMeta):
+
+ @abstractmethod
+ async def write(self, request: RequestType) -> None:
+ """Writes one message to the stream.
+
+ Raises:
+ An RpcError exception if the write failed.
+ """
+
+ @abstractmethod
+ async def done_writing(self) -> None:
+ """Notifies server that the client is done sending messages.
+
+ After done_writing is called, any additional invocation to the write
+ function will fail. This function is idempotent.
+ """
+
+ @abstractmethod
+ def __await__(self) -> Awaitable[ResponseType]:
+ """Await the response message to be ready.
+
+ Returns:
+ The response message of the stream.
+ """
+
+
+class StreamStreamCall(Generic[RequestType, ResponseType],
+ Call,
+ metaclass=ABCMeta):
+
+ @abstractmethod
+ def __aiter__(self) -> AsyncIterable[ResponseType]:
+ """Returns the async iterable representation that yields messages.
+
+ Under the hood, it is calling the "read" method.
+
+ Returns:
+ An async iterable object that yields messages.
+ """
+
+ @abstractmethod
+ async def read(self) -> Union[EOFType, ResponseType]:
+ """Reads one message from the stream.
+
+ Read operations must be serialized when called from multiple
+ coroutines.
+
+ Returns:
+          A response message, or a `grpc.aio.EOF` to indicate the end of the
+ stream.
+ """
+
+ @abstractmethod
+ async def write(self, request: RequestType) -> None:
+ """Writes one message to the stream.
+
+ Raises:
+ An RpcError exception if the write failed.
+ """
+
+ @abstractmethod
+ async def done_writing(self) -> None:
+ """Notifies server that the client is done sending messages.
+
+ After done_writing is called, any additional invocation to the write
+ function will fail. This function is idempotent.
+ """
diff --git a/contrib/python/grpcio/py3/grpc/aio/_base_channel.py b/contrib/python/grpcio/py3/grpc/aio/_base_channel.py
new file mode 100644
index 0000000000..4135e4796c
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/aio/_base_channel.py
@@ -0,0 +1,348 @@
+# Copyright 2020 The gRPC Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Abstract base classes for Channel objects and Multicallable objects."""
+
+import abc
+from typing import Any, Optional
+
+import grpc
+
+from . import _base_call
+from ._typing import DeserializingFunction
+from ._typing import MetadataType
+from ._typing import RequestIterableType
+from ._typing import SerializingFunction
+
+
+class UnaryUnaryMultiCallable(abc.ABC):
+ """Enables asynchronous invocation of a unary-call RPC."""
+
+ @abc.abstractmethod
+ def __call__(
+ self,
+ request: Any,
+ *,
+ timeout: Optional[float] = None,
+ metadata: Optional[MetadataType] = None,
+ credentials: Optional[grpc.CallCredentials] = None,
+ wait_for_ready: Optional[bool] = None,
+ compression: Optional[grpc.Compression] = None
+ ) -> _base_call.UnaryUnaryCall:
+ """Asynchronously invokes the underlying RPC.
+
+ Args:
+ request: The request value for the RPC.
+ timeout: An optional duration of time in seconds to allow
+ for the RPC.
+ metadata: Optional :term:`metadata` to be transmitted to the
+ service-side of the RPC.
+ credentials: An optional CallCredentials for the RPC. Only valid for
+ secure Channel.
+ wait_for_ready: An optional flag to enable :term:`wait_for_ready` mechanism.
+ compression: An element of grpc.compression, e.g.
+ grpc.compression.Gzip.
+
+ Returns:
+ A UnaryUnaryCall object.
+
+ Raises:
+ RpcError: Indicates that the RPC terminated with non-OK status. The
+ raised RpcError will also be a Call for the RPC affording the RPC's
+ metadata, status code, and details.
+ """
+
+
+class UnaryStreamMultiCallable(abc.ABC):
+ """Enables asynchronous invocation of a server-streaming RPC."""
+
+ @abc.abstractmethod
+ def __call__(
+ self,
+ request: Any,
+ *,
+ timeout: Optional[float] = None,
+ metadata: Optional[MetadataType] = None,
+ credentials: Optional[grpc.CallCredentials] = None,
+ wait_for_ready: Optional[bool] = None,
+ compression: Optional[grpc.Compression] = None
+ ) -> _base_call.UnaryStreamCall:
+ """Asynchronously invokes the underlying RPC.
+
+ Args:
+ request: The request value for the RPC.
+ timeout: An optional duration of time in seconds to allow
+ for the RPC.
+ metadata: Optional :term:`metadata` to be transmitted to the
+ service-side of the RPC.
+ credentials: An optional CallCredentials for the RPC. Only valid for
+ secure Channel.
+ wait_for_ready: An optional flag to enable :term:`wait_for_ready` mechanism.
+ compression: An element of grpc.compression, e.g.
+ grpc.compression.Gzip.
+
+ Returns:
+ A UnaryStreamCall object.
+
+ Raises:
+ RpcError: Indicates that the RPC terminated with non-OK status. The
+ raised RpcError will also be a Call for the RPC affording the RPC's
+ metadata, status code, and details.
+ """
+
+
+class StreamUnaryMultiCallable(abc.ABC):
+ """Enables asynchronous invocation of a client-streaming RPC."""
+
+ @abc.abstractmethod
+ def __call__(
+ self,
+ request_iterator: Optional[RequestIterableType] = None,
+ timeout: Optional[float] = None,
+ metadata: Optional[MetadataType] = None,
+ credentials: Optional[grpc.CallCredentials] = None,
+ wait_for_ready: Optional[bool] = None,
+ compression: Optional[grpc.Compression] = None
+ ) -> _base_call.StreamUnaryCall:
+ """Asynchronously invokes the underlying RPC.
+
+ Args:
+ request_iterator: An optional async iterable or iterable of request
+ messages for the RPC.
+ timeout: An optional duration of time in seconds to allow
+ for the RPC.
+ metadata: Optional :term:`metadata` to be transmitted to the
+ service-side of the RPC.
+ credentials: An optional CallCredentials for the RPC. Only valid for
+ secure Channel.
+ wait_for_ready: An optional flag to enable :term:`wait_for_ready` mechanism.
+ compression: An element of grpc.compression, e.g.
+ grpc.compression.Gzip.
+
+ Returns:
+ A StreamUnaryCall object.
+
+ Raises:
+ RpcError: Indicates that the RPC terminated with non-OK status. The
+ raised RpcError will also be a Call for the RPC affording the RPC's
+ metadata, status code, and details.
+ """
+
+
+class StreamStreamMultiCallable(abc.ABC):
+ """Enables asynchronous invocation of a bidirectional-streaming RPC."""
+
+ @abc.abstractmethod
+ def __call__(
+ self,
+ request_iterator: Optional[RequestIterableType] = None,
+ timeout: Optional[float] = None,
+ metadata: Optional[MetadataType] = None,
+ credentials: Optional[grpc.CallCredentials] = None,
+ wait_for_ready: Optional[bool] = None,
+ compression: Optional[grpc.Compression] = None
+ ) -> _base_call.StreamStreamCall:
+ """Asynchronously invokes the underlying RPC.
+
+ Args:
+ request_iterator: An optional async iterable or iterable of request
+ messages for the RPC.
+ timeout: An optional duration of time in seconds to allow
+ for the RPC.
+ metadata: Optional :term:`metadata` to be transmitted to the
+ service-side of the RPC.
+ credentials: An optional CallCredentials for the RPC. Only valid for
+ secure Channel.
+ wait_for_ready: An optional flag to enable :term:`wait_for_ready` mechanism.
+ compression: An element of grpc.compression, e.g.
+ grpc.compression.Gzip.
+
+ Returns:
+ A StreamStreamCall object.
+
+ Raises:
+ RpcError: Indicates that the RPC terminated with non-OK status. The
+ raised RpcError will also be a Call for the RPC affording the RPC's
+ metadata, status code, and details.
+ """
+
+
+class Channel(abc.ABC):
+ """Enables asynchronous RPC invocation as a client.
+
+ Channel objects implement the Asynchronous Context Manager (aka. async
+    with) type, although they are not supported to be entered and exited
+ multiple times.
+ """
+
+ @abc.abstractmethod
+ async def __aenter__(self):
+ """Starts an asynchronous context manager.
+
+ Returns:
+ Channel the channel that was instantiated.
+ """
+
+ @abc.abstractmethod
+ async def __aexit__(self, exc_type, exc_val, exc_tb):
+ """Finishes the asynchronous context manager by closing the channel.
+
+ Still active RPCs will be cancelled.
+ """
+
+ @abc.abstractmethod
+ async def close(self, grace: Optional[float] = None):
+ """Closes this Channel and releases all resources held by it.
+
+ This method immediately stops the channel from executing new RPCs in
+ all cases.
+
+        If a grace period is specified, this method waits until all active
+        RPCs are finished; once the grace period is reached, the ones that haven't
+ been terminated are cancelled. If a grace period is not specified
+ (by passing None for grace), all existing RPCs are cancelled immediately.
+
+ This method is idempotent.
+ """
+
+ @abc.abstractmethod
+ def get_state(self,
+ try_to_connect: bool = False) -> grpc.ChannelConnectivity:
+ """Checks the connectivity state of a channel.
+
+ This is an EXPERIMENTAL API.
+
+ If the channel reaches a stable connectivity state, it is guaranteed
+ that the return value of this function will eventually converge to that
+ state.
+
+ Args:
+          try_to_connect: a bool indicating whether the Channel should try to
+ connect to peer or not.
+
+ Returns: A ChannelConnectivity object.
+ """
+
+ @abc.abstractmethod
+ async def wait_for_state_change(
+ self,
+ last_observed_state: grpc.ChannelConnectivity,
+ ) -> None:
+ """Waits for a change in connectivity state.
+
+ This is an EXPERIMENTAL API.
+
+ The function blocks until there is a change in the channel connectivity
+ state from the "last_observed_state". If the state is already
+ different, this function will return immediately.
+
+ There is an inherent race between the invocation of
+ "Channel.wait_for_state_change" and "Channel.get_state". The state can
+ change arbitrary many times during the race, so there is no way to
+ observe every state transition.
+
+ If there is a need to put a timeout for this function, please refer to
+ "asyncio.wait_for".
+
+ Args:
+ last_observed_state: A grpc.ChannelConnectivity object representing
+ the last known state.
+ """
+
+ @abc.abstractmethod
+ async def channel_ready(self) -> None:
+ """Creates a coroutine that blocks until the Channel is READY."""
+
+ @abc.abstractmethod
+ def unary_unary(
+ self,
+ method: str,
+ request_serializer: Optional[SerializingFunction] = None,
+ response_deserializer: Optional[DeserializingFunction] = None
+ ) -> UnaryUnaryMultiCallable:
+ """Creates a UnaryUnaryMultiCallable for a unary-unary method.
+
+ Args:
+ method: The name of the RPC method.
+ request_serializer: Optional :term:`serializer` for serializing the request
+ message. Request goes unserialized in case None is passed.
+ response_deserializer: Optional :term:`deserializer` for deserializing the
+ response message. Response goes undeserialized in case None
+ is passed.
+
+ Returns:
+ A UnaryUnaryMultiCallable value for the named unary-unary method.
+ """
+
+ @abc.abstractmethod
+ def unary_stream(
+ self,
+ method: str,
+ request_serializer: Optional[SerializingFunction] = None,
+ response_deserializer: Optional[DeserializingFunction] = None
+ ) -> UnaryStreamMultiCallable:
+ """Creates a UnaryStreamMultiCallable for a unary-stream method.
+
+ Args:
+ method: The name of the RPC method.
+ request_serializer: Optional :term:`serializer` for serializing the request
+ message. Request goes unserialized in case None is passed.
+ response_deserializer: Optional :term:`deserializer` for deserializing the
+ response message. Response goes undeserialized in case None
+ is passed.
+
+ Returns:
+          A UnaryStreamMultiCallable value for the named unary-stream method.
+ """
+
+ @abc.abstractmethod
+ def stream_unary(
+ self,
+ method: str,
+ request_serializer: Optional[SerializingFunction] = None,
+ response_deserializer: Optional[DeserializingFunction] = None
+ ) -> StreamUnaryMultiCallable:
+ """Creates a StreamUnaryMultiCallable for a stream-unary method.
+
+ Args:
+ method: The name of the RPC method.
+ request_serializer: Optional :term:`serializer` for serializing the request
+ message. Request goes unserialized in case None is passed.
+ response_deserializer: Optional :term:`deserializer` for deserializing the
+ response message. Response goes undeserialized in case None
+ is passed.
+
+ Returns:
+ A StreamUnaryMultiCallable value for the named stream-unary method.
+ """
+
+ @abc.abstractmethod
+ def stream_stream(
+ self,
+ method: str,
+ request_serializer: Optional[SerializingFunction] = None,
+ response_deserializer: Optional[DeserializingFunction] = None
+ ) -> StreamStreamMultiCallable:
+ """Creates a StreamStreamMultiCallable for a stream-stream method.
+
+ Args:
+ method: The name of the RPC method.
+ request_serializer: Optional :term:`serializer` for serializing the request
+ message. Request goes unserialized in case None is passed.
+ response_deserializer: Optional :term:`deserializer` for deserializing the
+ response message. Response goes undeserialized in case None
+ is passed.
+
+ Returns:
+ A StreamStreamMultiCallable value for the named stream-stream method.
+ """
diff --git a/contrib/python/grpcio/py3/grpc/aio/_base_server.py b/contrib/python/grpcio/py3/grpc/aio/_base_server.py
new file mode 100644
index 0000000000..a86bbbad09
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/aio/_base_server.py
@@ -0,0 +1,369 @@
+# Copyright 2020 The gRPC Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Abstract base classes for server-side classes."""
+
+import abc
+from typing import Generic, Iterable, Mapping, NoReturn, Optional, Sequence
+
+import grpc
+
+from ._metadata import Metadata
+from ._typing import DoneCallbackType
+from ._typing import MetadataType
+from ._typing import RequestType
+from ._typing import ResponseType
+
+
+class Server(abc.ABC):
+ """Serves RPCs."""
+
+ @abc.abstractmethod
+ def add_generic_rpc_handlers(
+ self,
+ generic_rpc_handlers: Sequence[grpc.GenericRpcHandler]) -> None:
+ """Registers GenericRpcHandlers with this Server.
+
+ This method is only safe to call before the server is started.
+
+ Args:
+ generic_rpc_handlers: A sequence of GenericRpcHandlers that will be
+ used to service RPCs.
+ """
+
+ @abc.abstractmethod
+ def add_insecure_port(self, address: str) -> int:
+ """Opens an insecure port for accepting RPCs.
+
+        A port is a communication endpoint that is used by networking protocols,
+ like TCP and UDP. To date, we only support TCP.
+
+ This method may only be called before starting the server.
+
+ Args:
+ address: The address for which to open a port. If the port is 0,
+ or not specified in the address, then the gRPC runtime will choose a port.
+
+ Returns:
+ An integer port on which the server will accept RPC requests.
+ """
+
+ @abc.abstractmethod
+ def add_secure_port(self, address: str,
+ server_credentials: grpc.ServerCredentials) -> int:
+ """Opens a secure port for accepting RPCs.
+
+        A port is a communication endpoint that is used by networking protocols,
+ like TCP and UDP. To date, we only support TCP.
+
+ This method may only be called before starting the server.
+
+ Args:
+ address: The address for which to open a port.
+ if the port is 0, or not specified in the address, then the gRPC
+ runtime will choose a port.
+ server_credentials: A ServerCredentials object.
+
+ Returns:
+ An integer port on which the server will accept RPC requests.
+ """
+
+ @abc.abstractmethod
+ async def start(self) -> None:
+ """Starts this Server.
+
+ This method may only be called once. (i.e. it is not idempotent).
+ """
+
+ @abc.abstractmethod
+ async def stop(self, grace: Optional[float]) -> None:
+ """Stops this Server.
+
+ This method immediately stops the server from servicing new RPCs in
+ all cases.
+
+ If a grace period is specified, this method returns immediately and all
+ RPCs active at the end of the grace period are aborted. If a grace
+ period is not specified (by passing None for grace), all existing RPCs
+ are aborted immediately and this method blocks until the last RPC
+ handler terminates.
+
+ This method is idempotent and may be called at any time. Passing a
+ smaller grace value in a subsequent call will have the effect of
+ stopping the Server sooner (passing None will have the effect of
+ stopping the server immediately). Passing a larger grace value in a
+ subsequent call will not have the effect of stopping the server later
+ (i.e. the most restrictive grace value is used).
+
+ Args:
+ grace: A duration of time in seconds or None.
+ """
+
+ @abc.abstractmethod
+ async def wait_for_termination(self,
+ timeout: Optional[float] = None) -> bool:
+ """Continues current coroutine once the server stops.
+
+ This is an EXPERIMENTAL API.
+
+ The wait will not consume computational resources during blocking, and
+ it will block until one of the two following conditions are met:
+
+ 1) The server is stopped or terminated;
+ 2) A timeout occurs if timeout is not `None`.
+
+ The timeout argument works in the same way as `threading.Event.wait()`.
+ https://docs.python.org/3/library/threading.html#threading.Event.wait
+
+ Args:
+ timeout: A floating point number specifying a timeout for the
+ operation in seconds.
+
+ Returns:
+ A bool indicates if the operation times out.
+ """
+
+
+# pylint: disable=too-many-public-methods
+class ServicerContext(Generic[RequestType, ResponseType], abc.ABC):
+ """A context object passed to method implementations."""
+
+ @abc.abstractmethod
+ async def read(self) -> RequestType:
+ """Reads one message from the RPC.
+
+ Only one read operation is allowed simultaneously.
+
+ Returns:
+ A response message of the RPC.
+
+ Raises:
+ An RpcError exception if the read failed.
+ """
+
+ @abc.abstractmethod
+ async def write(self, message: ResponseType) -> None:
+ """Writes one message to the RPC.
+
+ Only one write operation is allowed simultaneously.
+
+ Raises:
+ An RpcError exception if the write failed.
+ """
+
+ @abc.abstractmethod
+ async def send_initial_metadata(self,
+ initial_metadata: MetadataType) -> None:
+ """Sends the initial metadata value to the client.
+
+ This method need not be called by implementations if they have no
+ metadata to add to what the gRPC runtime will transmit.
+
+ Args:
+ initial_metadata: The initial :term:`metadata`.
+ """
+
+ @abc.abstractmethod
+ async def abort(
+ self,
+ code: grpc.StatusCode,
+ details: str = '',
+ trailing_metadata: MetadataType = tuple()) -> NoReturn:
+ """Raises an exception to terminate the RPC with a non-OK status.
+
+        The code and details passed as arguments will supersede any existing
+ ones.
+
+ Args:
+ code: A StatusCode object to be sent to the client.
+ It must not be StatusCode.OK.
+ details: A UTF-8-encodable string to be sent to the client upon
+ termination of the RPC.
+ trailing_metadata: A sequence of tuple represents the trailing
+ :term:`metadata`.
+
+ Raises:
+          Exception: An exception is always raised to signal the abortion of the
+ RPC to the gRPC runtime.
+ """
+
+ @abc.abstractmethod
+ def set_trailing_metadata(self, trailing_metadata: MetadataType) -> None:
+ """Sends the trailing metadata for the RPC.
+
+ This method need not be called by implementations if they have no
+ metadata to add to what the gRPC runtime will transmit.
+
+ Args:
+ trailing_metadata: The trailing :term:`metadata`.
+ """
+
+ @abc.abstractmethod
+ def invocation_metadata(self) -> Optional[Metadata]:
+ """Accesses the metadata sent by the client.
+
+ Returns:
+ The invocation :term:`metadata`.
+ """
+
+ @abc.abstractmethod
+ def set_code(self, code: grpc.StatusCode) -> None:
+ """Sets the value to be used as status code upon RPC completion.
+
+ This method need not be called by method implementations if they wish
+ the gRPC runtime to determine the status code of the RPC.
+
+ Args:
+ code: A StatusCode object to be sent to the client.
+ """
+
+ @abc.abstractmethod
+ def set_details(self, details: str) -> None:
+ """Sets the value to be used the as detail string upon RPC completion.
+
+ This method need not be called by method implementations if they have
+ no details to transmit.
+
+ Args:
+ details: A UTF-8-encodable string to be sent to the client upon
+ termination of the RPC.
+ """
+
+ @abc.abstractmethod
+ def set_compression(self, compression: grpc.Compression) -> None:
+ """Set the compression algorithm to be used for the entire call.
+
+ Args:
+ compression: An element of grpc.compression, e.g.
+ grpc.compression.Gzip.
+ """
+
+ @abc.abstractmethod
+ def disable_next_message_compression(self) -> None:
+ """Disables compression for the next response message.
+
+ This method will override any compression configuration set during
+ server creation or set on the call.
+ """
+
+ @abc.abstractmethod
+ def peer(self) -> str:
+ """Identifies the peer that invoked the RPC being serviced.
+
+ Returns:
+ A string identifying the peer that invoked the RPC being serviced.
+ The string format is determined by gRPC runtime.
+ """
+
+ @abc.abstractmethod
+ def peer_identities(self) -> Optional[Iterable[bytes]]:
+ """Gets one or more peer identity(s).
+
+ Equivalent to
+ servicer_context.auth_context().get(servicer_context.peer_identity_key())
+
+ Returns:
+ An iterable of the identities, or None if the call is not
+ authenticated. Each identity is returned as a raw bytes type.
+ """
+
+ @abc.abstractmethod
+ def peer_identity_key(self) -> Optional[str]:
+ """The auth property used to identify the peer.
+
+ For example, "x509_common_name" or "x509_subject_alternative_name" are
+ used to identify an SSL peer.
+
+ Returns:
+ The auth property (string) that indicates the
+ peer identity, or None if the call is not authenticated.
+ """
+
+ @abc.abstractmethod
+ def auth_context(self) -> Mapping[str, Iterable[bytes]]:
+ """Gets the auth context for the call.
+
+ Returns:
+ A map of strings to an iterable of bytes for each auth property.
+ """
+
+ def time_remaining(self) -> float:
+ """Describes the length of allowed time remaining for the RPC.
+
+ Returns:
+ A nonnegative float indicating the length of allowed time in seconds
+ remaining for the RPC to complete before it is considered to have
+ timed out, or None if no deadline was specified for the RPC.
+ """
+
+ def trailing_metadata(self):
+ """Access value to be used as trailing metadata upon RPC completion.
+
+ This is an EXPERIMENTAL API.
+
+ Returns:
+ The trailing :term:`metadata` for the RPC.
+ """
+ raise NotImplementedError()
+
+ def code(self):
+ """Accesses the value to be used as status code upon RPC completion.
+
+ This is an EXPERIMENTAL API.
+
+ Returns:
+ The StatusCode value for the RPC.
+ """
+ raise NotImplementedError()
+
+ def details(self):
+ """Accesses the value to be used as detail string upon RPC completion.
+
+ This is an EXPERIMENTAL API.
+
+ Returns:
+ The details string of the RPC.
+ """
+ raise NotImplementedError()
+
+ def add_done_callback(self, callback: DoneCallbackType) -> None:
+ """Registers a callback to be called on RPC termination.
+
+ This is an EXPERIMENTAL API.
+
+ Args:
+ callback: A callable object will be called with the servicer context
+ object as its only argument.
+ """
+
+ def cancelled(self) -> bool:
+ """Return True if the RPC is cancelled.
+
+ The RPC is cancelled when the cancellation was requested with cancel().
+
+ This is an EXPERIMENTAL API.
+
+ Returns:
+ A bool indicates whether the RPC is cancelled or not.
+ """
+
+ def done(self) -> bool:
+ """Return True if the RPC is done.
+
+ An RPC is done if the RPC is completed, cancelled or aborted.
+
+ This is an EXPERIMENTAL API.
+
+ Returns:
+ A bool indicates if the RPC is done.
+ """
diff --git a/contrib/python/grpcio/py3/grpc/aio/_call.py b/contrib/python/grpcio/py3/grpc/aio/_call.py
new file mode 100644
index 0000000000..37ba945da7
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/aio/_call.py
@@ -0,0 +1,649 @@
+# Copyright 2019 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Invocation-side implementation of gRPC Asyncio Python."""
+
+import asyncio
+import enum
+from functools import partial
+import inspect
+import logging
+import traceback
+from typing import AsyncIterator, Optional, Tuple
+
+import grpc
+from grpc import _common
+from grpc._cython import cygrpc
+
+from . import _base_call
+from ._metadata import Metadata
+from ._typing import DeserializingFunction
+from ._typing import DoneCallbackType
+from ._typing import MetadatumType
+from ._typing import RequestIterableType
+from ._typing import RequestType
+from ._typing import ResponseType
+from ._typing import SerializingFunction
+
# Public names re-exported by the aio package.
# NOTE(review): StreamUnaryCall and StreamStreamCall are defined below but not
# listed here — confirm this omission is intentional.
__all__ = 'AioRpcError', 'Call', 'UnaryUnaryCall', 'UnaryStreamCall'

# Human-readable cancellation/usage details attached to RPC statuses.
_LOCAL_CANCELLATION_DETAILS = 'Locally cancelled by application!'
_GC_CANCELLATION_DETAILS = 'Cancelled upon garbage collection!'
_RPC_ALREADY_FINISHED_DETAILS = 'RPC already finished.'
_RPC_HALF_CLOSED_DETAILS = 'RPC is half closed after calling "done_writing".'
_API_STYLE_ERROR = 'The iterator and read/write APIs may not be mixed on a single RPC.'

# repr()/str() templates for finished calls: placeholders are class name,
# status code, details (and, for non-OK, the Core debug error string).
_OK_CALL_REPRESENTATION = ('<{} of RPC that terminated with:\n'
                           '\tstatus = {}\n'
                           '\tdetails = "{}"\n'
                           '>')

_NON_OK_CALL_REPRESENTATION = ('<{} of RPC that terminated with:\n'
                               '\tstatus = {}\n'
                               '\tdetails = "{}"\n'
                               '\tdebug_error_string = "{}"\n'
                               '>')

# Module-level logger, used for request-iterator failures.
_LOGGER = logging.getLogger(__name__)
+
+
class AioRpcError(grpc.RpcError):
    """RpcError implementation used by the asynchronous API.

    An instance is a snapshot of the *final* status of an RPC: every value
    is already determined by the time it is raised, so the accessors below
    are plain methods rather than coroutines.
    """

    _code: grpc.StatusCode
    _details: Optional[str]
    _initial_metadata: Optional[Metadata]
    _trailing_metadata: Optional[Metadata]
    _debug_error_string: Optional[str]

    def __init__(self,
                 code: grpc.StatusCode,
                 initial_metadata: Metadata,
                 trailing_metadata: Metadata,
                 details: Optional[str] = None,
                 debug_error_string: Optional[str] = None) -> None:
        """Constructor.

        Args:
          code: The status code with which the RPC has been finalized.
          initial_metadata: Optional initial metadata that could be sent by
            the Server.
          trailing_metadata: Optional metadata that could be sent by the
            Server.
          details: Optional details explaining the reason of the error.
          debug_error_string: Optional verbose error string from gRPC Core.
        """
        super().__init__()
        self._code = code
        self._details = details
        self._initial_metadata = initial_metadata
        self._trailing_metadata = trailing_metadata
        self._debug_error_string = debug_error_string

    def code(self) -> grpc.StatusCode:
        """Returns the `grpc.StatusCode` the RPC finished with."""
        return self._code

    def details(self) -> Optional[str]:
        """Returns the error description sent by the server."""
        return self._details

    def initial_metadata(self) -> Metadata:
        """Returns the initial metadata received from the server."""
        return self._initial_metadata

    def trailing_metadata(self) -> Metadata:
        """Returns the trailing metadata received from the server."""
        return self._trailing_metadata

    def debug_error_string(self) -> str:
        """Returns the debug error string received from gRPC Core."""
        return self._debug_error_string

    def _repr(self) -> str:
        """Assembles the error string for the RPC error."""
        return _NON_OK_CALL_REPRESENTATION.format(self.__class__.__name__,
                                                  self._code, self._details,
                                                  self._debug_error_string)

    def __repr__(self) -> str:
        return self._repr()

    # str() and repr() intentionally render the same snapshot.
    __str__ = __repr__
+
+
def _create_rpc_error(initial_metadata: Metadata,
                      status: cygrpc.AioRpcStatus) -> AioRpcError:
    """Builds an AioRpcError snapshot from a Cython-level RPC status."""
    status_code = _common.CYGRPC_STATUS_CODE_TO_STATUS_CODE[status.code()]
    return AioRpcError(
        status_code,
        Metadata.from_tuple(initial_metadata),
        Metadata.from_tuple(status.trailing_metadata()),
        details=status.details(),
        debug_error_string=status.debug_error_string(),
    )
+
+
class Call:
    """Base implementation of client RPC Call object.

    Provides the shared logic for final status access, metadata access and
    cancellation that every concrete call type builds on.
    """
    _loop: asyncio.AbstractEventLoop
    _code: grpc.StatusCode
    _cython_call: cygrpc._AioCall
    _metadata: Tuple[MetadatumType, ...]
    _request_serializer: SerializingFunction
    _response_deserializer: DeserializingFunction

    def __init__(self, cython_call: cygrpc._AioCall, metadata: Metadata,
                 request_serializer: SerializingFunction,
                 response_deserializer: DeserializingFunction,
                 loop: asyncio.AbstractEventLoop) -> None:
        self._loop = loop
        self._cython_call = cython_call
        self._metadata = tuple(metadata)
        self._request_serializer = request_serializer
        self._response_deserializer = response_deserializer

    def __del__(self) -> None:
        # '_cython_call' may never have been assigned if __init__ failed
        # before reaching the assignment.
        cython_call = getattr(self, '_cython_call', None)
        if cython_call is not None and not cython_call.done():
            self._cancel(_GC_CANCELLATION_DETAILS)

    def cancelled(self) -> bool:
        return self._cython_call.cancelled()

    def _cancel(self, details: str) -> bool:
        """Forwards the application cancellation reasoning.

        Returns True if the cancellation was issued, False if the RPC had
        already finished.
        """
        if self._cython_call.done():
            return False
        self._cython_call.cancel(details)
        return True

    def cancel(self) -> bool:
        return self._cancel(_LOCAL_CANCELLATION_DETAILS)

    def done(self) -> bool:
        return self._cython_call.done()

    def add_done_callback(self, callback: DoneCallbackType) -> None:
        # The callback receives this Call object as its only argument.
        self._cython_call.add_done_callback(partial(callback, self))

    def time_remaining(self) -> Optional[float]:
        return self._cython_call.time_remaining()

    async def initial_metadata(self) -> Metadata:
        raw = await self._cython_call.initial_metadata()
        return Metadata.from_tuple(raw)

    async def trailing_metadata(self) -> Metadata:
        status = await self._cython_call.status()
        return Metadata.from_tuple(status.trailing_metadata())

    async def code(self) -> grpc.StatusCode:
        status = await self._cython_call.status()
        return _common.CYGRPC_STATUS_CODE_TO_STATUS_CODE[status.code()]

    async def details(self) -> str:
        status = await self._cython_call.status()
        return status.details()

    async def debug_error_string(self) -> str:
        status = await self._cython_call.status()
        return status.debug_error_string()

    async def _raise_for_status(self) -> None:
        # Local cancellation takes precedence over any RPC-level status.
        if self._cython_call.is_locally_cancelled():
            raise asyncio.CancelledError()
        if await self.code() != grpc.StatusCode.OK:
            raise _create_rpc_error(await self.initial_metadata(), await
                                    self._cython_call.status())

    def _repr(self) -> str:
        return repr(self._cython_call)

    def __repr__(self) -> str:
        return self._repr()

    # str() and repr() intentionally render the same representation.
    __str__ = __repr__
+
+
class _APIStyle(enum.IntEnum):
    """Which of the two mutually exclusive streaming APIs an RPC uses.

    An RPC may be driven either by async-generator iteration or by explicit
    read()/write() calls, but never both; UNKNOWN until the first use.
    """
    UNKNOWN = 0
    ASYNC_GENERATOR = 1
    READER_WRITER = 2
+
+
class _UnaryResponseMixin(Call):
    """Mixin adding single-response awaiting on top of Call."""
    # Task that eventually resolves to the deserialized response, or to
    # cygrpc.EOF if the RPC did not complete OK.
    _call_response: asyncio.Task

    def _init_unary_response_mixin(self, response_task: asyncio.Task):
        self._call_response = response_task

    def cancel(self) -> bool:
        """Cancels the RPC and the task awaiting its response."""
        if super().cancel():
            self._call_response.cancel()
            return True
        else:
            return False

    def __await__(self) -> ResponseType:
        """Wait till the ongoing RPC request finishes."""
        try:
            response = yield from self._call_response
        except asyncio.CancelledError:
            # Even if we caught all other CancelledError, there is still
            # this corner case. If the application cancels immediately after
            # the Call object is created, we will observe this
            # `CancelledError`.
            if not self.cancelled():
                self.cancel()
            raise

        # NOTE(lidiz) If we raise RpcError in the task, and users doesn't
        # 'await' on it. AsyncIO will log 'Task exception was never retrieved'.
        # Instead, if we move the exception raising here, the spam stops.
        # Unfortunately, there can only be one 'yield from' in '__await__'. So,
        # we need to access the private instance variable.
        if response is cygrpc.EOF:
            if self._cython_call.is_locally_cancelled():
                raise asyncio.CancelledError()
            else:
                raise _create_rpc_error(self._cython_call._initial_metadata,
                                        self._cython_call._status)
        else:
            return response
+
+
class _StreamResponseMixin(Call):
    """Mixin adding streaming-response reading on top of Call."""
    # Lazily-created async generator backing the `async for` API.
    _message_aiter: AsyncIterator[ResponseType]
    # Task that must complete before any read (the request-sending step).
    _preparation: asyncio.Task
    # Which response API (iteration vs. read()) the application committed to.
    _response_style: _APIStyle

    def _init_stream_response_mixin(self, preparation: asyncio.Task):
        self._message_aiter = None
        self._preparation = preparation
        self._response_style = _APIStyle.UNKNOWN

    def _update_response_style(self, style: _APIStyle):
        # First use locks in the style; mixing the two APIs is an error.
        if self._response_style is _APIStyle.UNKNOWN:
            self._response_style = style
        elif self._response_style is not style:
            raise cygrpc.UsageError(_API_STYLE_ERROR)

    def cancel(self) -> bool:
        """Cancels the RPC and its preparation task."""
        if super().cancel():
            self._preparation.cancel()
            return True
        else:
            return False

    async def _fetch_stream_responses(self) -> ResponseType:
        """Async generator yielding deserialized messages until EOF."""
        message = await self._read()
        while message is not cygrpc.EOF:
            yield message
            message = await self._read()

        # If the read operation failed, Core should explain why.
        await self._raise_for_status()

    def __aiter__(self) -> AsyncIterator[ResponseType]:
        self._update_response_style(_APIStyle.ASYNC_GENERATOR)
        if self._message_aiter is None:
            self._message_aiter = self._fetch_stream_responses()
        return self._message_aiter

    async def _read(self) -> ResponseType:
        """Reads one message from Core; returns cygrpc.EOF at end of stream."""
        # Wait for the request being sent
        await self._preparation

        # Reads response message from Core
        try:
            raw_response = await self._cython_call.receive_serialized_message()
        except asyncio.CancelledError:
            if not self.cancelled():
                self.cancel()
            raise

        if raw_response is cygrpc.EOF:
            return cygrpc.EOF
        else:
            return _common.deserialize(raw_response,
                                       self._response_deserializer)

    async def read(self) -> ResponseType:
        """Public read(): one message, or cygrpc.EOF once the RPC is done."""
        if self.done():
            await self._raise_for_status()
            return cygrpc.EOF
        self._update_response_style(_APIStyle.READER_WRITER)

        response_message = await self._read()

        if response_message is cygrpc.EOF:
            # If the read operation failed, Core should explain why.
            await self._raise_for_status()
        return response_message
+
+
class _StreamRequestMixin(Call):
    """Mixin adding streaming-request writing on top of Call."""
    # Set once initial metadata has been sent; writes block on it.
    _metadata_sent: asyncio.Event
    # True after done_writing()/half-close; further writes are rejected.
    _done_writing_flag: bool
    # Consumer task draining an application-supplied request iterator,
    # or None in explicit write() mode.
    _async_request_poller: Optional[asyncio.Task]
    # Which request API (iterator vs. write()) the application committed to.
    _request_style: _APIStyle

    def _init_stream_request_mixin(
            self, request_iterator: Optional[RequestIterableType]):
        self._metadata_sent = asyncio.Event()
        self._done_writing_flag = False

        # If user passes in an async iterator, create a consumer Task.
        if request_iterator is not None:
            self._async_request_poller = self._loop.create_task(
                self._consume_request_iterator(request_iterator))
            self._request_style = _APIStyle.ASYNC_GENERATOR
        else:
            self._async_request_poller = None
            self._request_style = _APIStyle.READER_WRITER

    def _raise_for_different_style(self, style: _APIStyle):
        # Mixing the iterator API with read/write is a usage error.
        if self._request_style is not style:
            raise cygrpc.UsageError(_API_STYLE_ERROR)

    def cancel(self) -> bool:
        """Cancels the RPC and the request-consuming task, if any."""
        if super().cancel():
            if self._async_request_poller is not None:
                self._async_request_poller.cancel()
            return True
        else:
            return False

    def _metadata_sent_observer(self):
        # Invoked from the Cython layer (passed as the metadata-sent observer
        # to stream_unary/initiate_stream_stream) once metadata is on the wire.
        self._metadata_sent.set()

    async def _consume_request_iterator(
            self, request_iterator: RequestIterableType) -> None:
        """Drains a sync or async request iterator into the RPC."""
        try:
            if inspect.isasyncgen(request_iterator) or hasattr(
                    request_iterator, '__aiter__'):
                async for request in request_iterator:
                    try:
                        await self._write(request)
                    except AioRpcError as rpc_error:
                        _LOGGER.debug(
                            'Exception while consuming the request_iterator: %s',
                            rpc_error)
                        return
            else:
                for request in request_iterator:
                    try:
                        await self._write(request)
                    except AioRpcError as rpc_error:
                        _LOGGER.debug(
                            'Exception while consuming the request_iterator: %s',
                            rpc_error)
                        return

            await self._done_writing()
        except:  # pylint: disable=bare-except
            # Client iterators can raise exceptions, which we should handle by
            # cancelling the RPC and logging the client's error. No exceptions
            # should escape this function.
            _LOGGER.debug('Client request_iterator raised exception:\n%s',
                          traceback.format_exc())
            self.cancel()

    async def _write(self, request: RequestType) -> None:
        """Serializes and sends one request message; raises if not writable."""
        if self.done():
            raise asyncio.InvalidStateError(_RPC_ALREADY_FINISHED_DETAILS)
        if self._done_writing_flag:
            raise asyncio.InvalidStateError(_RPC_HALF_CLOSED_DETAILS)
        if not self._metadata_sent.is_set():
            # Wait until initial metadata is out; the RPC may have failed
            # in the meantime, in which case surface its status instead.
            await self._metadata_sent.wait()
            if self.done():
                await self._raise_for_status()

        serialized_request = _common.serialize(request,
                                               self._request_serializer)
        try:
            await self._cython_call.send_serialized_message(serialized_request)
        except cygrpc.InternalError:
            await self._raise_for_status()
        except asyncio.CancelledError:
            if not self.cancelled():
                self.cancel()
            raise

    async def _done_writing(self) -> None:
        """Half-closes the sending side; safe to call multiple times."""
        if self.done():
            # If the RPC is finished, do nothing.
            return
        if not self._done_writing_flag:
            # If the done writing is not sent before, try to send it.
            self._done_writing_flag = True
            try:
                await self._cython_call.send_receive_close()
            except asyncio.CancelledError:
                if not self.cancelled():
                    self.cancel()
                raise

    async def write(self, request: RequestType) -> None:
        """Public write(); only valid in READER_WRITER style."""
        self._raise_for_different_style(_APIStyle.READER_WRITER)
        await self._write(request)

    async def done_writing(self) -> None:
        """Signal peer that client is done writing.

        This method is idempotent.
        """
        self._raise_for_different_style(_APIStyle.READER_WRITER)
        await self._done_writing()

    async def wait_for_connection(self) -> None:
        """Returns once initial metadata has been sent (or the RPC failed)."""
        await self._metadata_sent.wait()
        if self.done():
            await self._raise_for_status()
+
+
class UnaryUnaryCall(_UnaryResponseMixin, Call, _base_call.UnaryUnaryCall):
    """Object for managing unary-unary RPC calls.

    Returned when an instance of `UnaryUnaryMultiCallable` object is called.
    """
    # The single (not yet serialized) request message.
    _request: RequestType
    # Task running _invoke(); doubles as the unary-response task.
    _invocation_task: asyncio.Task

    # pylint: disable=too-many-arguments
    def __init__(self, request: RequestType, deadline: Optional[float],
                 metadata: Metadata,
                 credentials: Optional[grpc.CallCredentials],
                 wait_for_ready: Optional[bool], channel: cygrpc.AioChannel,
                 method: bytes, request_serializer: SerializingFunction,
                 response_deserializer: DeserializingFunction,
                 loop: asyncio.AbstractEventLoop) -> None:
        super().__init__(
            channel.call(method, deadline, credentials, wait_for_ready),
            metadata, request_serializer, response_deserializer, loop)
        self._request = request
        self._invocation_task = loop.create_task(self._invoke())
        self._init_unary_response_mixin(self._invocation_task)

    async def _invoke(self) -> ResponseType:
        """Sends the request and resolves to the response or cygrpc.EOF."""
        serialized_request = _common.serialize(self._request,
                                               self._request_serializer)

        # NOTE(lidiz) asyncio.CancelledError is not a good transport for status,
        # because the asyncio.Task class do not cache the exception object.
        # https://github.com/python/cpython/blob/edad4d89e357c92f70c0324b937845d652b20afd/Lib/asyncio/tasks.py#L785
        try:
            serialized_response = await self._cython_call.unary_unary(
                serialized_request, self._metadata)
        except asyncio.CancelledError:
            # Deliberately not re-raised (see NOTE above): cancellation is
            # reported through the cygrpc.EOF sentinel below, which
            # _UnaryResponseMixin.__await__ translates back to CancelledError.
            # NOTE(review): if is_ok() could ever be True after cancellation,
            # 'serialized_response' would be unbound here — sibling
            # StreamUnaryCall._conduct_rpc re-raises instead; confirm
            # intentional.
            if not self.cancelled():
                self.cancel()

        if self._cython_call.is_ok():
            return _common.deserialize(serialized_response,
                                       self._response_deserializer)
        else:
            return cygrpc.EOF

    async def wait_for_connection(self) -> None:
        """Waits for the invocation to finish, surfacing any RPC error."""
        await self._invocation_task
        if self.done():
            await self._raise_for_status()
+
+
class UnaryStreamCall(_StreamResponseMixin, Call, _base_call.UnaryStreamCall):
    """Object for managing unary-stream RPC calls.

    Returned when an instance of `UnaryStreamMultiCallable` object is called.
    """
    _request: RequestType
    _send_unary_request_task: asyncio.Task

    # pylint: disable=too-many-arguments
    def __init__(self, request: RequestType, deadline: Optional[float],
                 metadata: Metadata,
                 credentials: Optional[grpc.CallCredentials],
                 wait_for_ready: Optional[bool], channel: cygrpc.AioChannel,
                 method: bytes, request_serializer: SerializingFunction,
                 response_deserializer: DeserializingFunction,
                 loop: asyncio.AbstractEventLoop) -> None:
        cython_call = channel.call(method, deadline, credentials,
                                   wait_for_ready)
        super().__init__(cython_call, metadata, request_serializer,
                         response_deserializer, loop)
        self._request = request
        # The response mixin gates every read on this task completing.
        self._send_unary_request_task = loop.create_task(
            self._send_unary_request())
        self._init_stream_response_mixin(self._send_unary_request_task)

    async def _send_unary_request(self) -> ResponseType:
        """Serializes the single request and initiates the RPC."""
        payload = _common.serialize(self._request, self._request_serializer)
        try:
            await self._cython_call.initiate_unary_stream(
                payload, self._metadata)
        except asyncio.CancelledError:
            # Make sure the RPC itself is cancelled before propagating.
            if not self.cancelled():
                self.cancel()
            raise

    async def wait_for_connection(self) -> None:
        """Returns once the request has been sent, surfacing any RPC error."""
        await self._send_unary_request_task
        if self.done():
            await self._raise_for_status()
+
+
class StreamUnaryCall(_StreamRequestMixin, _UnaryResponseMixin, Call,
                      _base_call.StreamUnaryCall):
    """Object for managing stream-unary RPC calls.

    Returned when an instance of `StreamUnaryMultiCallable` object is called.
    """

    # pylint: disable=too-many-arguments
    def __init__(self, request_iterator: Optional[RequestIterableType],
                 deadline: Optional[float], metadata: Metadata,
                 credentials: Optional[grpc.CallCredentials],
                 wait_for_ready: Optional[bool], channel: cygrpc.AioChannel,
                 method: bytes, request_serializer: SerializingFunction,
                 response_deserializer: DeserializingFunction,
                 loop: asyncio.AbstractEventLoop) -> None:
        cython_call = channel.call(method, deadline, credentials,
                                   wait_for_ready)
        super().__init__(cython_call, metadata, request_serializer,
                         response_deserializer, loop)

        # The request mixin must be set up before the RPC-driving task runs.
        self._init_stream_request_mixin(request_iterator)
        self._init_unary_response_mixin(loop.create_task(self._conduct_rpc()))

    async def _conduct_rpc(self) -> ResponseType:
        """Drives the RPC and resolves to the response, or cygrpc.EOF on failure."""
        try:
            raw_response = await self._cython_call.stream_unary(
                self._metadata, self._metadata_sent_observer)
        except asyncio.CancelledError:
            # Make sure the RPC itself is cancelled before propagating.
            if not self.cancelled():
                self.cancel()
            raise

        if not self._cython_call.is_ok():
            return cygrpc.EOF
        return _common.deserialize(raw_response, self._response_deserializer)
+
+
class StreamStreamCall(_StreamRequestMixin, _StreamResponseMixin, Call,
                       _base_call.StreamStreamCall):
    """Object for managing stream-stream RPC calls.

    Returned when an instance of `StreamStreamMultiCallable` object is called.
    """
    _initializer: asyncio.Task

    # pylint: disable=too-many-arguments
    def __init__(self, request_iterator: Optional[RequestIterableType],
                 deadline: Optional[float], metadata: Metadata,
                 credentials: Optional[grpc.CallCredentials],
                 wait_for_ready: Optional[bool], channel: cygrpc.AioChannel,
                 method: bytes, request_serializer: SerializingFunction,
                 response_deserializer: DeserializingFunction,
                 loop: asyncio.AbstractEventLoop) -> None:
        cython_call = channel.call(method, deadline, credentials,
                                   wait_for_ready)
        super().__init__(cython_call, metadata, request_serializer,
                         response_deserializer, loop)
        # Both mixins are gated on the stream being opened by this task.
        self._initializer = self._loop.create_task(self._prepare_rpc())
        self._init_stream_request_mixin(request_iterator)
        self._init_stream_response_mixin(self._initializer)

    async def _prepare_rpc(self):
        """This method prepares the RPC for receiving/sending messages.

        All other operations around the stream should only happen after the
        completion of this method.
        """
        try:
            await self._cython_call.initiate_stream_stream(
                self._metadata, self._metadata_sent_observer)
        except asyncio.CancelledError:
            if not self.cancelled():
                self.cancel()
            # No need to raise RpcError here, because no one will `await` this task.
diff --git a/contrib/python/grpcio/py3/grpc/aio/_channel.py b/contrib/python/grpcio/py3/grpc/aio/_channel.py
new file mode 100644
index 0000000000..a6fb222125
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/aio/_channel.py
@@ -0,0 +1,492 @@
+# Copyright 2019 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Invocation-side implementation of gRPC Asyncio Python."""
+
+import asyncio
+import sys
+from typing import Any, Iterable, List, Optional, Sequence
+
+import grpc
+from grpc import _common
+from grpc import _compression
+from grpc import _grpcio_metadata
+from grpc._cython import cygrpc
+
+from . import _base_call
+from . import _base_channel
+from ._call import StreamStreamCall
+from ._call import StreamUnaryCall
+from ._call import UnaryStreamCall
+from ._call import UnaryUnaryCall
+from ._interceptor import ClientInterceptor
+from ._interceptor import InterceptedStreamStreamCall
+from ._interceptor import InterceptedStreamUnaryCall
+from ._interceptor import InterceptedUnaryStreamCall
+from ._interceptor import InterceptedUnaryUnaryCall
+from ._interceptor import StreamStreamClientInterceptor
+from ._interceptor import StreamUnaryClientInterceptor
+from ._interceptor import UnaryStreamClientInterceptor
+from ._interceptor import UnaryUnaryClientInterceptor
+from ._metadata import Metadata
+from ._typing import ChannelArgumentType
+from ._typing import DeserializingFunction
+from ._typing import RequestIterableType
+from ._typing import SerializingFunction
+from ._utils import _timeout_to_deadline
+
# Sent as the primary user-agent channel argument on every channel created
# by this module (see _augment_channel_arguments).
_USER_AGENT = 'grpc-python-asyncio/{}'.format(_grpcio_metadata.__version__)
+
if sys.version_info[1] >= 7:

    def _all_tasks() -> Iterable[asyncio.Task]:
        """Returns all tasks of the running event loop (Python 3.7+)."""
        return asyncio.all_tasks()
else:

    def _all_tasks() -> Iterable[asyncio.Task]:
        """Returns all tasks via the 3.6-era Task classmethod."""
        return asyncio.Task.all_tasks()
+
+
def _augment_channel_arguments(base_options: ChannelArgumentType,
                               compression: Optional[grpc.Compression]):
    """Appends the compression and user-agent options to the base options."""
    compression_option = _compression.create_channel_option(compression)
    user_agent_option = ((
        cygrpc.ChannelArgKey.primary_user_agent_string,
        _USER_AGENT,
    ),)
    return tuple(base_options) + compression_option + user_agent_option
+
+
class _BaseMultiCallable:
    """Base class of all multi callable objects.

    Handles the initialization logic and stores common attributes.
    """
    # NOTE: the original declared `_loop` twice; the duplicate annotation
    # has been removed (it had no runtime effect).
    _loop: asyncio.AbstractEventLoop
    _channel: cygrpc.AioChannel
    _method: bytes
    _request_serializer: SerializingFunction
    _response_deserializer: DeserializingFunction
    _interceptors: Optional[Sequence[ClientInterceptor]]
    _references: List[Any]

    # pylint: disable=too-many-arguments
    def __init__(
        self,
        channel: cygrpc.AioChannel,
        method: bytes,
        request_serializer: SerializingFunction,
        response_deserializer: DeserializingFunction,
        interceptors: Optional[Sequence[ClientInterceptor]],
        references: List[Any],
        loop: asyncio.AbstractEventLoop,
    ) -> None:
        """Stores the per-method configuration shared by every invocation.

        Args:
          channel: The Cython channel the RPCs are issued on.
          method: The fully-qualified method name, as bytes.
          request_serializer: Serializer applied to outgoing requests.
          response_deserializer: Deserializer applied to incoming responses.
          interceptors: Optional client interceptors to apply to each call.
          references: Objects kept alive for the lifetime of this callable.
          loop: The event loop on which calls are scheduled.
        """
        self._loop = loop
        self._channel = channel
        self._method = method
        self._request_serializer = request_serializer
        self._response_deserializer = response_deserializer
        self._interceptors = interceptors
        self._references = references

    @staticmethod
    def _init_metadata(
            metadata: Optional[Metadata] = None,
            compression: Optional[grpc.Compression] = None) -> Metadata:
        """Based on the provided values for <metadata> or <compression> initialise the final
        metadata, as it should be used for the current call.
        """
        metadata = metadata or Metadata()
        if compression:
            # Compression is communicated to the server via a metadata entry.
            metadata = Metadata(
                *_compression.augment_metadata(metadata, compression))
        return metadata
+
+
class UnaryUnaryMultiCallable(_BaseMultiCallable,
                              _base_channel.UnaryUnaryMultiCallable):

    def __call__(
        self,
        request: Any,
        *,
        timeout: Optional[float] = None,
        metadata: Optional[Metadata] = None,
        credentials: Optional[grpc.CallCredentials] = None,
        wait_for_ready: Optional[bool] = None,
        compression: Optional[grpc.Compression] = None
    ) -> _base_call.UnaryUnaryCall:
        """Schedules a unary-unary RPC and returns its Call object."""
        metadata = self._init_metadata(metadata, compression)

        # Interceptors receive the raw timeout; the direct path converts it
        # to an absolute deadline up front.
        if self._interceptors:
            return InterceptedUnaryUnaryCall(
                self._interceptors, request, timeout, metadata, credentials,
                wait_for_ready, self._channel, self._method,
                self._request_serializer, self._response_deserializer,
                self._loop)

        return UnaryUnaryCall(request, _timeout_to_deadline(timeout),
                              metadata, credentials, wait_for_ready,
                              self._channel, self._method,
                              self._request_serializer,
                              self._response_deserializer, self._loop)
+
+
class UnaryStreamMultiCallable(_BaseMultiCallable,
                               _base_channel.UnaryStreamMultiCallable):

    def __call__(
        self,
        request: Any,
        *,
        timeout: Optional[float] = None,
        metadata: Optional[Metadata] = None,
        credentials: Optional[grpc.CallCredentials] = None,
        wait_for_ready: Optional[bool] = None,
        compression: Optional[grpc.Compression] = None
    ) -> _base_call.UnaryStreamCall:
        """Schedules a unary-stream RPC and returns its Call object."""
        metadata = self._init_metadata(metadata, compression)
        deadline = _timeout_to_deadline(timeout)

        if self._interceptors:
            return InterceptedUnaryStreamCall(
                self._interceptors, request, deadline, metadata, credentials,
                wait_for_ready, self._channel, self._method,
                self._request_serializer, self._response_deserializer,
                self._loop)

        return UnaryStreamCall(request, deadline, metadata, credentials,
                               wait_for_ready, self._channel, self._method,
                               self._request_serializer,
                               self._response_deserializer, self._loop)
+
+
class StreamUnaryMultiCallable(_BaseMultiCallable,
                               _base_channel.StreamUnaryMultiCallable):

    def __call__(
        self,
        request_iterator: Optional[RequestIterableType] = None,
        timeout: Optional[float] = None,
        metadata: Optional[Metadata] = None,
        credentials: Optional[grpc.CallCredentials] = None,
        wait_for_ready: Optional[bool] = None,
        compression: Optional[grpc.Compression] = None
    ) -> _base_call.StreamUnaryCall:
        """Schedules a stream-unary RPC and returns its Call object."""
        metadata = self._init_metadata(metadata, compression)
        deadline = _timeout_to_deadline(timeout)

        if self._interceptors:
            return InterceptedStreamUnaryCall(
                self._interceptors, request_iterator, deadline, metadata,
                credentials, wait_for_ready, self._channel, self._method,
                self._request_serializer, self._response_deserializer,
                self._loop)

        return StreamUnaryCall(request_iterator, deadline, metadata,
                               credentials, wait_for_ready, self._channel,
                               self._method, self._request_serializer,
                               self._response_deserializer, self._loop)
+
+
class StreamStreamMultiCallable(_BaseMultiCallable,
                                _base_channel.StreamStreamMultiCallable):

    def __call__(
        self,
        request_iterator: Optional[RequestIterableType] = None,
        timeout: Optional[float] = None,
        metadata: Optional[Metadata] = None,
        credentials: Optional[grpc.CallCredentials] = None,
        wait_for_ready: Optional[bool] = None,
        compression: Optional[grpc.Compression] = None
    ) -> _base_call.StreamStreamCall:
        """Schedules a stream-stream RPC and returns its Call object."""
        metadata = self._init_metadata(metadata, compression)
        deadline = _timeout_to_deadline(timeout)

        if self._interceptors:
            return InterceptedStreamStreamCall(
                self._interceptors, request_iterator, deadline, metadata,
                credentials, wait_for_ready, self._channel, self._method,
                self._request_serializer, self._response_deserializer,
                self._loop)

        return StreamStreamCall(request_iterator, deadline, metadata,
                                credentials, wait_for_ready, self._channel,
                                self._method, self._request_serializer,
                                self._response_deserializer, self._loop)
+
+
+class Channel(_base_channel.Channel):
+ _loop: asyncio.AbstractEventLoop
+ _channel: cygrpc.AioChannel
+ _unary_unary_interceptors: List[UnaryUnaryClientInterceptor]
+ _unary_stream_interceptors: List[UnaryStreamClientInterceptor]
+ _stream_unary_interceptors: List[StreamUnaryClientInterceptor]
+ _stream_stream_interceptors: List[StreamStreamClientInterceptor]
+
+ def __init__(self, target: str, options: ChannelArgumentType,
+ credentials: Optional[grpc.ChannelCredentials],
+ compression: Optional[grpc.Compression],
+ interceptors: Optional[Sequence[ClientInterceptor]]):
+ """Constructor.
+
+ Args:
+ target: The target to which to connect.
+ options: Configuration options for the channel.
+ credentials: A cygrpc.ChannelCredentials or None.
+ compression: An optional value indicating the compression method to be
+ used over the lifetime of the channel.
+ interceptors: An optional list of interceptors that would be used for
+ intercepting any RPC executed with that channel.
+ """
+ self._unary_unary_interceptors = []
+ self._unary_stream_interceptors = []
+ self._stream_unary_interceptors = []
+ self._stream_stream_interceptors = []
+
+ if interceptors is not None:
+ for interceptor in interceptors:
+ if isinstance(interceptor, UnaryUnaryClientInterceptor):
+ self._unary_unary_interceptors.append(interceptor)
+ elif isinstance(interceptor, UnaryStreamClientInterceptor):
+ self._unary_stream_interceptors.append(interceptor)
+ elif isinstance(interceptor, StreamUnaryClientInterceptor):
+ self._stream_unary_interceptors.append(interceptor)
+ elif isinstance(interceptor, StreamStreamClientInterceptor):
+ self._stream_stream_interceptors.append(interceptor)
+ else:
+ raise ValueError(
+ "Interceptor {} must be ".format(interceptor) +
+ "{} or ".format(UnaryUnaryClientInterceptor.__name__) +
+ "{} or ".format(UnaryStreamClientInterceptor.__name__) +
+ "{} or ".format(StreamUnaryClientInterceptor.__name__) +
+ "{}. ".format(StreamStreamClientInterceptor.__name__))
+
+ self._loop = cygrpc.get_working_loop()
+ self._channel = cygrpc.AioChannel(
+ _common.encode(target),
+ _augment_channel_arguments(options, compression), credentials,
+ self._loop)
+
    async def __aenter__(self):
        """Enters the async context manager; returns the channel itself."""
        return self
+
    async def __aexit__(self, exc_type, exc_val, exc_tb):
        """Exits the async context manager by closing the channel.

        Closes with no grace period, so still-running RPCs on this channel
        are cancelled immediately.
        """
        await self._close(None)
+
    async def _close(self, grace):  # pylint: disable=too-many-branches
        """Shared shutdown path behind close() and __aexit__.

        Stops accepting new RPCs, locates the tasks driving calls on this
        channel, optionally waits up to `grace` seconds for them, then
        cancels the remaining calls and destroys the Cython channel.
        """
        if self._channel.closed():
            return

        # No new calls will be accepted by the Cython channel.
        self._channel.closing()

        # Iterate through running tasks
        tasks = _all_tasks()
        calls = []
        call_tasks = []
        for task in tasks:
            try:
                stack = task.get_stack(limit=1)
            except AttributeError as attribute_error:
                # NOTE(lidiz) tl;dr: If the Task is created with a CPython
                # object, it will trigger AttributeError.
                #
                # In the global finalizer, the event loop schedules
                # a CPython PyAsyncGenAThrow object.
                # https://github.com/python/cpython/blob/00e45877e33d32bb61aa13a2033e3bba370bda4d/Lib/asyncio/base_events.py#L484
                #
                # However, the PyAsyncGenAThrow object is written in C and
                # failed to include the normal Python frame objects. Hence,
                # this exception is a false negative, and it is safe to ignore
                # the failure. It is fixed by https://github.com/python/cpython/pull/18669,
                # but not available until 3.9 or 3.8.3. So, we have to keep it
                # for a while.
                # TODO(lidiz) drop this hack after 3.8 deprecation
                if 'frame' in str(attribute_error):
                    continue
                else:
                    raise

            # If the Task is created by a C-extension, the stack will be empty.
            if not stack:
                continue

            # Locate ones created by `aio.Call`.
            frame = stack[0]
            candidate = frame.f_locals.get('self')
            if candidate:
                if isinstance(candidate, _base_call.Call):
                    if hasattr(candidate, '_channel'):
                        # For intercepted Call object
                        if candidate._channel is not self._channel:
                            continue
                    elif hasattr(candidate, '_cython_call'):
                        # For normal Call object
                        if candidate._cython_call._channel is not self._channel:
                            continue
                    else:
                        # Unidentified Call object
                        raise cygrpc.InternalError(
                            f'Unrecognized call object: {candidate}')

                    calls.append(candidate)
                    call_tasks.append(task)

        # If needed, try to wait for them to finish.
        # Call objects are not always awaitables.
        if grace and call_tasks:
            await asyncio.wait(call_tasks, timeout=grace)

        # Time to cancel existing calls.
        for call in calls:
            call.cancel()

        # Destroy the channel
        self._channel.close()
+
+ async def close(self, grace: Optional[float] = None):
+ await self._close(grace)
+
+ def __del__(self):
+ if hasattr(self, '_channel'):
+ if not self._channel.closed():
+ self._channel.close()
+
+ def get_state(self,
+ try_to_connect: bool = False) -> grpc.ChannelConnectivity:
+ result = self._channel.check_connectivity_state(try_to_connect)
+ return _common.CYGRPC_CONNECTIVITY_STATE_TO_CHANNEL_CONNECTIVITY[result]
+
+ async def wait_for_state_change(
+ self,
+ last_observed_state: grpc.ChannelConnectivity,
+ ) -> None:
+ assert await self._channel.watch_connectivity_state(
+ last_observed_state.value[0], None)
+
+ async def channel_ready(self) -> None:
+ state = self.get_state(try_to_connect=True)
+ while state != grpc.ChannelConnectivity.READY:
+ await self.wait_for_state_change(state)
+ state = self.get_state(try_to_connect=True)
+
+ def unary_unary(
+ self,
+ method: str,
+ request_serializer: Optional[SerializingFunction] = None,
+ response_deserializer: Optional[DeserializingFunction] = None
+ ) -> UnaryUnaryMultiCallable:
+ return UnaryUnaryMultiCallable(self._channel, _common.encode(method),
+ request_serializer,
+ response_deserializer,
+ self._unary_unary_interceptors, [self],
+ self._loop)
+
+ def unary_stream(
+ self,
+ method: str,
+ request_serializer: Optional[SerializingFunction] = None,
+ response_deserializer: Optional[DeserializingFunction] = None
+ ) -> UnaryStreamMultiCallable:
+ return UnaryStreamMultiCallable(self._channel, _common.encode(method),
+ request_serializer,
+ response_deserializer,
+ self._unary_stream_interceptors, [self],
+ self._loop)
+
+ def stream_unary(
+ self,
+ method: str,
+ request_serializer: Optional[SerializingFunction] = None,
+ response_deserializer: Optional[DeserializingFunction] = None
+ ) -> StreamUnaryMultiCallable:
+ return StreamUnaryMultiCallable(self._channel, _common.encode(method),
+ request_serializer,
+ response_deserializer,
+ self._stream_unary_interceptors, [self],
+ self._loop)
+
+ def stream_stream(
+ self,
+ method: str,
+ request_serializer: Optional[SerializingFunction] = None,
+ response_deserializer: Optional[DeserializingFunction] = None
+ ) -> StreamStreamMultiCallable:
+ return StreamStreamMultiCallable(self._channel, _common.encode(method),
+ request_serializer,
+ response_deserializer,
+ self._stream_stream_interceptors,
+ [self], self._loop)
+
+
+def insecure_channel(
+ target: str,
+ options: Optional[ChannelArgumentType] = None,
+ compression: Optional[grpc.Compression] = None,
+ interceptors: Optional[Sequence[ClientInterceptor]] = None):
+ """Creates an insecure asynchronous Channel to a server.
+
+ Args:
+ target: The server address
+ options: An optional list of key-value pairs (:term:`channel_arguments`
+ in gRPC Core runtime) to configure the channel.
+ compression: An optional value indicating the compression method to be
+ used over the lifetime of the channel.
+ interceptors: An optional sequence of interceptors that will be executed for
+ any call executed with this channel.
+
+ Returns:
+ A Channel.
+ """
+ return Channel(target, () if options is None else options, None,
+ compression, interceptors)
+
+
+def secure_channel(target: str,
+ credentials: grpc.ChannelCredentials,
+ options: Optional[ChannelArgumentType] = None,
+ compression: Optional[grpc.Compression] = None,
+ interceptors: Optional[Sequence[ClientInterceptor]] = None):
+ """Creates a secure asynchronous Channel to a server.
+
+ Args:
+ target: The server address.
+ credentials: A ChannelCredentials instance.
+ options: An optional list of key-value pairs (:term:`channel_arguments`
+ in gRPC Core runtime) to configure the channel.
+ compression: An optional value indicating the compression method to be
+ used over the lifetime of the channel.
+ interceptors: An optional sequence of interceptors that will be executed for
+ any call executed with this channel.
+
+ Returns:
+ An aio.Channel.
+ """
+ return Channel(target, () if options is None else options,
+ credentials._credentials, compression, interceptors)
diff --git a/contrib/python/grpcio/py3/grpc/aio/_interceptor.py b/contrib/python/grpcio/py3/grpc/aio/_interceptor.py
new file mode 100644
index 0000000000..05f166e3b0
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/aio/_interceptor.py
@@ -0,0 +1,1001 @@
+# Copyright 2019 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Interceptors implementation of gRPC Asyncio Python."""
+from abc import ABCMeta
+from abc import abstractmethod
+import asyncio
+import collections
+import functools
+from typing import (AsyncIterable, Awaitable, Callable, Iterator, List,
+ Optional, Sequence, Union)
+
+import grpc
+from grpc._cython import cygrpc
+
+from . import _base_call
+from ._call import AioRpcError
+from ._call import StreamStreamCall
+from ._call import StreamUnaryCall
+from ._call import UnaryStreamCall
+from ._call import UnaryUnaryCall
+from ._call import _API_STYLE_ERROR
+from ._call import _RPC_ALREADY_FINISHED_DETAILS
+from ._call import _RPC_HALF_CLOSED_DETAILS
+from ._metadata import Metadata
+from ._typing import DeserializingFunction
+from ._typing import DoneCallbackType
+from ._typing import RequestIterableType
+from ._typing import RequestType
+from ._typing import ResponseIterableType
+from ._typing import ResponseType
+from ._typing import SerializingFunction
+from ._utils import _timeout_to_deadline
+
+_LOCAL_CANCELLATION_DETAILS = 'Locally cancelled by application!'
+
+
+class ServerInterceptor(metaclass=ABCMeta):
+ """Affords intercepting incoming RPCs on the service-side.
+
+ This is an EXPERIMENTAL API.
+ """
+
+ @abstractmethod
+ async def intercept_service(
+ self, continuation: Callable[[grpc.HandlerCallDetails],
+ Awaitable[grpc.RpcMethodHandler]],
+ handler_call_details: grpc.HandlerCallDetails
+ ) -> grpc.RpcMethodHandler:
+ """Intercepts incoming RPCs before handing them over to a handler.
+
+ Args:
+ continuation: A function that takes a HandlerCallDetails and
+ proceeds to invoke the next interceptor in the chain, if any,
+ or the RPC handler lookup logic, with the call details passed
+ as an argument, and returns an RpcMethodHandler instance if
+ the RPC is considered serviced, or None otherwise.
+ handler_call_details: A HandlerCallDetails describing the RPC.
+
+ Returns:
+ An RpcMethodHandler with which the RPC may be serviced if the
+ interceptor chooses to service this RPC, or None otherwise.
+ """
+
+
+class ClientCallDetails(
+ collections.namedtuple(
+ 'ClientCallDetails',
+ ('method', 'timeout', 'metadata', 'credentials', 'wait_for_ready')),
+ grpc.ClientCallDetails):
+ """Describes an RPC to be invoked.
+
+ This is an EXPERIMENTAL API.
+
+ Args:
+ method: The method name of the RPC.
+ timeout: An optional duration of time in seconds to allow for the RPC.
+ metadata: Optional metadata to be transmitted to the service-side of
+ the RPC.
+ credentials: An optional CallCredentials for the RPC.
+ wait_for_ready: An optional flag to enable :term:`wait_for_ready` mechanism.
+ """
+
+ method: str
+ timeout: Optional[float]
+ metadata: Optional[Metadata]
+ credentials: Optional[grpc.CallCredentials]
+ wait_for_ready: Optional[bool]
+
+
+class ClientInterceptor(metaclass=ABCMeta):
+ """Base class used for all Aio Client Interceptor classes"""
+
+
+class UnaryUnaryClientInterceptor(ClientInterceptor, metaclass=ABCMeta):
+ """Affords intercepting unary-unary invocations."""
+
+ @abstractmethod
+ async def intercept_unary_unary(
+ self, continuation: Callable[[ClientCallDetails, RequestType],
+ UnaryUnaryCall],
+ client_call_details: ClientCallDetails,
+ request: RequestType) -> Union[UnaryUnaryCall, ResponseType]:
+ """Intercepts a unary-unary invocation asynchronously.
+
+ Args:
+ continuation: A coroutine that proceeds with the invocation by
+ executing the next interceptor in the chain or invoking the
+ actual RPC on the underlying Channel. It is the interceptor's
+ responsibility to call it if it decides to move the RPC forward.
+ The interceptor can use
+ `call = await continuation(client_call_details, request)`
+ to continue with the RPC. `continuation` returns the call to the
+ RPC.
+ client_call_details: A ClientCallDetails object describing the
+ outgoing RPC.
+ request: The request value for the RPC.
+
+ Returns:
+ An object with the RPC response.
+
+ Raises:
+ AioRpcError: Indicating that the RPC terminated with non-OK status.
+ asyncio.CancelledError: Indicating that the RPC was canceled.
+ """
+
+
+class UnaryStreamClientInterceptor(ClientInterceptor, metaclass=ABCMeta):
+ """Affords intercepting unary-stream invocations."""
+
+ @abstractmethod
+ async def intercept_unary_stream(
+ self, continuation: Callable[[ClientCallDetails, RequestType],
+ UnaryStreamCall],
+ client_call_details: ClientCallDetails, request: RequestType
+ ) -> Union[ResponseIterableType, UnaryStreamCall]:
+ """Intercepts a unary-stream invocation asynchronously.
+
+ The function could return the call object or an asynchronous
+ iterator; if it is an asynchronous iterator, it will
+ become the source of the reads done by the caller.
+
+ Args:
+ continuation: A coroutine that proceeds with the invocation by
+ executing the next interceptor in the chain or invoking the
+ actual RPC on the underlying Channel. It is the interceptor's
+ responsibility to call it if it decides to move the RPC forward.
+ The interceptor can use
+ `call = await continuation(client_call_details, request)`
+ to continue with the RPC. `continuation` returns the call to the
+ RPC.
+ client_call_details: A ClientCallDetails object describing the
+ outgoing RPC.
+ request: The request value for the RPC.
+
+ Returns:
+ The RPC Call or an asynchronous iterator.
+
+ Raises:
+ AioRpcError: Indicating that the RPC terminated with non-OK status.
+ asyncio.CancelledError: Indicating that the RPC was canceled.
+ """
+
+
+class StreamUnaryClientInterceptor(ClientInterceptor, metaclass=ABCMeta):
+ """Affords intercepting stream-unary invocations."""
+
+ @abstractmethod
+ async def intercept_stream_unary(
+ self,
+ continuation: Callable[[ClientCallDetails, RequestType],
+ StreamUnaryCall],
+ client_call_details: ClientCallDetails,
+ request_iterator: RequestIterableType,
+ ) -> StreamUnaryCall:
+ """Intercepts a stream-unary invocation asynchronously.
+
+ Within the interceptor the usage of the call methods like `write` or
+ even awaiting the call should be done carefully, since the caller
+ could be expecting an untouched call, for example to start writing
+ messages to it.
+
+ Args:
+ continuation: A coroutine that proceeds with the invocation by
+ executing the next interceptor in the chain or invoking the
+ actual RPC on the underlying Channel. It is the interceptor's
+ responsibility to call it if it decides to move the RPC forward.
+ The interceptor can use
+ `call = await continuation(client_call_details, request_iterator)`
+ to continue with the RPC. `continuation` returns the call to the
+ RPC.
+ client_call_details: A ClientCallDetails object describing the
+ outgoing RPC.
+ request_iterator: The request iterator that will produce requests
+ for the RPC.
+
+ Returns:
+ The RPC Call.
+
+ Raises:
+ AioRpcError: Indicating that the RPC terminated with non-OK status.
+ asyncio.CancelledError: Indicating that the RPC was canceled.
+ """
+
+
+class StreamStreamClientInterceptor(ClientInterceptor, metaclass=ABCMeta):
+ """Affords intercepting stream-stream invocations."""
+
+ @abstractmethod
+ async def intercept_stream_stream(
+ self,
+ continuation: Callable[[ClientCallDetails, RequestType],
+ StreamStreamCall],
+ client_call_details: ClientCallDetails,
+ request_iterator: RequestIterableType,
+ ) -> Union[ResponseIterableType, StreamStreamCall]:
+ """Intercepts a stream-stream invocation asynchronously.
+
+ Within the interceptor the usage of the call methods like `write` or
+ even awaiting the call should be done carefully, since the caller
+ could be expecting an untouched call, for example to start writing
+ messages to it.
+
+ The function could return the call object or an asynchronous
+ iterator; if it is an asynchronous iterator, it will
+ become the source of the reads done by the caller.
+
+ Args:
+ continuation: A coroutine that proceeds with the invocation by
+ executing the next interceptor in the chain or invoking the
+ actual RPC on the underlying Channel. It is the interceptor's
+ responsibility to call it if it decides to move the RPC forward.
+ The interceptor can use
+ `call = await continuation(client_call_details, request_iterator)`
+ to continue with the RPC. `continuation` returns the call to the
+ RPC.
+ client_call_details: A ClientCallDetails object describing the
+ outgoing RPC.
+ request_iterator: The request iterator that will produce requests
+ for the RPC.
+
+ Returns:
+ The RPC Call or an asynchronous iterator.
+
+ Raises:
+ AioRpcError: Indicating that the RPC terminated with non-OK status.
+ asyncio.CancelledError: Indicating that the RPC was canceled.
+ """
+
+
+class InterceptedCall:
+ """Base implementation for all intercepted call arities.
+
+ Interceptors might have some work to do before the RPC invocation with
+ the capacity of changing the invocation parameters, and some work to do
+ after the RPC invocation with the capacity for accessing to the wrapped
+ `UnaryUnaryCall`.
+
+ It handles also early and later cancellations, when the RPC has not even
+ started and the execution is still held by the interceptors or when the
+ RPC has finished but again the execution is still held by the interceptors.
+
+ Once the RPC is finally executed, all methods are finally done against the
+ intercepted call, being at the same time the same call returned to the
+ interceptors.
+
+ As a base class for all of the interceptors implements the logic around
+ final status, metadata and cancellation.
+ """
+
+ _interceptors_task: asyncio.Task
+ _pending_add_done_callbacks: Sequence[DoneCallbackType]
+
+ def __init__(self, interceptors_task: asyncio.Task) -> None:
+ self._interceptors_task = interceptors_task
+ self._pending_add_done_callbacks = []
+ self._interceptors_task.add_done_callback(
+ self._fire_or_add_pending_done_callbacks)
+
+ def __del__(self):
+ self.cancel()
+
+ def _fire_or_add_pending_done_callbacks(
+ self, interceptors_task: asyncio.Task) -> None:
+
+ if not self._pending_add_done_callbacks:
+ return
+
+ call_completed = False
+
+ try:
+ call = interceptors_task.result()
+ if call.done():
+ call_completed = True
+ except (AioRpcError, asyncio.CancelledError):
+ call_completed = True
+
+ if call_completed:
+ for callback in self._pending_add_done_callbacks:
+ callback(self)
+ else:
+ for callback in self._pending_add_done_callbacks:
+ callback = functools.partial(self._wrap_add_done_callback,
+ callback)
+ call.add_done_callback(callback)
+
+ self._pending_add_done_callbacks = []
+
+ def _wrap_add_done_callback(self, callback: DoneCallbackType,
+ unused_call: _base_call.Call) -> None:
+ callback(self)
+
+ def cancel(self) -> bool:
+ if not self._interceptors_task.done():
+ # The intercepted call is not available yet, so try
+ # to cancel it by using the generic asyncio
+ # cancellation method.
+ return self._interceptors_task.cancel()
+
+ try:
+ call = self._interceptors_task.result()
+ except AioRpcError:
+ return False
+ except asyncio.CancelledError:
+ return False
+
+ return call.cancel()
+
+ def cancelled(self) -> bool:
+ if not self._interceptors_task.done():
+ return False
+
+ try:
+ call = self._interceptors_task.result()
+ except AioRpcError as err:
+ return err.code() == grpc.StatusCode.CANCELLED
+ except asyncio.CancelledError:
+ return True
+
+ return call.cancelled()
+
+ def done(self) -> bool:
+ if not self._interceptors_task.done():
+ return False
+
+ try:
+ call = self._interceptors_task.result()
+ except (AioRpcError, asyncio.CancelledError):
+ return True
+
+ return call.done()
+
+ def add_done_callback(self, callback: DoneCallbackType) -> None:
+ if not self._interceptors_task.done():
+ self._pending_add_done_callbacks.append(callback)
+ return
+
+ try:
+ call = self._interceptors_task.result()
+ except (AioRpcError, asyncio.CancelledError):
+ callback(self)
+ return
+
+ if call.done():
+ callback(self)
+ else:
+ callback = functools.partial(self._wrap_add_done_callback, callback)
+ call.add_done_callback(callback)
+
+ def time_remaining(self) -> Optional[float]:
+ raise NotImplementedError()
+
+ async def initial_metadata(self) -> Optional[Metadata]:
+ try:
+ call = await self._interceptors_task
+ except AioRpcError as err:
+ return err.initial_metadata()
+ except asyncio.CancelledError:
+ return None
+
+ return await call.initial_metadata()
+
+ async def trailing_metadata(self) -> Optional[Metadata]:
+ try:
+ call = await self._interceptors_task
+ except AioRpcError as err:
+ return err.trailing_metadata()
+ except asyncio.CancelledError:
+ return None
+
+ return await call.trailing_metadata()
+
+ async def code(self) -> grpc.StatusCode:
+ try:
+ call = await self._interceptors_task
+ except AioRpcError as err:
+ return err.code()
+ except asyncio.CancelledError:
+ return grpc.StatusCode.CANCELLED
+
+ return await call.code()
+
+ async def details(self) -> str:
+ try:
+ call = await self._interceptors_task
+ except AioRpcError as err:
+ return err.details()
+ except asyncio.CancelledError:
+ return _LOCAL_CANCELLATION_DETAILS
+
+ return await call.details()
+
+ async def debug_error_string(self) -> Optional[str]:
+ try:
+ call = await self._interceptors_task
+ except AioRpcError as err:
+ return err.debug_error_string()
+ except asyncio.CancelledError:
+ return ''
+
+ return await call.debug_error_string()
+
+ async def wait_for_connection(self) -> None:
+ call = await self._interceptors_task
+ return await call.wait_for_connection()
+
+
+class _InterceptedUnaryResponseMixin:
+
+ def __await__(self):
+ call = yield from self._interceptors_task.__await__()
+ response = yield from call.__await__()
+ return response
+
+
+class _InterceptedStreamResponseMixin:
+ _response_aiter: Optional[AsyncIterable[ResponseType]]
+
+ def _init_stream_response_mixin(self) -> None:
+ # Initialized lazily; otherwise, if the iterator is never fully
+ # consumed, asyncio emits a logging warning.
+ self._response_aiter = None
+
+ async def _wait_for_interceptor_task_response_iterator(
+ self) -> ResponseType:
+ call = await self._interceptors_task
+ async for response in call:
+ yield response
+
+ def __aiter__(self) -> AsyncIterable[ResponseType]:
+ if self._response_aiter is None:
+ self._response_aiter = self._wait_for_interceptor_task_response_iterator(
+ )
+ return self._response_aiter
+
+ async def read(self) -> ResponseType:
+ if self._response_aiter is None:
+ self._response_aiter = self._wait_for_interceptor_task_response_iterator(
+ )
+ return await self._response_aiter.asend(None)
+
+
+class _InterceptedStreamRequestMixin:
+
+ _write_to_iterator_async_gen: Optional[AsyncIterable[RequestType]]
+ _write_to_iterator_queue: Optional[asyncio.Queue]
+ _status_code_task: Optional[asyncio.Task]
+
+ _FINISH_ITERATOR_SENTINEL = object()
+
+ def _init_stream_request_mixin(
+ self, request_iterator: Optional[RequestIterableType]
+ ) -> RequestIterableType:
+
+ if request_iterator is None:
+ # We provide our own request iterator, which acts as a proxy
+ # for the future writes that the caller will perform.
+ self._write_to_iterator_queue = asyncio.Queue(maxsize=1)
+ self._write_to_iterator_async_gen = self._proxy_writes_as_request_iterator(
+ )
+ self._status_code_task = None
+ request_iterator = self._write_to_iterator_async_gen
+ else:
+ self._write_to_iterator_queue = None
+
+ return request_iterator
+
+ async def _proxy_writes_as_request_iterator(self):
+ await self._interceptors_task
+
+ while True:
+ value = await self._write_to_iterator_queue.get()
+ if value is _InterceptedStreamRequestMixin._FINISH_ITERATOR_SENTINEL:
+ break
+ yield value
+
+ async def _write_to_iterator_queue_interruptible(self, request: RequestType,
+ call: InterceptedCall):
+ # Write the specified 'request' to the request iterator queue using the
+ # specified 'call' to allow for interruption of the write in the case
+ # of abrupt termination of the call.
+ if self._status_code_task is None:
+ self._status_code_task = self._loop.create_task(call.code())
+
+ await asyncio.wait(
+ (self._loop.create_task(self._write_to_iterator_queue.put(request)),
+ self._status_code_task),
+ return_when=asyncio.FIRST_COMPLETED)
+
+ async def write(self, request: RequestType) -> None:
+ # If no queue was created it means that requests
+ # should be expected through an iterators provided
+ # by the caller.
+ if self._write_to_iterator_queue is None:
+ raise cygrpc.UsageError(_API_STYLE_ERROR)
+
+ try:
+ call = await self._interceptors_task
+ except (asyncio.CancelledError, AioRpcError):
+ raise asyncio.InvalidStateError(_RPC_ALREADY_FINISHED_DETAILS)
+
+ if call.done():
+ raise asyncio.InvalidStateError(_RPC_ALREADY_FINISHED_DETAILS)
+ elif call._done_writing_flag:
+ raise asyncio.InvalidStateError(_RPC_HALF_CLOSED_DETAILS)
+
+ await self._write_to_iterator_queue_interruptible(request, call)
+
+ if call.done():
+ raise asyncio.InvalidStateError(_RPC_ALREADY_FINISHED_DETAILS)
+
+ async def done_writing(self) -> None:
+ """Signal peer that client is done writing.
+
+ This method is idempotent.
+ """
+ # If no queue was created it means that requests
+ # should be expected through an iterators provided
+ # by the caller.
+ if self._write_to_iterator_queue is None:
+ raise cygrpc.UsageError(_API_STYLE_ERROR)
+
+ try:
+ call = await self._interceptors_task
+ except asyncio.CancelledError:
+ raise asyncio.InvalidStateError(_RPC_ALREADY_FINISHED_DETAILS)
+
+ await self._write_to_iterator_queue_interruptible(
+ _InterceptedStreamRequestMixin._FINISH_ITERATOR_SENTINEL, call)
+
+
+class InterceptedUnaryUnaryCall(_InterceptedUnaryResponseMixin, InterceptedCall,
+ _base_call.UnaryUnaryCall):
+ """Used for running a `UnaryUnaryCall` wrapped by interceptors.
+
+ The `__await__` method is proxied to the intercepted call only when
+ the interceptor task is finished.
+ """
+
+ _loop: asyncio.AbstractEventLoop
+ _channel: cygrpc.AioChannel
+
+ # pylint: disable=too-many-arguments
+ def __init__(self, interceptors: Sequence[UnaryUnaryClientInterceptor],
+ request: RequestType, timeout: Optional[float],
+ metadata: Metadata,
+ credentials: Optional[grpc.CallCredentials],
+ wait_for_ready: Optional[bool], channel: cygrpc.AioChannel,
+ method: bytes, request_serializer: SerializingFunction,
+ response_deserializer: DeserializingFunction,
+ loop: asyncio.AbstractEventLoop) -> None:
+ self._loop = loop
+ self._channel = channel
+ interceptors_task = loop.create_task(
+ self._invoke(interceptors, method, timeout, metadata, credentials,
+ wait_for_ready, request, request_serializer,
+ response_deserializer))
+ super().__init__(interceptors_task)
+
+ # pylint: disable=too-many-arguments
+ async def _invoke(
+ self, interceptors: Sequence[UnaryUnaryClientInterceptor],
+ method: bytes, timeout: Optional[float],
+ metadata: Optional[Metadata],
+ credentials: Optional[grpc.CallCredentials],
+ wait_for_ready: Optional[bool], request: RequestType,
+ request_serializer: SerializingFunction,
+ response_deserializer: DeserializingFunction) -> UnaryUnaryCall:
+ """Run the RPC call wrapped in interceptors"""
+
+ async def _run_interceptor(
+ interceptors: List[UnaryUnaryClientInterceptor],
+ client_call_details: ClientCallDetails,
+ request: RequestType) -> _base_call.UnaryUnaryCall:
+
+ if interceptors:
+ continuation = functools.partial(_run_interceptor,
+ interceptors[1:])
+ call_or_response = await interceptors[0].intercept_unary_unary(
+ continuation, client_call_details, request)
+
+ if isinstance(call_or_response, _base_call.UnaryUnaryCall):
+ return call_or_response
+ else:
+ return UnaryUnaryCallResponse(call_or_response)
+
+ else:
+ return UnaryUnaryCall(
+ request, _timeout_to_deadline(client_call_details.timeout),
+ client_call_details.metadata,
+ client_call_details.credentials,
+ client_call_details.wait_for_ready, self._channel,
+ client_call_details.method, request_serializer,
+ response_deserializer, self._loop)
+
+ client_call_details = ClientCallDetails(method, timeout, metadata,
+ credentials, wait_for_ready)
+ return await _run_interceptor(list(interceptors), client_call_details,
+ request)
+
+ def time_remaining(self) -> Optional[float]:
+ raise NotImplementedError()
+
+
+class InterceptedUnaryStreamCall(_InterceptedStreamResponseMixin,
+ InterceptedCall, _base_call.UnaryStreamCall):
+ """Used for running a `UnaryStreamCall` wrapped by interceptors."""
+
+ _loop: asyncio.AbstractEventLoop
+ _channel: cygrpc.AioChannel
+ _last_returned_call_from_interceptors = Optional[_base_call.UnaryStreamCall]
+
+ # pylint: disable=too-many-arguments
+ def __init__(self, interceptors: Sequence[UnaryStreamClientInterceptor],
+ request: RequestType, timeout: Optional[float],
+ metadata: Metadata,
+ credentials: Optional[grpc.CallCredentials],
+ wait_for_ready: Optional[bool], channel: cygrpc.AioChannel,
+ method: bytes, request_serializer: SerializingFunction,
+ response_deserializer: DeserializingFunction,
+ loop: asyncio.AbstractEventLoop) -> None:
+ self._loop = loop
+ self._channel = channel
+ self._init_stream_response_mixin()
+ self._last_returned_call_from_interceptors = None
+ interceptors_task = loop.create_task(
+ self._invoke(interceptors, method, timeout, metadata, credentials,
+ wait_for_ready, request, request_serializer,
+ response_deserializer))
+ super().__init__(interceptors_task)
+
+ # pylint: disable=too-many-arguments
+ async def _invoke(
+ self, interceptors: Sequence[UnaryUnaryClientInterceptor],
+ method: bytes, timeout: Optional[float],
+ metadata: Optional[Metadata],
+ credentials: Optional[grpc.CallCredentials],
+ wait_for_ready: Optional[bool], request: RequestType,
+ request_serializer: SerializingFunction,
+ response_deserializer: DeserializingFunction) -> UnaryStreamCall:
+ """Run the RPC call wrapped in interceptors"""
+
+ async def _run_interceptor(
+ interceptors: List[UnaryStreamClientInterceptor],
+ client_call_details: ClientCallDetails,
+ request: RequestType,
+ ) -> _base_call.UnaryUnaryCall:
+
+ if interceptors:
+ continuation = functools.partial(_run_interceptor,
+ interceptors[1:])
+
+ call_or_response_iterator = await interceptors[
+ 0].intercept_unary_stream(continuation, client_call_details,
+ request)
+
+ if isinstance(call_or_response_iterator,
+ _base_call.UnaryStreamCall):
+ self._last_returned_call_from_interceptors = call_or_response_iterator
+ else:
+ self._last_returned_call_from_interceptors = UnaryStreamCallResponseIterator(
+ self._last_returned_call_from_interceptors,
+ call_or_response_iterator)
+ return self._last_returned_call_from_interceptors
+ else:
+ self._last_returned_call_from_interceptors = UnaryStreamCall(
+ request, _timeout_to_deadline(client_call_details.timeout),
+ client_call_details.metadata,
+ client_call_details.credentials,
+ client_call_details.wait_for_ready, self._channel,
+ client_call_details.method, request_serializer,
+ response_deserializer, self._loop)
+
+ return self._last_returned_call_from_interceptors
+
+ client_call_details = ClientCallDetails(method, timeout, metadata,
+ credentials, wait_for_ready)
+ return await _run_interceptor(list(interceptors), client_call_details,
+ request)
+
+ def time_remaining(self) -> Optional[float]:
+ raise NotImplementedError()
+
+
+class InterceptedStreamUnaryCall(_InterceptedUnaryResponseMixin,
+ _InterceptedStreamRequestMixin,
+ InterceptedCall, _base_call.StreamUnaryCall):
+ """Used for running a `StreamUnaryCall` wrapped by interceptors.
+
+ The `__await__` method is proxied to the intercepted call only when
+ the interceptor task is finished.
+ """
+
+ _loop: asyncio.AbstractEventLoop
+ _channel: cygrpc.AioChannel
+
+ # pylint: disable=too-many-arguments
+ def __init__(self, interceptors: Sequence[StreamUnaryClientInterceptor],
+ request_iterator: Optional[RequestIterableType],
+ timeout: Optional[float], metadata: Metadata,
+ credentials: Optional[grpc.CallCredentials],
+ wait_for_ready: Optional[bool], channel: cygrpc.AioChannel,
+ method: bytes, request_serializer: SerializingFunction,
+ response_deserializer: DeserializingFunction,
+ loop: asyncio.AbstractEventLoop) -> None:
+ self._loop = loop
+ self._channel = channel
+ request_iterator = self._init_stream_request_mixin(request_iterator)
+ interceptors_task = loop.create_task(
+ self._invoke(interceptors, method, timeout, metadata, credentials,
+ wait_for_ready, request_iterator, request_serializer,
+ response_deserializer))
+ super().__init__(interceptors_task)
+
+ # pylint: disable=too-many-arguments
+ async def _invoke(
+ self, interceptors: Sequence[StreamUnaryClientInterceptor],
+ method: bytes, timeout: Optional[float],
+ metadata: Optional[Metadata],
+ credentials: Optional[grpc.CallCredentials],
+ wait_for_ready: Optional[bool],
+ request_iterator: RequestIterableType,
+ request_serializer: SerializingFunction,
+ response_deserializer: DeserializingFunction) -> StreamUnaryCall:
+ """Run the RPC call wrapped in interceptors"""
+
+ async def _run_interceptor(
+ interceptors: Iterator[UnaryUnaryClientInterceptor],
+ client_call_details: ClientCallDetails,
+ request_iterator: RequestIterableType
+ ) -> _base_call.StreamUnaryCall:
+
+ if interceptors:
+ continuation = functools.partial(_run_interceptor,
+ interceptors[1:])
+
+ return await interceptors[0].intercept_stream_unary(
+ continuation, client_call_details, request_iterator)
+ else:
+ return StreamUnaryCall(
+ request_iterator,
+ _timeout_to_deadline(client_call_details.timeout),
+ client_call_details.metadata,
+ client_call_details.credentials,
+ client_call_details.wait_for_ready, self._channel,
+ client_call_details.method, request_serializer,
+ response_deserializer, self._loop)
+
+ client_call_details = ClientCallDetails(method, timeout, metadata,
+ credentials, wait_for_ready)
+ return await _run_interceptor(list(interceptors), client_call_details,
+ request_iterator)
+
+ def time_remaining(self) -> Optional[float]:
+ raise NotImplementedError()
+
+
+class InterceptedStreamStreamCall(_InterceptedStreamResponseMixin,
+ _InterceptedStreamRequestMixin,
+ InterceptedCall, _base_call.StreamStreamCall):
+ """Used for running a `StreamStreamCall` wrapped by interceptors."""
+
+ _loop: asyncio.AbstractEventLoop
+ _channel: cygrpc.AioChannel
+ _last_returned_call_from_interceptors = Optional[_base_call.UnaryStreamCall]
+
+ # pylint: disable=too-many-arguments
+ def __init__(self, interceptors: Sequence[StreamStreamClientInterceptor],
+ request_iterator: Optional[RequestIterableType],
+ timeout: Optional[float], metadata: Metadata,
+ credentials: Optional[grpc.CallCredentials],
+ wait_for_ready: Optional[bool], channel: cygrpc.AioChannel,
+ method: bytes, request_serializer: SerializingFunction,
+ response_deserializer: DeserializingFunction,
+ loop: asyncio.AbstractEventLoop) -> None:
+ self._loop = loop
+ self._channel = channel
+ self._init_stream_response_mixin()
+ request_iterator = self._init_stream_request_mixin(request_iterator)
+ self._last_returned_call_from_interceptors = None
+ interceptors_task = loop.create_task(
+ self._invoke(interceptors, method, timeout, metadata, credentials,
+ wait_for_ready, request_iterator, request_serializer,
+ response_deserializer))
+ super().__init__(interceptors_task)
+
+ # pylint: disable=too-many-arguments
+ async def _invoke(
+ self, interceptors: Sequence[StreamStreamClientInterceptor],
+ method: bytes, timeout: Optional[float],
+ metadata: Optional[Metadata],
+ credentials: Optional[grpc.CallCredentials],
+ wait_for_ready: Optional[bool],
+ request_iterator: RequestIterableType,
+ request_serializer: SerializingFunction,
+ response_deserializer: DeserializingFunction) -> StreamStreamCall:
+ """Run the RPC call wrapped in interceptors"""
+
+ async def _run_interceptor(
+ interceptors: List[StreamStreamClientInterceptor],
+ client_call_details: ClientCallDetails,
+ request_iterator: RequestIterableType
+ ) -> _base_call.StreamStreamCall:
+
+ if interceptors:
+ continuation = functools.partial(_run_interceptor,
+ interceptors[1:])
+
+ call_or_response_iterator = await interceptors[
+ 0].intercept_stream_stream(continuation,
+ client_call_details,
+ request_iterator)
+
+ if isinstance(call_or_response_iterator,
+ _base_call.StreamStreamCall):
+ self._last_returned_call_from_interceptors = call_or_response_iterator
+ else:
+ self._last_returned_call_from_interceptors = StreamStreamCallResponseIterator(
+ self._last_returned_call_from_interceptors,
+ call_or_response_iterator)
+ return self._last_returned_call_from_interceptors
+ else:
+ self._last_returned_call_from_interceptors = StreamStreamCall(
+ request_iterator,
+ _timeout_to_deadline(client_call_details.timeout),
+ client_call_details.metadata,
+ client_call_details.credentials,
+ client_call_details.wait_for_ready, self._channel,
+ client_call_details.method, request_serializer,
+ response_deserializer, self._loop)
+ return self._last_returned_call_from_interceptors
+
+ client_call_details = ClientCallDetails(method, timeout, metadata,
+ credentials, wait_for_ready)
+ return await _run_interceptor(list(interceptors), client_call_details,
+ request_iterator)
+
+ def time_remaining(self) -> Optional[float]:
+ raise NotImplementedError()
+
+
+class UnaryUnaryCallResponse(_base_call.UnaryUnaryCall):
+ """Final UnaryUnaryCall class finished with a response."""
+ _response: ResponseType
+
+ def __init__(self, response: ResponseType) -> None:
+ self._response = response
+
+ def cancel(self) -> bool:
+ return False
+
+ def cancelled(self) -> bool:
+ return False
+
+ def done(self) -> bool:
+ return True
+
+ def add_done_callback(self, unused_callback) -> None:
+ raise NotImplementedError()
+
+ def time_remaining(self) -> Optional[float]:
+ raise NotImplementedError()
+
+ async def initial_metadata(self) -> Optional[Metadata]:
+ return None
+
+ async def trailing_metadata(self) -> Optional[Metadata]:
+ return None
+
+ async def code(self) -> grpc.StatusCode:
+ return grpc.StatusCode.OK
+
+ async def details(self) -> str:
+ return ''
+
+ async def debug_error_string(self) -> Optional[str]:
+ return None
+
+ def __await__(self):
+ if False: # pylint: disable=using-constant-test
+ # This code path is never used, but a yield statement is needed
+ # for telling the interpreter that __await__ is a generator.
+ yield None
+ return self._response
+
+ async def wait_for_connection(self) -> None:
+ pass
+
+
+class _StreamCallResponseIterator:
+
+ _call: Union[_base_call.UnaryStreamCall, _base_call.StreamStreamCall]
+ _response_iterator: AsyncIterable[ResponseType]
+
+ def __init__(self, call: Union[_base_call.UnaryStreamCall,
+ _base_call.StreamStreamCall],
+ response_iterator: AsyncIterable[ResponseType]) -> None:
+ self._response_iterator = response_iterator
+ self._call = call
+
+ def cancel(self) -> bool:
+ return self._call.cancel()
+
+ def cancelled(self) -> bool:
+ return self._call.cancelled()
+
+ def done(self) -> bool:
+ return self._call.done()
+
+ def add_done_callback(self, callback) -> None:
+ self._call.add_done_callback(callback)
+
+ def time_remaining(self) -> Optional[float]:
+ return self._call.time_remaining()
+
+ async def initial_metadata(self) -> Optional[Metadata]:
+ return await self._call.initial_metadata()
+
+ async def trailing_metadata(self) -> Optional[Metadata]:
+ return await self._call.trailing_metadata()
+
+ async def code(self) -> grpc.StatusCode:
+ return await self._call.code()
+
+ async def details(self) -> str:
+ return await self._call.details()
+
+ async def debug_error_string(self) -> Optional[str]:
+ return await self._call.debug_error_string()
+
+ def __aiter__(self):
+ return self._response_iterator.__aiter__()
+
+ async def wait_for_connection(self) -> None:
+ return await self._call.wait_for_connection()
+
+
+class UnaryStreamCallResponseIterator(_StreamCallResponseIterator,
+ _base_call.UnaryStreamCall):
+    """UnaryStreamCall class which uses an alternative response iterator."""
+
+ async def read(self) -> ResponseType:
+        # Behind the scenes everything goes through the
+ # async iterator. So this path should not be reached.
+ raise NotImplementedError()
+
+
+class StreamStreamCallResponseIterator(_StreamCallResponseIterator,
+ _base_call.StreamStreamCall):
+    """StreamStreamCall class which uses an alternative response iterator."""
+
+ async def read(self) -> ResponseType:
+        # Behind the scenes everything goes through the
+ # async iterator. So this path should not be reached.
+ raise NotImplementedError()
+
+ async def write(self, request: RequestType) -> None:
+        # Behind the scenes everything goes through the
+ # async iterator provided by the InterceptedStreamStreamCall.
+ # So this path should not be reached.
+ raise NotImplementedError()
+
+ async def done_writing(self) -> None:
+        # Behind the scenes everything goes through the
+ # async iterator provided by the InterceptedStreamStreamCall.
+ # So this path should not be reached.
+ raise NotImplementedError()
+
+ @property
+ def _done_writing_flag(self) -> bool:
+ return self._call._done_writing_flag
diff --git a/contrib/python/grpcio/py3/grpc/aio/_metadata.py b/contrib/python/grpcio/py3/grpc/aio/_metadata.py
new file mode 100644
index 0000000000..970f62c059
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/aio/_metadata.py
@@ -0,0 +1,120 @@
+# Copyright 2020 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Implementation of the metadata abstraction for gRPC Asyncio Python."""
+from collections import OrderedDict
+from collections import abc
+from typing import Any, Iterator, List, Tuple, Union
+
+MetadataKey = str
+MetadataValue = Union[str, bytes]
+
+
+class Metadata(abc.Mapping):
+ """Metadata abstraction for the asynchronous calls and interceptors.
+
+ The metadata is a mapping from str -> List[str]
+
+ Traits
+ * Multiple entries are allowed for the same key
+ * The order of the values by key is preserved
+    * Getting an element by key retrieves the first mapped value
+ * Supports an immutable view of the data
+ * Allows partial mutation on the data without recreating the new object from scratch.
+ """
+
+ def __init__(self, *args: Tuple[MetadataKey, MetadataValue]) -> None:
+ self._metadata = OrderedDict()
+ for md_key, md_value in args:
+ self.add(md_key, md_value)
+
+ @classmethod
+ def from_tuple(cls, raw_metadata: tuple):
+ if raw_metadata:
+ return cls(*raw_metadata)
+ return cls()
+
+ def add(self, key: MetadataKey, value: MetadataValue) -> None:
+ self._metadata.setdefault(key, [])
+ self._metadata[key].append(value)
+
+ def __len__(self) -> int:
+ """Return the total number of elements that there are in the metadata,
+ including multiple values for the same key.
+ """
+ return sum(map(len, self._metadata.values()))
+
+ def __getitem__(self, key: MetadataKey) -> MetadataValue:
+ """When calling <metadata>[<key>], the first element of all those
+ mapped for <key> is returned.
+ """
+ try:
+ return self._metadata[key][0]
+ except (ValueError, IndexError) as e:
+ raise KeyError("{0!r}".format(key)) from e
+
+ def __setitem__(self, key: MetadataKey, value: MetadataValue) -> None:
+ """Calling metadata[<key>] = <value>
+ Maps <value> to the first instance of <key>.
+ """
+ if key not in self:
+ self._metadata[key] = [value]
+ else:
+ current_values = self.get_all(key)
+ self._metadata[key] = [value, *current_values[1:]]
+
+ def __delitem__(self, key: MetadataKey) -> None:
+ """``del metadata[<key>]`` deletes the first mapping for <key>."""
+ current_values = self.get_all(key)
+ if not current_values:
+ raise KeyError(repr(key))
+ self._metadata[key] = current_values[1:]
+
+ def delete_all(self, key: MetadataKey) -> None:
+ """Delete all mappings for <key>."""
+ del self._metadata[key]
+
+ def __iter__(self) -> Iterator[Tuple[MetadataKey, MetadataValue]]:
+ for key, values in self._metadata.items():
+ for value in values:
+ yield (key, value)
+
+ def get_all(self, key: MetadataKey) -> List[MetadataValue]:
+ """For compatibility with other Metadata abstraction objects (like in Java),
+ this would return all items under the desired <key>.
+ """
+ return self._metadata.get(key, [])
+
+ def set_all(self, key: MetadataKey, values: List[MetadataValue]) -> None:
+ self._metadata[key] = values
+
+ def __contains__(self, key: MetadataKey) -> bool:
+ return key in self._metadata
+
+ def __eq__(self, other: Any) -> bool:
+ if isinstance(other, self.__class__):
+ return self._metadata == other._metadata
+ if isinstance(other, tuple):
+ return tuple(self) == other
+ return NotImplemented # pytype: disable=bad-return-type
+
+ def __add__(self, other: Any) -> 'Metadata':
+ if isinstance(other, self.__class__):
+ return Metadata(*(tuple(self) + tuple(other)))
+ if isinstance(other, tuple):
+ return Metadata(*(tuple(self) + other))
+ return NotImplemented # pytype: disable=bad-return-type
+
+ def __repr__(self) -> str:
+ view = tuple(self)
+ return "{0}({1!r})".format(self.__class__.__name__, view)
diff --git a/contrib/python/grpcio/py3/grpc/aio/_server.py b/contrib/python/grpcio/py3/grpc/aio/_server.py
new file mode 100644
index 0000000000..1465ab6bbb
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/aio/_server.py
@@ -0,0 +1,209 @@
+# Copyright 2019 The gRPC Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Server-side implementation of gRPC Asyncio Python."""
+
+from concurrent.futures import Executor
+from typing import Any, Optional, Sequence
+
+import grpc
+from grpc import _common
+from grpc import _compression
+from grpc._cython import cygrpc
+
+from . import _base_server
+from ._interceptor import ServerInterceptor
+from ._typing import ChannelArgumentType
+
+
+def _augment_channel_arguments(base_options: ChannelArgumentType,
+ compression: Optional[grpc.Compression]):
+ compression_option = _compression.create_channel_option(compression)
+ return tuple(base_options) + compression_option
+
+
+class Server(_base_server.Server):
+ """Serves RPCs."""
+
+ def __init__(self, thread_pool: Optional[Executor],
+ generic_handlers: Optional[Sequence[grpc.GenericRpcHandler]],
+ interceptors: Optional[Sequence[Any]],
+ options: ChannelArgumentType,
+ maximum_concurrent_rpcs: Optional[int],
+ compression: Optional[grpc.Compression]):
+ self._loop = cygrpc.get_working_loop()
+ if interceptors:
+ invalid_interceptors = [
+ interceptor for interceptor in interceptors
+ if not isinstance(interceptor, ServerInterceptor)
+ ]
+ if invalid_interceptors:
+ raise ValueError(
+ 'Interceptor must be ServerInterceptor, the '
+ f'following are invalid: {invalid_interceptors}')
+ self._server = cygrpc.AioServer(
+ self._loop, thread_pool, generic_handlers, interceptors,
+ _augment_channel_arguments(options, compression),
+ maximum_concurrent_rpcs)
+
+ def add_generic_rpc_handlers(
+ self,
+ generic_rpc_handlers: Sequence[grpc.GenericRpcHandler]) -> None:
+ """Registers GenericRpcHandlers with this Server.
+
+ This method is only safe to call before the server is started.
+
+ Args:
+ generic_rpc_handlers: A sequence of GenericRpcHandlers that will be
+ used to service RPCs.
+ """
+ self._server.add_generic_rpc_handlers(generic_rpc_handlers)
+
+ def add_insecure_port(self, address: str) -> int:
+ """Opens an insecure port for accepting RPCs.
+
+ This method may only be called before starting the server.
+
+ Args:
+ address: The address for which to open a port. If the port is 0,
+ or not specified in the address, then the gRPC runtime will choose a port.
+
+ Returns:
+ An integer port on which the server will accept RPC requests.
+ """
+ return _common.validate_port_binding_result(
+ address, self._server.add_insecure_port(_common.encode(address)))
+
+ def add_secure_port(self, address: str,
+ server_credentials: grpc.ServerCredentials) -> int:
+ """Opens a secure port for accepting RPCs.
+
+ This method may only be called before starting the server.
+
+ Args:
+ address: The address for which to open a port.
+ if the port is 0, or not specified in the address, then the gRPC
+ runtime will choose a port.
+ server_credentials: A ServerCredentials object.
+
+ Returns:
+ An integer port on which the server will accept RPC requests.
+ """
+ return _common.validate_port_binding_result(
+ address,
+ self._server.add_secure_port(_common.encode(address),
+ server_credentials))
+
+ async def start(self) -> None:
+ """Starts this Server.
+
+ This method may only be called once. (i.e. it is not idempotent).
+ """
+ await self._server.start()
+
+ async def stop(self, grace: Optional[float]) -> None:
+ """Stops this Server.
+
+ This method immediately stops the server from servicing new RPCs in
+ all cases.
+
+ If a grace period is specified, this method returns immediately and all
+ RPCs active at the end of the grace period are aborted. If a grace
+ period is not specified (by passing None for grace), all existing RPCs
+ are aborted immediately and this method blocks until the last RPC
+ handler terminates.
+
+ This method is idempotent and may be called at any time. Passing a
+ smaller grace value in a subsequent call will have the effect of
+ stopping the Server sooner (passing None will have the effect of
+ stopping the server immediately). Passing a larger grace value in a
+ subsequent call will not have the effect of stopping the server later
+ (i.e. the most restrictive grace value is used).
+
+ Args:
+ grace: A duration of time in seconds or None.
+ """
+ await self._server.shutdown(grace)
+
+ async def wait_for_termination(self,
+ timeout: Optional[float] = None) -> bool:
+ """Block current coroutine until the server stops.
+
+ This is an EXPERIMENTAL API.
+
+ The wait will not consume computational resources during blocking, and
+ it will block until one of the two following conditions are met:
+
+ 1) The server is stopped or terminated;
+ 2) A timeout occurs if timeout is not `None`.
+
+ The timeout argument works in the same way as `threading.Event.wait()`.
+ https://docs.python.org/3/library/threading.html#threading.Event.wait
+
+ Args:
+ timeout: A floating point number specifying a timeout for the
+ operation in seconds.
+
+ Returns:
+          A bool indicating whether the operation timed out.
+ """
+ return await self._server.wait_for_termination(timeout)
+
+ def __del__(self):
+ """Schedules a graceful shutdown in current event loop.
+
+ The Cython AioServer doesn't hold a ref-count to this class. It should
+ be safe to slightly extend the underlying Cython object's life span.
+ """
+ if hasattr(self, '_server'):
+ if self._server.is_running():
+ cygrpc.schedule_coro_threadsafe(
+ self._server.shutdown(None),
+ self._loop,
+ )
+
+
+def server(migration_thread_pool: Optional[Executor] = None,
+ handlers: Optional[Sequence[grpc.GenericRpcHandler]] = None,
+ interceptors: Optional[Sequence[Any]] = None,
+ options: Optional[ChannelArgumentType] = None,
+ maximum_concurrent_rpcs: Optional[int] = None,
+ compression: Optional[grpc.Compression] = None):
+ """Creates a Server with which RPCs can be serviced.
+
+ Args:
+ migration_thread_pool: A futures.ThreadPoolExecutor to be used by the
+ Server to execute non-AsyncIO RPC handlers for migration purpose.
+ handlers: An optional list of GenericRpcHandlers used for executing RPCs.
+ More handlers may be added by calling add_generic_rpc_handlers any time
+ before the server is started.
+ interceptors: An optional list of ServerInterceptor objects that observe
+ and optionally manipulate the incoming RPCs before handing them over to
+ handlers. The interceptors are given control in the order they are
+ specified. This is an EXPERIMENTAL API.
+ options: An optional list of key-value pairs (:term:`channel_arguments` in gRPC runtime)
+ to configure the channel.
+ maximum_concurrent_rpcs: The maximum number of concurrent RPCs this server
+ will service before returning RESOURCE_EXHAUSTED status, or None to
+ indicate no limit.
+ compression: An element of grpc.compression, e.g.
+ grpc.compression.Gzip. This compression algorithm will be used for the
+ lifetime of the server unless overridden by set_compression.
+
+ Returns:
+ A Server object.
+ """
+ return Server(migration_thread_pool, () if handlers is None else handlers,
+ () if interceptors is None else interceptors,
+ () if options is None else options, maximum_concurrent_rpcs,
+ compression)
diff --git a/contrib/python/grpcio/py3/grpc/aio/_typing.py b/contrib/python/grpcio/py3/grpc/aio/_typing.py
new file mode 100644
index 0000000000..f9c0eb10fc
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/aio/_typing.py
@@ -0,0 +1,35 @@
+# Copyright 2019 The gRPC Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Common types for gRPC Async API"""
+
+from typing import (Any, AsyncIterable, Callable, Iterable, Sequence, Tuple,
+ TypeVar, Union)
+
+from grpc._cython.cygrpc import EOF
+
+from ._metadata import Metadata
+from ._metadata import MetadataKey
+from ._metadata import MetadataValue
+
+RequestType = TypeVar('RequestType')
+ResponseType = TypeVar('ResponseType')
+SerializingFunction = Callable[[Any], bytes]
+DeserializingFunction = Callable[[bytes], Any]
+MetadatumType = Tuple[MetadataKey, MetadataValue]
+MetadataType = Union[Metadata, Sequence[MetadatumType]]
+ChannelArgumentType = Sequence[Tuple[str, Any]]
+EOFType = type(EOF)
+DoneCallbackType = Callable[[Any], None]
+RequestIterableType = Union[Iterable[Any], AsyncIterable[Any]]
+ResponseIterableType = AsyncIterable[Any]
diff --git a/contrib/python/grpcio/py3/grpc/aio/_utils.py b/contrib/python/grpcio/py3/grpc/aio/_utils.py
new file mode 100644
index 0000000000..e5772dce2d
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/aio/_utils.py
@@ -0,0 +1,22 @@
+# Copyright 2019 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Internal utilities used by the gRPC Aio module."""
+import time
+from typing import Optional
+
+
+def _timeout_to_deadline(timeout: Optional[float]) -> Optional[float]:
+ if timeout is None:
+ return None
+ return time.time() + timeout
diff --git a/contrib/python/grpcio/py3/grpc/beta/__init__.py b/contrib/python/grpcio/py3/grpc/beta/__init__.py
new file mode 100644
index 0000000000..5fb4f3c3cf
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/beta/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/contrib/python/grpcio/py3/grpc/beta/_client_adaptations.py b/contrib/python/grpcio/py3/grpc/beta/_client_adaptations.py
new file mode 100644
index 0000000000..652ae0ea17
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/beta/_client_adaptations.py
@@ -0,0 +1,706 @@
+# Copyright 2016 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Translates gRPC's client-side API into gRPC's client-side Beta API."""
+
+import grpc
+from grpc import _common
+from grpc.beta import _metadata
+from grpc.beta import interfaces
+from grpc.framework.common import cardinality
+from grpc.framework.foundation import future
+from grpc.framework.interfaces.face import face
+
+# pylint: disable=too-many-arguments,too-many-locals,unused-argument
+
+_STATUS_CODE_TO_ABORTION_KIND_AND_ABORTION_ERROR_CLASS = {
+ grpc.StatusCode.CANCELLED:
+ (face.Abortion.Kind.CANCELLED, face.CancellationError),
+ grpc.StatusCode.UNKNOWN:
+ (face.Abortion.Kind.REMOTE_FAILURE, face.RemoteError),
+ grpc.StatusCode.DEADLINE_EXCEEDED:
+ (face.Abortion.Kind.EXPIRED, face.ExpirationError),
+ grpc.StatusCode.UNIMPLEMENTED:
+ (face.Abortion.Kind.LOCAL_FAILURE, face.LocalError),
+}
+
+
+def _effective_metadata(metadata, metadata_transformer):
+ non_none_metadata = () if metadata is None else metadata
+ if metadata_transformer is None:
+ return non_none_metadata
+ else:
+ return metadata_transformer(non_none_metadata)
+
+
+def _credentials(grpc_call_options):
+ return None if grpc_call_options is None else grpc_call_options.credentials
+
+
+def _abortion(rpc_error_call):
+ code = rpc_error_call.code()
+ pair = _STATUS_CODE_TO_ABORTION_KIND_AND_ABORTION_ERROR_CLASS.get(code)
+ error_kind = face.Abortion.Kind.LOCAL_FAILURE if pair is None else pair[0]
+ return face.Abortion(error_kind, rpc_error_call.initial_metadata(),
+ rpc_error_call.trailing_metadata(), code,
+ rpc_error_call.details())
+
+
+def _abortion_error(rpc_error_call):
+ code = rpc_error_call.code()
+ pair = _STATUS_CODE_TO_ABORTION_KIND_AND_ABORTION_ERROR_CLASS.get(code)
+ exception_class = face.AbortionError if pair is None else pair[1]
+ return exception_class(rpc_error_call.initial_metadata(),
+ rpc_error_call.trailing_metadata(), code,
+ rpc_error_call.details())
+
+
+class _InvocationProtocolContext(interfaces.GRPCInvocationContext):
+
+ def disable_next_request_compression(self):
+ pass # TODO(https://github.com/grpc/grpc/issues/4078): design, implement.
+
+
+class _Rendezvous(future.Future, face.Call):
+
+ def __init__(self, response_future, response_iterator, call):
+ self._future = response_future
+ self._iterator = response_iterator
+ self._call = call
+
+ def cancel(self):
+ return self._call.cancel()
+
+ def cancelled(self):
+ return self._future.cancelled()
+
+ def running(self):
+ return self._future.running()
+
+ def done(self):
+ return self._future.done()
+
+ def result(self, timeout=None):
+ try:
+ return self._future.result(timeout=timeout)
+ except grpc.RpcError as rpc_error_call:
+ raise _abortion_error(rpc_error_call)
+ except grpc.FutureTimeoutError:
+ raise future.TimeoutError()
+ except grpc.FutureCancelledError:
+ raise future.CancelledError()
+
+ def exception(self, timeout=None):
+ try:
+ rpc_error_call = self._future.exception(timeout=timeout)
+ if rpc_error_call is None:
+ return None
+ else:
+ return _abortion_error(rpc_error_call)
+ except grpc.FutureTimeoutError:
+ raise future.TimeoutError()
+ except grpc.FutureCancelledError:
+ raise future.CancelledError()
+
+ def traceback(self, timeout=None):
+ try:
+ return self._future.traceback(timeout=timeout)
+ except grpc.FutureTimeoutError:
+ raise future.TimeoutError()
+ except grpc.FutureCancelledError:
+ raise future.CancelledError()
+
+ def add_done_callback(self, fn):
+ self._future.add_done_callback(lambda ignored_callback: fn(self))
+
+ def __iter__(self):
+ return self
+
+ def _next(self):
+ try:
+ return next(self._iterator)
+ except grpc.RpcError as rpc_error_call:
+ raise _abortion_error(rpc_error_call)
+
+ def __next__(self):
+ return self._next()
+
+ def next(self):
+ return self._next()
+
+ def is_active(self):
+ return self._call.is_active()
+
+ def time_remaining(self):
+ return self._call.time_remaining()
+
+ def add_abortion_callback(self, abortion_callback):
+
+ def done_callback():
+ if self.code() is not grpc.StatusCode.OK:
+ abortion_callback(_abortion(self._call))
+
+ registered = self._call.add_callback(done_callback)
+ return None if registered else done_callback()
+
+ def protocol_context(self):
+ return _InvocationProtocolContext()
+
+ def initial_metadata(self):
+ return _metadata.beta(self._call.initial_metadata())
+
+ def terminal_metadata(self):
+ return _metadata.beta(self._call.terminal_metadata())
+
+ def code(self):
+ return self._call.code()
+
+ def details(self):
+ return self._call.details()
+
+
+def _blocking_unary_unary(channel, group, method, timeout, with_call,
+ protocol_options, metadata, metadata_transformer,
+ request, request_serializer, response_deserializer):
+ try:
+ multi_callable = channel.unary_unary(
+ _common.fully_qualified_method(group, method),
+ request_serializer=request_serializer,
+ response_deserializer=response_deserializer)
+ effective_metadata = _effective_metadata(metadata, metadata_transformer)
+ if with_call:
+ response, call = multi_callable.with_call(
+ request,
+ timeout=timeout,
+ metadata=_metadata.unbeta(effective_metadata),
+ credentials=_credentials(protocol_options))
+ return response, _Rendezvous(None, None, call)
+ else:
+ return multi_callable(request,
+ timeout=timeout,
+ metadata=_metadata.unbeta(effective_metadata),
+ credentials=_credentials(protocol_options))
+ except grpc.RpcError as rpc_error_call:
+ raise _abortion_error(rpc_error_call)
+
+
+def _future_unary_unary(channel, group, method, timeout, protocol_options,
+ metadata, metadata_transformer, request,
+ request_serializer, response_deserializer):
+ multi_callable = channel.unary_unary(
+ _common.fully_qualified_method(group, method),
+ request_serializer=request_serializer,
+ response_deserializer=response_deserializer)
+ effective_metadata = _effective_metadata(metadata, metadata_transformer)
+ response_future = multi_callable.future(
+ request,
+ timeout=timeout,
+ metadata=_metadata.unbeta(effective_metadata),
+ credentials=_credentials(protocol_options))
+ return _Rendezvous(response_future, None, response_future)
+
+
+def _unary_stream(channel, group, method, timeout, protocol_options, metadata,
+ metadata_transformer, request, request_serializer,
+ response_deserializer):
+ multi_callable = channel.unary_stream(
+ _common.fully_qualified_method(group, method),
+ request_serializer=request_serializer,
+ response_deserializer=response_deserializer)
+ effective_metadata = _effective_metadata(metadata, metadata_transformer)
+ response_iterator = multi_callable(
+ request,
+ timeout=timeout,
+ metadata=_metadata.unbeta(effective_metadata),
+ credentials=_credentials(protocol_options))
+ return _Rendezvous(None, response_iterator, response_iterator)
+
+
+def _blocking_stream_unary(channel, group, method, timeout, with_call,
+ protocol_options, metadata, metadata_transformer,
+ request_iterator, request_serializer,
+ response_deserializer):
+ try:
+ multi_callable = channel.stream_unary(
+ _common.fully_qualified_method(group, method),
+ request_serializer=request_serializer,
+ response_deserializer=response_deserializer)
+ effective_metadata = _effective_metadata(metadata, metadata_transformer)
+ if with_call:
+ response, call = multi_callable.with_call(
+ request_iterator,
+ timeout=timeout,
+ metadata=_metadata.unbeta(effective_metadata),
+ credentials=_credentials(protocol_options))
+ return response, _Rendezvous(None, None, call)
+ else:
+ return multi_callable(request_iterator,
+ timeout=timeout,
+ metadata=_metadata.unbeta(effective_metadata),
+ credentials=_credentials(protocol_options))
+ except grpc.RpcError as rpc_error_call:
+ raise _abortion_error(rpc_error_call)
+
+
+def _future_stream_unary(channel, group, method, timeout, protocol_options,
+ metadata, metadata_transformer, request_iterator,
+ request_serializer, response_deserializer):
+ multi_callable = channel.stream_unary(
+ _common.fully_qualified_method(group, method),
+ request_serializer=request_serializer,
+ response_deserializer=response_deserializer)
+ effective_metadata = _effective_metadata(metadata, metadata_transformer)
+ response_future = multi_callable.future(
+ request_iterator,
+ timeout=timeout,
+ metadata=_metadata.unbeta(effective_metadata),
+ credentials=_credentials(protocol_options))
+ return _Rendezvous(response_future, None, response_future)
+
+
def _stream_stream(channel, group, method, timeout, protocol_options, metadata,
                   metadata_transformer, request_iterator, request_serializer,
                   response_deserializer):
    """Invoke a stream-stream RPC, returning a rendezvous over the responses.

    Errors surface while iterating the returned rendezvous, not here.
    """
    multi_callable = channel.stream_stream(
        _common.fully_qualified_method(group, method),
        request_serializer=request_serializer,
        response_deserializer=response_deserializer)
    effective_metadata = _effective_metadata(metadata, metadata_transformer)
    response_iterator = multi_callable(
        request_iterator,
        timeout=timeout,
        metadata=_metadata.unbeta(effective_metadata),
        credentials=_credentials(protocol_options))
    return _Rendezvous(None, response_iterator, response_iterator)
+
+
class _UnaryUnaryMultiCallable(face.UnaryUnaryMultiCallable):
    """Beta-API unary-unary callable bridged onto a post-beta channel."""

    def __init__(self, channel, group, method, metadata_transformer,
                 request_serializer, response_deserializer):
        self._channel = channel
        self._group = group
        self._method = method
        self._metadata_transformer = metadata_transformer
        self._request_serializer = request_serializer
        self._response_deserializer = response_deserializer

    def __call__(self,
                 request,
                 timeout,
                 metadata=None,
                 with_call=False,
                 protocol_options=None):
        # Blocking invocation; returns (response, call) when with_call is set.
        return _blocking_unary_unary(self._channel, self._group, self._method,
                                     timeout, with_call, protocol_options,
                                     metadata, self._metadata_transformer,
                                     request, self._request_serializer,
                                     self._response_deserializer)

    def future(self, request, timeout, metadata=None, protocol_options=None):
        # Asynchronous invocation returning a beta rendezvous/future.
        return _future_unary_unary(self._channel, self._group, self._method,
                                   timeout, protocol_options, metadata,
                                   self._metadata_transformer, request,
                                   self._request_serializer,
                                   self._response_deserializer)

    def event(self,
              request,
              receiver,
              abortion_callback,
              timeout,
              metadata=None,
              protocol_options=None):
        # Event-style invocation is not supported by this bridge.
        raise NotImplementedError()
+
+
class _UnaryStreamMultiCallable(face.UnaryStreamMultiCallable):
    """Beta-API unary-stream callable bridged onto a post-beta channel."""

    def __init__(self, channel, group, method, metadata_transformer,
                 request_serializer, response_deserializer):
        self._channel = channel
        self._group = group
        self._method = method
        self._metadata_transformer = metadata_transformer
        self._request_serializer = request_serializer
        self._response_deserializer = response_deserializer

    def __call__(self, request, timeout, metadata=None, protocol_options=None):
        # Returns a beta rendezvous that iterates the response stream.
        return _unary_stream(self._channel, self._group, self._method, timeout,
                             protocol_options, metadata,
                             self._metadata_transformer, request,
                             self._request_serializer,
                             self._response_deserializer)

    def event(self,
              request,
              receiver,
              abortion_callback,
              timeout,
              metadata=None,
              protocol_options=None):
        # Event-style invocation is not supported by this bridge.
        raise NotImplementedError()
+
+
class _StreamUnaryMultiCallable(face.StreamUnaryMultiCallable):
    """Beta-API stream-unary callable bridged onto a post-beta channel."""

    def __init__(self, channel, group, method, metadata_transformer,
                 request_serializer, response_deserializer):
        self._channel = channel
        self._group = group
        self._method = method
        self._metadata_transformer = metadata_transformer
        self._request_serializer = request_serializer
        self._response_deserializer = response_deserializer

    def __call__(self,
                 request_iterator,
                 timeout,
                 metadata=None,
                 with_call=False,
                 protocol_options=None):
        # Blocking invocation; returns (response, call) when with_call is set.
        return _blocking_stream_unary(self._channel, self._group, self._method,
                                      timeout, with_call, protocol_options,
                                      metadata, self._metadata_transformer,
                                      request_iterator,
                                      self._request_serializer,
                                      self._response_deserializer)

    def future(self,
               request_iterator,
               timeout,
               metadata=None,
               protocol_options=None):
        # Asynchronous invocation returning a beta rendezvous/future.
        return _future_stream_unary(self._channel, self._group, self._method,
                                    timeout, protocol_options, metadata,
                                    self._metadata_transformer,
                                    request_iterator, self._request_serializer,
                                    self._response_deserializer)

    def event(self,
              receiver,
              abortion_callback,
              timeout,
              metadata=None,
              protocol_options=None):
        # Event-style invocation is not supported by this bridge.
        raise NotImplementedError()
+
+
class _StreamStreamMultiCallable(face.StreamStreamMultiCallable):
    """Beta-API stream-stream callable bridged onto a post-beta channel."""

    def __init__(self, channel, group, method, metadata_transformer,
                 request_serializer, response_deserializer):
        self._channel = channel
        self._group = group
        self._method = method
        self._metadata_transformer = metadata_transformer
        self._request_serializer = request_serializer
        self._response_deserializer = response_deserializer

    def __call__(self,
                 request_iterator,
                 timeout,
                 metadata=None,
                 protocol_options=None):
        # Returns a beta rendezvous that iterates the response stream.
        return _stream_stream(self._channel, self._group, self._method, timeout,
                              protocol_options, metadata,
                              self._metadata_transformer, request_iterator,
                              self._request_serializer,
                              self._response_deserializer)

    def event(self,
              receiver,
              abortion_callback,
              timeout,
              metadata=None,
              protocol_options=None):
        # Event-style invocation is not supported by this bridge.
        raise NotImplementedError()
+
+
class _GenericStub(face.GenericStub):
    """face.GenericStub implemented on top of a post-beta channel."""

    def __init__(self, channel, metadata_transformer, request_serializers,
                 response_deserializers):
        self._channel = channel
        self._metadata_transformer = metadata_transformer
        self._request_serializers = request_serializers or {}
        self._response_deserializers = response_deserializers or {}

    def _behaviors(self, group, method):
        # One-stop lookup of the (de)serializers registered for the method;
        # either may be absent, in which case None is passed through.
        return (self._request_serializers.get((group, method)),
                self._response_deserializers.get((group, method)))

    def blocking_unary_unary(self,
                             group,
                             method,
                             request,
                             timeout,
                             metadata=None,
                             with_call=None,
                             protocol_options=None):
        serializer, deserializer = self._behaviors(group, method)
        return _blocking_unary_unary(self._channel, group, method, timeout,
                                     with_call, protocol_options, metadata,
                                     self._metadata_transformer, request,
                                     serializer, deserializer)

    def future_unary_unary(self,
                           group,
                           method,
                           request,
                           timeout,
                           metadata=None,
                           protocol_options=None):
        serializer, deserializer = self._behaviors(group, method)
        return _future_unary_unary(self._channel, group, method, timeout,
                                   protocol_options, metadata,
                                   self._metadata_transformer, request,
                                   serializer, deserializer)

    def inline_unary_stream(self,
                            group,
                            method,
                            request,
                            timeout,
                            metadata=None,
                            protocol_options=None):
        serializer, deserializer = self._behaviors(group, method)
        return _unary_stream(self._channel, group, method, timeout,
                             protocol_options, metadata,
                             self._metadata_transformer, request, serializer,
                             deserializer)

    def blocking_stream_unary(self,
                              group,
                              method,
                              request_iterator,
                              timeout,
                              metadata=None,
                              with_call=None,
                              protocol_options=None):
        serializer, deserializer = self._behaviors(group, method)
        return _blocking_stream_unary(self._channel, group, method, timeout,
                                      with_call, protocol_options, metadata,
                                      self._metadata_transformer,
                                      request_iterator, serializer,
                                      deserializer)

    def future_stream_unary(self,
                            group,
                            method,
                            request_iterator,
                            timeout,
                            metadata=None,
                            protocol_options=None):
        serializer, deserializer = self._behaviors(group, method)
        return _future_stream_unary(self._channel, group, method, timeout,
                                    protocol_options, metadata,
                                    self._metadata_transformer,
                                    request_iterator, serializer, deserializer)

    def inline_stream_stream(self,
                             group,
                             method,
                             request_iterator,
                             timeout,
                             metadata=None,
                             protocol_options=None):
        serializer, deserializer = self._behaviors(group, method)
        return _stream_stream(self._channel, group, method, timeout,
                              protocol_options, metadata,
                              self._metadata_transformer, request_iterator,
                              serializer, deserializer)

    def event_unary_unary(self,
                          group,
                          method,
                          request,
                          receiver,
                          abortion_callback,
                          timeout,
                          metadata=None,
                          protocol_options=None):
        # Event-style invocation was never carried over to this bridge.
        raise NotImplementedError()

    def event_unary_stream(self,
                           group,
                           method,
                           request,
                           receiver,
                           abortion_callback,
                           timeout,
                           metadata=None,
                           protocol_options=None):
        raise NotImplementedError()

    def event_stream_unary(self,
                           group,
                           method,
                           receiver,
                           abortion_callback,
                           timeout,
                           metadata=None,
                           protocol_options=None):
        raise NotImplementedError()

    def event_stream_stream(self,
                            group,
                            method,
                            receiver,
                            abortion_callback,
                            timeout,
                            metadata=None,
                            protocol_options=None):
        raise NotImplementedError()

    def unary_unary(self, group, method):
        serializer, deserializer = self._behaviors(group, method)
        return _UnaryUnaryMultiCallable(self._channel, group, method,
                                        self._metadata_transformer, serializer,
                                        deserializer)

    def unary_stream(self, group, method):
        serializer, deserializer = self._behaviors(group, method)
        return _UnaryStreamMultiCallable(self._channel, group, method,
                                         self._metadata_transformer,
                                         serializer, deserializer)

    def stream_unary(self, group, method):
        serializer, deserializer = self._behaviors(group, method)
        return _StreamUnaryMultiCallable(self._channel, group, method,
                                         self._metadata_transformer,
                                         serializer, deserializer)

    def stream_stream(self, group, method):
        serializer, deserializer = self._behaviors(group, method)
        return _StreamStreamMultiCallable(self._channel, group, method,
                                          self._metadata_transformer,
                                          serializer, deserializer)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Exceptions are never suppressed by the context manager.
        return False
+
+
class _DynamicStub(face.DynamicStub):
    """face.DynamicStub resolving attribute access to RPC multi-callables."""

    def __init__(self, backing_generic_stub, group, cardinalities):
        self._generic_stub = backing_generic_stub
        self._group = group
        self._cardinalities = cardinalities

    def __getattr__(self, attr):
        # Map the method's cardinality onto the matching factory of the
        # backing generic stub; identity comparison mirrors the enum checks.
        method_cardinality = self._cardinalities.get(attr)
        factories = (
            (cardinality.Cardinality.UNARY_UNARY,
             self._generic_stub.unary_unary),
            (cardinality.Cardinality.UNARY_STREAM,
             self._generic_stub.unary_stream),
            (cardinality.Cardinality.STREAM_UNARY,
             self._generic_stub.stream_unary),
            (cardinality.Cardinality.STREAM_STREAM,
             self._generic_stub.stream_stream),
        )
        for kind, factory in factories:
            if method_cardinality is kind:
                return factory(self._group, attr)
        raise AttributeError('_DynamicStub object has no attribute "%s"!' %
                             attr)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        return False
+
+
def generic_stub(channel, host, metadata_transformer, request_serializers,
                 response_deserializers):
    """Create a face.GenericStub over the given post-beta channel.

    The 'host' argument is accepted for interface compatibility but unused.
    """
    return _GenericStub(channel, metadata_transformer, request_serializers,
                        response_deserializers)
+
+
def dynamic_stub(channel, service, cardinalities, host, metadata_transformer,
                 request_serializers, response_deserializers):
    """Create a face.DynamicStub for `service` over a fresh generic stub.

    The 'host' argument is accepted for interface compatibility but unused.
    """
    return _DynamicStub(
        _GenericStub(channel, metadata_transformer, request_serializers,
                     response_deserializers), service, cardinalities)
diff --git a/contrib/python/grpcio/py3/grpc/beta/_metadata.py b/contrib/python/grpcio/py3/grpc/beta/_metadata.py
new file mode 100644
index 0000000000..b7c8535285
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/beta/_metadata.py
@@ -0,0 +1,52 @@
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""API metadata conversion utilities."""
+
+import collections
+
# A single metadata entry: the beta API models metadata as a sequence of
# these (key, value) pairs.
_Metadatum = collections.namedtuple('_Metadatum', (
    'key',
    'value',
))
+
+
def _beta_metadatum(key, value):
    """Return a _Metadatum with key and value coerced to ASCII bytes."""

    def _as_bytes(text):
        # Already-bytes inputs pass through untouched.
        return text if isinstance(text, bytes) else text.encode('ascii')

    return _Metadatum(_as_bytes(key), _as_bytes(value))
+
+
def _metadatum(beta_key, beta_value):
    """Convert one beta metadata pair to the post-beta (str-keyed) form."""
    key = beta_key if isinstance(beta_key, str) else beta_key.decode('utf8')
    if isinstance(beta_value, str) or key.endswith('-bin'):
        # Binary-suffixed keys keep their raw bytes value.
        value = beta_value
    else:
        value = beta_value.decode('utf8')
    return _Metadatum(key, value)
+
+
def beta(metadata):
    """Convert post-beta metadata into a tuple of beta _Metadatum values."""
    if metadata is None:
        return ()
    converted = [_beta_metadatum(key, value) for key, value in metadata]
    return tuple(converted)
+
+
def unbeta(beta_metadata):
    """Convert beta metadata into a tuple of post-beta (key, value) pairs."""
    if beta_metadata is None:
        return ()
    converted = [
        _metadatum(beta_key, beta_value)
        for beta_key, beta_value in beta_metadata
    ]
    return tuple(converted)
diff --git a/contrib/python/grpcio/py3/grpc/beta/_server_adaptations.py b/contrib/python/grpcio/py3/grpc/beta/_server_adaptations.py
new file mode 100644
index 0000000000..8843a3c550
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/beta/_server_adaptations.py
@@ -0,0 +1,385 @@
+# Copyright 2016 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Translates gRPC's server-side API into gRPC's server-side Beta API."""
+
+import collections
+import threading
+
+import grpc
+from grpc import _common
+from grpc.beta import _metadata
+from grpc.beta import interfaces
+from grpc.framework.common import cardinality
+from grpc.framework.common import style
+from grpc.framework.foundation import abandonment
+from grpc.framework.foundation import logging_pool
+from grpc.framework.foundation import stream
+from grpc.framework.interfaces.face import face
+
# pylint: disable=too-many-return-statements

# Worker-thread count used by server() when the caller supplies neither a
# thread pool nor a pool size.
_DEFAULT_POOL_SIZE = 8
+
+
class _ServerProtocolContext(interfaces.GRPCServicerContext):
    """GRPCServicerContext adapter over a post-beta servicer context."""

    def __init__(self, servicer_context):
        self._servicer_context = servicer_context

    def peer(self):
        # Identity of the remote peer as reported by the wrapped context.
        return self._servicer_context.peer()

    def disable_next_response_compression(self):
        pass  # TODO(https://github.com/grpc/grpc/issues/4078): design, implement.
+
+
class _FaceServicerContext(face.ServicerContext):
    """face.ServicerContext adapter over a post-beta servicer context."""

    def __init__(self, servicer_context):
        self._servicer_context = servicer_context

    def is_active(self):
        return self._servicer_context.is_active()

    def time_remaining(self):
        return self._servicer_context.time_remaining()

    def add_abortion_callback(self, abortion_callback):
        # Deliberately unsupported on the server side of the bridge.
        raise NotImplementedError(
            'add_abortion_callback no longer supported server-side!')

    def cancel(self):
        self._servicer_context.cancel()

    def protocol_context(self):
        return _ServerProtocolContext(self._servicer_context)

    def invocation_metadata(self):
        # Convert post-beta metadata into beta _Metadatum pairs.
        return _metadata.beta(self._servicer_context.invocation_metadata())

    def initial_metadata(self, initial_metadata):
        self._servicer_context.send_initial_metadata(
            _metadata.unbeta(initial_metadata))

    def terminal_metadata(self, terminal_metadata):
        self._servicer_context.set_terminal_metadata(
            _metadata.unbeta(terminal_metadata))

    def code(self, code):
        self._servicer_context.set_code(code)

    def details(self, details):
        self._servicer_context.set_details(details)
+
+
def _adapt_unary_request_inline(unary_request_inline):
    """Wrap an inline unary-request behavior to accept a post-beta context."""

    def adaptation(request, servicer_context):
        return unary_request_inline(request,
                                    _FaceServicerContext(servicer_context))

    return adaptation
+
+
def _adapt_stream_request_inline(stream_request_inline):
    """Wrap an inline stream-request behavior to accept a post-beta context."""

    def adaptation(request_iterator, servicer_context):
        return stream_request_inline(request_iterator,
                                     _FaceServicerContext(servicer_context))

    return adaptation
+
+
class _Callback(stream.Consumer):
    """Thread-safe buffer between an event-style producer and a blocking consumer.

    Producers push values through the stream.Consumer interface while consumer
    threads block in draw_one_value/draw_all_values; a single condition
    variable coordinates the hand-off and cancellation.
    """

    def __init__(self):
        self._condition = threading.Condition()
        # Values produced but not yet drawn.
        self._values = []
        # True once the producer has signalled normal completion.
        self._terminated = False
        # True once the RPC was cancelled; draws then raise Abandoned.
        self._cancelled = False

    def consume(self, value):
        with self._condition:
            self._values.append(value)
            self._condition.notify_all()

    def terminate(self):
        with self._condition:
            self._terminated = True
            self._condition.notify_all()

    def consume_and_terminate(self, value):
        # Deliver a final value and mark completion under one lock hold.
        with self._condition:
            self._values.append(value)
            self._terminated = True
            self._condition.notify_all()

    def cancel(self):
        with self._condition:
            self._cancelled = True
            self._condition.notify_all()

    def draw_one_value(self):
        """Block for the next value; None signals normal termination.

        Raises:
          abandonment.Abandoned: If the RPC was cancelled.
        """
        with self._condition:
            while True:
                # Cancellation wins even if undrawn values remain.
                if self._cancelled:
                    raise abandonment.Abandoned()
                elif self._values:
                    return self._values.pop(0)
                elif self._terminated:
                    return None
                else:
                    self._condition.wait()

    def draw_all_values(self):
        """Block until termination, then return every buffered value.

        Raises:
          abandonment.Abandoned: If the RPC was cancelled.
        """
        with self._condition:
            while True:
                if self._cancelled:
                    raise abandonment.Abandoned()
                elif self._terminated:
                    all_values = tuple(self._values)
                    # Poison the buffer; drawing a second time is an error.
                    self._values = None
                    return all_values
                else:
                    self._condition.wait()
+
+
def _run_request_pipe_thread(request_iterator, request_consumer,
                             servicer_context):
    """Pump requests from an iterator into a consumer on a daemon thread.

    The pump stops early once the RPC is no longer active; the consumer is
    terminated only when the iterator is exhausted while the RPC still lives.
    """
    # NOTE(review): thread_joined is created and polled but never set
    # anywhere visible; kept for parity with the original behavior.
    thread_joined = threading.Event()

    def _pump():
        for request in request_iterator:
            if not servicer_context.is_active() or thread_joined.is_set():
                return
            request_consumer.consume(request)
        if not servicer_context.is_active() or thread_joined.is_set():
            return
        request_consumer.terminate()

    pump_thread = threading.Thread(target=_pump)
    pump_thread.daemon = True
    pump_thread.start()
+
+
def _adapt_unary_unary_event(unary_unary_event):
    """Adapt an event-style unary-unary behavior to an inline handler."""

    def adaptation(request, servicer_context):
        callback = _Callback()
        if not servicer_context.add_callback(callback.cancel):
            # RPC already terminated; never invoke the behavior.
            raise abandonment.Abandoned()
        unary_unary_event(request, callback.consume_and_terminate,
                          _FaceServicerContext(servicer_context))
        # Blocks until the behavior delivers its single response.
        return callback.draw_all_values()[0]

    return adaptation
+
+
def _adapt_unary_stream_event(unary_stream_event):
    """Adapt an event-style unary-stream behavior to a response generator."""

    def adaptation(request, servicer_context):
        callback = _Callback()
        if not servicer_context.add_callback(callback.cancel):
            raise abandonment.Abandoned()
        unary_stream_event(request, callback,
                           _FaceServicerContext(servicer_context))
        while True:
            response = callback.draw_one_value()
            if response is None:
                # None signals producer termination.
                return
            else:
                yield response

    return adaptation
+
+
def _adapt_stream_unary_event(stream_unary_event):
    """Adapt an event-style stream-unary behavior to an inline handler."""

    def adaptation(request_iterator, servicer_context):
        callback = _Callback()
        if not servicer_context.add_callback(callback.cancel):
            raise abandonment.Abandoned()
        request_consumer = stream_unary_event(
            callback.consume_and_terminate,
            _FaceServicerContext(servicer_context))
        # Requests are piped to the behavior's consumer on a daemon thread.
        _run_request_pipe_thread(request_iterator, request_consumer,
                                 servicer_context)
        # Blocks until the behavior delivers its single response.
        return callback.draw_all_values()[0]

    return adaptation
+
+
def _adapt_stream_stream_event(stream_stream_event):
    """Adapt an event-style stream-stream behavior to a response generator."""

    def adaptation(request_iterator, servicer_context):
        callback = _Callback()
        if not servicer_context.add_callback(callback.cancel):
            raise abandonment.Abandoned()
        request_consumer = stream_stream_event(
            callback, _FaceServicerContext(servicer_context))
        # Requests are piped to the behavior's consumer on a daemon thread.
        _run_request_pipe_thread(request_iterator, request_consumer,
                                 servicer_context)
        while True:
            response = callback.draw_one_value()
            if response is None:
                # None signals producer termination.
                return
            else:
                yield response

    return adaptation
+
+
class _SimpleMethodHandler(
        collections.namedtuple('_MethodHandler', (
            'request_streaming',
            'response_streaming',
            'request_deserializer',
            'response_serializer',
            'unary_unary',
            'unary_stream',
            'stream_unary',
            'stream_stream',
        )), grpc.RpcMethodHandler):
    """Immutable grpc.RpcMethodHandler; exactly one behavior field is set."""
    pass
+
+
def _simple_method_handler(implementation, request_deserializer,
                           response_serializer):
    """Build a grpc.RpcMethodHandler for a beta method implementation.

    Dispatches on the implementation's service style (inline vs. event) and
    cardinality, adapting the beta behavior into post-beta handler form.

    Raises:
      ValueError: If the style/cardinality combination is unrecognized.
    """

    def _handler(request_streaming, response_streaming, unary_unary=None,
                 unary_stream=None, stream_unary=None, stream_stream=None):
        # Exactly one of the four behavior slots is populated per handler.
        return _SimpleMethodHandler(request_streaming, response_streaming,
                                    request_deserializer, response_serializer,
                                    unary_unary, unary_stream, stream_unary,
                                    stream_stream)

    if implementation.style is style.Service.INLINE:
        if implementation.cardinality is cardinality.Cardinality.UNARY_UNARY:
            return _handler(
                False, False,
                unary_unary=_adapt_unary_request_inline(
                    implementation.unary_unary_inline))
        elif implementation.cardinality is cardinality.Cardinality.UNARY_STREAM:
            return _handler(
                False, True,
                unary_stream=_adapt_unary_request_inline(
                    implementation.unary_stream_inline))
        elif implementation.cardinality is cardinality.Cardinality.STREAM_UNARY:
            return _handler(
                True, False,
                stream_unary=_adapt_stream_request_inline(
                    implementation.stream_unary_inline))
        elif implementation.cardinality is cardinality.Cardinality.STREAM_STREAM:
            return _handler(
                True, True,
                stream_stream=_adapt_stream_request_inline(
                    implementation.stream_stream_inline))
    elif implementation.style is style.Service.EVENT:
        if implementation.cardinality is cardinality.Cardinality.UNARY_UNARY:
            return _handler(
                False, False,
                unary_unary=_adapt_unary_unary_event(
                    implementation.unary_unary_event))
        elif implementation.cardinality is cardinality.Cardinality.UNARY_STREAM:
            return _handler(
                False, True,
                unary_stream=_adapt_unary_stream_event(
                    implementation.unary_stream_event))
        elif implementation.cardinality is cardinality.Cardinality.STREAM_UNARY:
            return _handler(
                True, False,
                stream_unary=_adapt_stream_unary_event(
                    implementation.stream_unary_event))
        elif implementation.cardinality is cardinality.Cardinality.STREAM_STREAM:
            return _handler(
                True, True,
                stream_stream=_adapt_stream_stream_event(
                    implementation.stream_stream_event))
    raise ValueError()
+
+
def _flatten_method_pair_map(method_pair_map):
    """Re-key a {(group, method): value} map by fully-qualified method name."""
    pairs = method_pair_map or {}
    return {
        _common.fully_qualified_method(pair[0], pair[1]): behavior
        for pair, behavior in pairs.items()
    }
+
+
class _GenericRpcHandler(grpc.GenericRpcHandler):
    """grpc.GenericRpcHandler serving beta-style method implementations."""

    def __init__(self, method_implementations, multi_method_implementation,
                 request_deserializers, response_serializers):
        # All maps are re-keyed from (group, method) pairs to fully-qualified
        # method names so lookups match handler_call_details.method.
        self._method_implementations = _flatten_method_pair_map(
            method_implementations)
        self._request_deserializers = _flatten_method_pair_map(
            request_deserializers)
        self._response_serializers = _flatten_method_pair_map(
            response_serializers)
        self._multi_method_implementation = multi_method_implementation

    def service(self, handler_call_details):
        method_implementation = self._method_implementations.get(
            handler_call_details.method)
        if method_implementation is not None:
            return _simple_method_handler(
                method_implementation,
                self._request_deserializers.get(handler_call_details.method),
                self._response_serializers.get(handler_call_details.method))
        elif self._multi_method_implementation is None:
            return None
        else:
            try:
                # NOTE(review): multi-method dispatch was never implemented
                # upstream; this branch currently always returns None.
                return None  #TODO(nathaniel): call the multimethod.
            except face.NoSuchMethodError:
                return None
+
+
class _Server(interfaces.Server):
    """Beta interfaces.Server facade over a post-beta grpc.Server."""

    def __init__(self, grpc_server):
        self._grpc_server = grpc_server

    def add_insecure_port(self, address):
        return self._grpc_server.add_insecure_port(address)

    def add_secure_port(self, address, server_credentials):
        return self._grpc_server.add_secure_port(address, server_credentials)

    def start(self):
        self._grpc_server.start()

    def stop(self, grace):
        # Returns whatever the underlying server's stop() returns.
        return self._grpc_server.stop(grace)

    def __enter__(self):
        self._grpc_server.start()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Immediate (no-grace) shutdown; exceptions are not suppressed.
        self._grpc_server.stop(None)
        return False
+
+
def server(service_implementations, multi_method_implementation,
           request_deserializers, response_serializers, thread_pool,
           thread_pool_size):
    """Create a beta Server backed by a post-beta grpc.Server."""
    handler = _GenericRpcHandler(service_implementations,
                                 multi_method_implementation,
                                 request_deserializers, response_serializers)
    if thread_pool is not None:
        pool = thread_pool
    else:
        # Fall back to a logging pool of the requested (or default) size.
        size = (_DEFAULT_POOL_SIZE
                if thread_pool_size is None else thread_pool_size)
        pool = logging_pool.pool(size)
    return _Server(grpc.server(pool, handlers=(handler,)))
diff --git a/contrib/python/grpcio/py3/grpc/beta/implementations.py b/contrib/python/grpcio/py3/grpc/beta/implementations.py
new file mode 100644
index 0000000000..43312aac7c
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/beta/implementations.py
@@ -0,0 +1,311 @@
+# Copyright 2015-2016 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Entry points into the Beta API of gRPC Python."""
+
+# threading is referenced from specification in this module.
+import threading # pylint: disable=unused-import
+
+# interfaces, cardinality, and face are referenced from specification in this
+# module.
+import grpc
+from grpc import _auth
+from grpc.beta import _client_adaptations
+from grpc.beta import _metadata
+from grpc.beta import _server_adaptations
+from grpc.beta import interfaces # pylint: disable=unused-import
+from grpc.framework.common import cardinality # pylint: disable=unused-import
+from grpc.framework.interfaces.face import \
+ face # pylint: disable=unused-import
+
+# pylint: disable=too-many-arguments
+
# Re-exported post-beta credentials types; the beta API shares them directly.
ChannelCredentials = grpc.ChannelCredentials
ssl_channel_credentials = grpc.ssl_channel_credentials
CallCredentials = grpc.CallCredentials
+
+
def metadata_call_credentials(metadata_plugin, name=None):
    """Wrap a beta metadata plugin as post-beta CallCredentials.

    The plugin's callback receives beta metadata; it is converted with
    _metadata.unbeta before being handed to the post-beta machinery.
    """

    def plugin(context, callback):

        def wrapped_callback(beta_metadata, error):
            callback(_metadata.unbeta(beta_metadata), error)

        metadata_plugin(context, wrapped_callback)

    return grpc.metadata_call_credentials(plugin, name=name)
+
+
def google_call_credentials(credentials):
    """Construct CallCredentials from GoogleCredentials.

    Args:
      credentials: A GoogleCredentials object from the oauth2client library.

    Returns:
      A CallCredentials object for use in a GRPCCallOptions object.
    """
    # _auth.GoogleCallCredentials adapts the oauth2client credentials to the
    # metadata-plugin interface consumed above.
    return metadata_call_credentials(_auth.GoogleCallCredentials(credentials))
+
+
# These credentials helpers need no beta/post-beta translation.
access_token_call_credentials = grpc.access_token_call_credentials
composite_call_credentials = grpc.composite_call_credentials
composite_channel_credentials = grpc.composite_channel_credentials
+
+
class Channel(object):
    """A channel to a remote host through which RPCs may be conducted.

    Only the "subscribe" and "unsubscribe" methods are supported for
    application use. This class' instance constructor and all other attributes
    are unsupported.
    """

    def __init__(self, channel):
        # The wrapped post-beta grpc.Channel.
        self._channel = channel

    def subscribe(self, callback, try_to_connect=None):
        """Subscribes to this Channel's connectivity.

        Args:
          callback: A callable to be invoked and passed an
            interfaces.ChannelConnectivity identifying this Channel's
            connectivity. It is invoked immediately upon subscription and
            again for every subsequent connectivity change until
            unsubscribed.
          try_to_connect: A boolean indicating whether or not this Channel
            should attempt to connect if it is not already connected and
            ready to conduct RPCs.
        """
        self._channel.subscribe(callback, try_to_connect=try_to_connect)

    def unsubscribe(self, callback):
        """Unsubscribes a callback from this Channel's connectivity.

        Args:
          callback: A callable previously registered with this Channel via
            its "subscribe" method.
        """
        self._channel.unsubscribe(callback)
+
+
def insecure_channel(host, port):
    """Creates an insecure Channel to a remote host.

    Args:
      host: The name of the remote host to which to connect.
      port: The port of the remote host to which to connect.
        If None only the 'host' part will be used.

    Returns:
      A Channel to the remote host through which RPCs may be conducted.
    """
    target = host if port is None else '%s:%d' % (host, port)
    return Channel(grpc.insecure_channel(target))
+
+
def secure_channel(host, port, channel_credentials):
    """Creates a secure Channel to a remote host.

    Args:
      host: The name of the remote host to which to connect.
      port: The port of the remote host to which to connect.
        If None only the 'host' part will be used.
      channel_credentials: A ChannelCredentials.

    Returns:
      A secure Channel to the remote host through which RPCs may be conducted.
    """
    target = host if port is None else '%s:%d' % (host, port)
    return Channel(grpc.secure_channel(target, channel_credentials))
+
+
class StubOptions(object):
    """A value encapsulating the various options for creation of a Stub.

    This class and its instances have no supported interface - it exists to define
    the type of its instances and its instances exist to be passed to other
    functions.
    """

    def __init__(self, host, request_serializers, response_deserializers,
                 metadata_transformer, thread_pool, thread_pool_size):
        # See stub_options() for the meaning of each field.
        self.host = host
        self.request_serializers = request_serializers
        self.response_deserializers = response_deserializers
        self.metadata_transformer = metadata_transformer
        self.thread_pool = thread_pool
        self.thread_pool_size = thread_pool_size
+
+
# Default options used when a caller passes options=None to stub factories.
_EMPTY_STUB_OPTIONS = StubOptions(None, None, None, None, None, None)
+
+
def stub_options(host=None,
                 request_serializers=None,
                 response_deserializers=None,
                 metadata_transformer=None,
                 thread_pool=None,
                 thread_pool_size=None):
    """Creates a StubOptions value to be passed at stub creation.

    All parameters are optional and should always be passed by keyword.

    Args:
      host: A host string to set on RPC calls.
      request_serializers: A dictionary from service name-method name pair to
        request serialization behavior.
      response_deserializers: A dictionary from service name-method name pair to
        response deserialization behavior.
      metadata_transformer: A callable that given a metadata object produces
        another metadata object to be used in the underlying communication on the
        wire.
      thread_pool: A thread pool to use in stubs.
      thread_pool_size: The size of thread pool to create for use in stubs;
        ignored if thread_pool has been passed.

    Returns:
      A StubOptions value created from the passed parameters.
    """
    # Simple value constructor; all validation is deferred to consumers.
    return StubOptions(host, request_serializers, response_deserializers,
                       metadata_transformer, thread_pool, thread_pool_size)
+
+
def generic_stub(channel, options=None):
    """Creates a face.GenericStub on which RPCs can be made.

    Args:
      channel: A Channel for use by the created stub.
      options: A StubOptions customizing the created stub.

    Returns:
      A face.GenericStub on which RPCs can be made.
    """
    effective_options = _EMPTY_STUB_OPTIONS if options is None else options
    # Unwrap the beta Channel to reach the underlying post-beta grpc.Channel.
    return _client_adaptations.generic_stub(
        channel._channel,  # pylint: disable=protected-access
        effective_options.host,
        effective_options.metadata_transformer,
        effective_options.request_serializers,
        effective_options.response_deserializers)
+
+
def dynamic_stub(channel, service, cardinalities, options=None):
    """Creates a face.DynamicStub with which RPCs can be invoked.

    Args:
      channel: A Channel for the returned face.DynamicStub to use.
      service: The package-qualified full name of the service.
      cardinalities: A dictionary from RPC method name to
        cardinality.Cardinality value identifying the cardinality of the RPC
        method.
      options: An optional StubOptions value further customizing the
        functionality of the returned face.DynamicStub.

    Returns:
      A face.DynamicStub with which RPCs can be invoked.
    """
    opts = options if options is not None else _EMPTY_STUB_OPTIONS
    return _client_adaptations.dynamic_stub(
        channel._channel,  # pylint: disable=protected-access
        service,
        cardinalities,
        opts.host,
        opts.metadata_transformer,
        opts.request_serializers,
        opts.response_deserializers)
+
+
# Server-side credential types are re-exported unchanged from the GA API.
ServerCredentials = grpc.ServerCredentials
ssl_server_credentials = grpc.ssl_server_credentials
+
+
class ServerOptions(object):
    """A value encapsulating the various options for creation of a Server.

    This class and its instances have no supported interface - it exists to define
    the type of its instances and its instances exist to be passed to other
    functions.
    """

    def __init__(self, multi_method_implementation, request_deserializers,
                 response_serializers, thread_pool, thread_pool_size,
                 default_timeout, maximum_timeout):
        """Stores the supplied server-customization values verbatim.

        No validation is performed; see server_options() for the meaning of
        each argument.  The attributes are read later by server().
        """
        self.multi_method_implementation = multi_method_implementation
        self.request_deserializers = request_deserializers
        self.response_serializers = response_serializers
        self.thread_pool = thread_pool
        self.thread_pool_size = thread_pool_size
        self.default_timeout = default_timeout
        self.maximum_timeout = maximum_timeout
+
+
# Shared default used whenever a caller passes options=None to server().
_EMPTY_SERVER_OPTIONS = ServerOptions(None, None, None, None, None, None, None)
+
+
def server_options(multi_method_implementation=None,
                   request_deserializers=None,
                   response_serializers=None,
                   thread_pool=None,
                   thread_pool_size=None,
                   default_timeout=None,
                   maximum_timeout=None):
    """Creates a ServerOptions value to be passed at server creation.

    All parameters are optional and should always be passed by keyword.

    Args:
      multi_method_implementation: A face.MultiMethodImplementation to be called
        to service an RPC if the server has no specific method implementation for
        the name of the RPC for which service was requested.
      request_deserializers: A dictionary from service name-method name pair to
        request deserialization behavior.
      response_serializers: A dictionary from service name-method name pair to
        response serialization behavior.
      thread_pool: A thread pool to use in stubs.
      thread_pool_size: The size of thread pool to create for use in stubs;
        ignored if thread_pool has been passed.
      default_timeout: A duration in seconds to allow for RPC service when
        servicing RPCs that did not include a timeout value when invoked.
      maximum_timeout: A duration in seconds to allow for RPC service when
        servicing RPCs no matter what timeout value was passed when the RPC was
        invoked.

    Returns:
      A ServerOptions value created from the passed parameters.
    """
    return ServerOptions(multi_method_implementation, request_deserializers,
                         response_serializers, thread_pool, thread_pool_size,
                         default_timeout, maximum_timeout)
+
+
def server(service_implementations, options=None):
    """Creates an interfaces.Server with which RPCs can be serviced.

    Args:
      service_implementations: A dictionary from service name-method name pair
        to face.MethodImplementation.
      options: An optional ServerOptions value further customizing the
        functionality of the returned Server.

    Returns:
      An interfaces.Server with which RPCs can be serviced.
    """
    opts = options if options is not None else _EMPTY_SERVER_OPTIONS
    return _server_adaptations.server(
        service_implementations, opts.multi_method_implementation,
        opts.request_deserializers, opts.response_serializers,
        opts.thread_pool, opts.thread_pool_size)
diff --git a/contrib/python/grpcio/py3/grpc/beta/interfaces.py b/contrib/python/grpcio/py3/grpc/beta/interfaces.py
new file mode 100644
index 0000000000..e29b173a43
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/beta/interfaces.py
@@ -0,0 +1,163 @@
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Constants and interfaces of the Beta API of gRPC Python."""
+
+import abc
+
+import grpc
+
# Re-export the GA connectivity enum under the Beta API's name.
ChannelConnectivity = grpc.ChannelConnectivity
# FATAL_FAILURE was a Beta-API name for SHUTDOWN; attach it as an extra
# class attribute so old Beta-era code keeps working.
ChannelConnectivity.FATAL_FAILURE = ChannelConnectivity.SHUTDOWN

# Status codes are identical between the Beta and GA APIs.
StatusCode = grpc.StatusCode
+
+
class GRPCCallOptions(object):
    """A value encapsulating gRPC-specific options passed on RPC invocation.

    This class and its instances have no supported interface - it exists to
    define the type of its instances and its instances exist to be passed to
    other functions.
    """

    def __init__(self, disable_compression, subcall_of, credentials):
        # Whether compression of the request payload should be disabled.
        self.disable_compression = disable_compression
        # grpc_call_options() below always passes None for this value.
        self.subcall_of = subcall_of
        # Optional CallCredentials to attach to the invocation.
        self.credentials = credentials
+
+
def grpc_call_options(disable_compression=False, credentials=None):
    """Creates a GRPCCallOptions value to be passed at RPC invocation.

    All parameters are optional and should always be passed by keyword.

    Args:
      disable_compression: A boolean indicating whether or not compression should
        be disabled for the request object of the RPC. Only valid for
        request-unary RPCs.
      credentials: A CallCredentials object to use for the invoked RPC.

    Returns:
      A GRPCCallOptions value with the given settings; subcall_of is always
      None.
    """
    return GRPCCallOptions(disable_compression, None, credentials)
+
+
# Auth-metadata plumbing is re-exported unchanged from the GA API.
GRPCAuthMetadataContext = grpc.AuthMetadataContext
GRPCAuthMetadataPluginCallback = grpc.AuthMetadataPluginCallback
GRPCAuthMetadataPlugin = grpc.AuthMetadataPlugin
+
+
# NOTE: this module defines no concrete subclass; implementations are
# supplied elsewhere.
class GRPCServicerContext(abc.ABC):
    """Exposes gRPC-specific options and behaviors to code servicing RPCs."""

    @abc.abstractmethod
    def peer(self):
        """Identifies the peer that invoked the RPC being serviced.

        Returns:
          A string identifying the peer that invoked the RPC being serviced.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def disable_next_response_compression(self):
        """Disables compression of the next response passed by the application."""
        raise NotImplementedError()
+
+
# NOTE: this module defines no concrete subclass; implementations are
# supplied elsewhere.
class GRPCInvocationContext(abc.ABC):
    """Exposes gRPC-specific options and behaviors to code invoking RPCs."""

    @abc.abstractmethod
    def disable_next_request_compression(self):
        """Disables compression of the next request passed by the application."""
        raise NotImplementedError()
+
+
class Server(abc.ABC):
    """Services RPCs."""

    @abc.abstractmethod
    def add_insecure_port(self, address):
        """Reserves a port for insecure RPC service once this Server becomes active.

        This method may only be called before this Server's start method is
        called.

        Args:
          address: The address for which to open a port.

        Returns:
          An integer port on which RPCs will be serviced after this link has been
          started. This is typically the same number as the port number contained
          in the passed address, but will likely be different if the port number
          contained in the passed address was zero.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def add_secure_port(self, address, server_credentials):
        """Reserves a port for secure RPC service after this Server becomes active.

        This method may only be called before this Server's start method is
        called.

        Args:
          address: The address for which to open a port.
          server_credentials: A ServerCredentials.

        Returns:
          An integer port on which RPCs will be serviced after this link has been
          started. This is typically the same number as the port number contained
          in the passed address, but will likely be different if the port number
          contained in the passed address was zero.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def start(self):
        """Starts this Server's service of RPCs.

        This method may only be called while the server is not serving RPCs (i.e.
        it is not idempotent).
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def stop(self, grace):
        """Stops this Server's service of RPCs.

        All calls to this method immediately stop service of new RPCs. When
        existing RPCs are aborted is controlled by the grace period parameter
        passed to this method.

        This method may be called at any time and is idempotent. Passing a smaller
        grace value than has been passed in a previous call will have the effect
        of stopping the Server sooner. Passing a larger grace value than has been
        passed in a previous call will not have the effect of stopping the server
        later.

        Args:
          grace: A duration of time in seconds to allow existing RPCs to complete
            before being aborted by this Server's stopping. May be zero for
            immediate abortion of all in-progress RPCs.

        Returns:
          A threading.Event that will be set when this Server has completely
          stopped. The returned event may not be set until after the full grace
          period (if some ongoing RPC continues for the full length of the period)
          or it may be set much sooner (such as if this Server had no RPCs underway
          at the time it was stopped or if all RPCs that it had underway completed
          very early in the grace period).
        """
        raise NotImplementedError()
diff --git a/contrib/python/grpcio/py3/grpc/beta/utilities.py b/contrib/python/grpcio/py3/grpc/beta/utilities.py
new file mode 100644
index 0000000000..fe3ce606c9
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/beta/utilities.py
@@ -0,0 +1,149 @@
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Utilities for the gRPC Python Beta API."""
+
+import threading
+import time
+
+# implementations is referenced from specification in this module.
+from grpc.beta import implementations # pylint: disable=unused-import
+from grpc.beta import interfaces
+from grpc.framework.foundation import callable_util
+from grpc.framework.foundation import future
+
# Logged when a user-supplied "done" callback raises; see _ChannelReadyFuture.
_DONE_CALLBACK_EXCEPTION_LOG_MESSAGE = (
    'Exception calling connectivity future "done" callback!')
+
+
class _ChannelReadyFuture(future.Future):
    """A future.Future that matures when its channel reaches READY.

    All mutable state is guarded by self._condition.  User-supplied "done"
    callbacks are always invoked outside of that lock to avoid re-entrancy
    problems.
    """

    def __init__(self, channel):
        self._condition = threading.Condition()
        self._channel = channel

        self._matured = False
        self._cancelled = False
        # Replaced with None once the future is cancelled or matured.
        self._done_callbacks = []

    def _block(self, timeout):
        """Waits (up to timeout seconds) until cancelled or matured.

        Raises:
          future.CancelledError: If the future was cancelled.
          future.TimeoutError: If the timeout elapses first.
        """
        until = None if timeout is None else time.time() + timeout
        with self._condition:
            while True:
                if self._cancelled:
                    raise future.CancelledError()
                elif self._matured:
                    return
                else:
                    if until is None:
                        self._condition.wait()
                    else:
                        remaining = until - time.time()
                        if remaining < 0:
                            raise future.TimeoutError()
                        else:
                            self._condition.wait(timeout=remaining)

    def _update(self, connectivity):
        """Connectivity-subscription callback; matures the future on READY."""
        with self._condition:
            if (not self._cancelled and
                    connectivity is interfaces.ChannelConnectivity.READY):
                self._matured = True
                self._channel.unsubscribe(self._update)
                self._condition.notify_all()
                # Snapshot the callbacks so they can run outside the lock.
                done_callbacks = tuple(self._done_callbacks)
                self._done_callbacks = None
            else:
                return

        for done_callback in done_callbacks:
            callable_util.call_logging_exceptions(
                done_callback, _DONE_CALLBACK_EXCEPTION_LOG_MESSAGE, self)

    def cancel(self):
        """Cancels the future if it has not yet matured.

        Returns:
          True if this call cancelled the future; False if it had already
          matured.
        """
        with self._condition:
            if not self._matured:
                self._cancelled = True
                self._channel.unsubscribe(self._update)
                self._condition.notify_all()
                # Snapshot the callbacks so they can run outside the lock.
                done_callbacks = tuple(self._done_callbacks)
                self._done_callbacks = None
            else:
                return False

        for done_callback in done_callbacks:
            callable_util.call_logging_exceptions(
                done_callback, _DONE_CALLBACK_EXCEPTION_LOG_MESSAGE, self)

        return True

    def cancelled(self):
        with self._condition:
            return self._cancelled

    def running(self):
        with self._condition:
            return not self._cancelled and not self._matured

    def done(self):
        with self._condition:
            return self._cancelled or self._matured

    def result(self, timeout=None):
        # Maturing carries no value: a ready channel simply yields None.
        self._block(timeout)
        return None

    def exception(self, timeout=None):
        # This future can never fail, so there is no exception to report.
        self._block(timeout)
        return None

    def traceback(self, timeout=None):
        # No exception ever occurs, hence no traceback either.
        self._block(timeout)
        return None

    def add_done_callback(self, fn):
        with self._condition:
            if not self._cancelled and not self._matured:
                self._done_callbacks.append(fn)
                return

        # Already done (or cancelled): invoke immediately, outside the lock.
        fn(self)

    def start(self):
        # try_to_connect=True asks the channel to actively attempt connection.
        with self._condition:
            self._channel.subscribe(self._update, try_to_connect=True)

    def __del__(self):
        with self._condition:
            if not self._cancelled and not self._matured:
                self._channel.unsubscribe(self._update)
+
+
def channel_ready_future(channel):
    """Creates a future.Future that matures once channel is ready.

    Cancelling the returned future.Future does not tell the given
    implementations.Channel to abandon attempts it may have been making to
    connect; cancelling merely deactivates the returned future.Future's
    subscription to the given implementations.Channel's connectivity.

    Args:
      channel: An implementations.Channel.

    Returns:
      A future.Future that matures when the given Channel has connectivity
      interfaces.ChannelConnectivity.READY.
    """
    connectivity_future = _ChannelReadyFuture(channel)
    connectivity_future.start()
    return connectivity_future
diff --git a/contrib/python/grpcio/py3/grpc/experimental/__init__.py b/contrib/python/grpcio/py3/grpc/experimental/__init__.py
new file mode 100644
index 0000000000..f0d142c981
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/experimental/__init__.py
@@ -0,0 +1,128 @@
+# Copyright 2018 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""gRPC's experimental APIs.
+
+These APIs are subject to be removed during any minor version release.
+"""
+
+import copy
+import functools
+import sys
+import warnings
+
+import grpc
+from grpc._cython import cygrpc as _cygrpc
+
# Names of experimental APIs that have already triggered a warning; used by
# _warn_experimental() to warn at most once per API.
_EXPERIMENTAL_APIS_USED = set()
+
+
class ChannelOptions(object):
    """Indicates a channel option unique to gRPC Python.

    This enumeration is part of an EXPERIMENTAL API.

    Attributes:
      SingleThreadedUnaryStream: Perform unary-stream RPCs on a single thread.
    """
    # Despite the "enumeration" wording above, this is a plain string-valued
    # class attribute, not an enum.Enum member.
    SingleThreadedUnaryStream = "SingleThreadedUnaryStream"
+
+
# Public: re-exported via __all__ at the bottom of this module.
class UsageError(Exception):
    """Raised by the gRPC library to indicate usage not allowed by the API."""
+
+
# It's important that there be a single insecure credentials object so that its
# hash is deterministic and can be used for indexing in the simple stubs cache.
# Only handed out through insecure_channel_credentials() below.
_insecure_channel_credentials = grpc.ChannelCredentials(
    _cygrpc.channel_credentials_insecure())
+
+
def insecure_channel_credentials():
    """Creates a ChannelCredentials for use with an insecure channel.

    THIS IS AN EXPERIMENTAL API.

    Returns:
      The process-wide singleton insecure ChannelCredentials; a single shared
      object keeps its hash deterministic for the simple-stubs cache.
    """
    return _insecure_channel_credentials
+
+
# Emitted (at most once per API name) by _warn_experimental() below.
class ExperimentalApiWarning(Warning):
    """A warning that an API is experimental."""
+
+
def _warn_experimental(api_name, stack_offset):
    """Emits an ExperimentalApiWarning the first time api_name is used.

    Args:
      api_name: The name of the experimental API being invoked.
      stack_offset: Extra stack frames to skip so that the warning points at
        the user's call site rather than at internal wrappers.
    """
    if api_name in _EXPERIMENTAL_APIS_USED:
        return
    _EXPERIMENTAL_APIS_USED.add(api_name)
    msg = ("'{}' is an experimental API. It is subject to change or "
           "removal between minor releases. Proceed with caution.").format(
               api_name)
    warnings.warn(msg, ExperimentalApiWarning, stacklevel=2 + stack_offset)
+
+
def experimental_api(f):
    """Decorator that marks the callable f as an experimental API.

    The wrapped callable warns (at most once per name, via
    _warn_experimental) when invoked and then delegates to f.

    Args:
      f: The callable to mark as experimental.

    Returns:
      A callable with the same behavior and metadata as f.
    """

    @functools.wraps(f)
    def _wrapper(*args, **kwargs):
        _warn_experimental(f.__name__, 1)
        return f(*args, **kwargs)

    return _wrapper
+
+
def wrap_server_method_handler(wrapper, handler):
    """Wraps the server method handler function.

    The server implementation requires all server handlers being wrapped as
    RpcMethodHandler objects. This helper function ease the pain of writing
    server handler wrappers.

    Args:
      wrapper: A wrapper function that takes in a method handler behavior
        (the actual function) and returns a wrapped function.
      handler: A RpcMethodHandler object to be wrapped.

    Returns:
      A newly created RpcMethodHandler.
    """
    if not handler:
        return None

    # Select the one behavior field that matches the handler's cardinality.
    if handler.request_streaming:
        field = ('stream_stream'
                 if handler.response_streaming else 'stream_unary')
    else:
        field = ('unary_stream' if handler.response_streaming else
                 'unary_unary')
    # NOTE(lidiz) _replace is a public API:
    # https://docs.python.org/dev/library/collections.html
    return handler._replace(**{field: wrapper(getattr(handler, field))})
+
+
__all__ = (
    'ChannelOptions',
    'ExperimentalApiWarning',
    'UsageError',
    'insecure_channel_credentials',
    'wrap_server_method_handler',
)

if sys.version_info > (3, 6):
    from grpc._simple_stubs import stream_stream
    from grpc._simple_stubs import stream_unary
    from grpc._simple_stubs import unary_stream
    from grpc._simple_stubs import unary_unary

    # BUG FIX: extend __all__ with the *names* of the simple-stub functions.
    # The previous code concatenated the function objects themselves, which
    # breaks `from grpc.experimental import *` because every item in __all__
    # must be a string.
    __all__ = __all__ + ('unary_unary', 'unary_stream', 'stream_unary',
                         'stream_stream')
diff --git a/contrib/python/grpcio/py3/grpc/experimental/aio/__init__.py b/contrib/python/grpcio/py3/grpc/experimental/aio/__init__.py
new file mode 100644
index 0000000000..576cb8dcde
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/experimental/aio/__init__.py
@@ -0,0 +1,16 @@
+# Copyright 2020 The gRPC Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Alias of grpc.aio to keep backward compatibility."""
+
+from grpc.aio import *
diff --git a/contrib/python/grpcio/py3/grpc/experimental/gevent.py b/contrib/python/grpcio/py3/grpc/experimental/gevent.py
new file mode 100644
index 0000000000..159d612b4e
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/experimental/gevent.py
@@ -0,0 +1,27 @@
+# Copyright 2018 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""gRPC's Python gEvent APIs."""
+
+from grpc._cython import cygrpc as _cygrpc
+
+
def init_gevent():
    """Patches gRPC's libraries to be compatible with gevent.

    This must be called AFTER the python standard lib has been patched,
    but BEFORE creating any gRPC objects.

    In order for progress to be made, the application must drive the event loop.
    """
    _cygrpc.init_grpc_gevent()
diff --git a/contrib/python/grpcio/py3/grpc/experimental/session_cache.py b/contrib/python/grpcio/py3/grpc/experimental/session_cache.py
new file mode 100644
index 0000000000..5c55f7c327
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/experimental/session_cache.py
@@ -0,0 +1,45 @@
+# Copyright 2018 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""gRPC's APIs for TLS Session Resumption support"""
+
+from grpc._cython import cygrpc as _cygrpc
+
+
def ssl_session_cache_lru(capacity):
    """Creates an SSLSessionCache with LRU replacement policy

    Args:
      capacity: Size of the cache

    Returns:
      An SSLSessionCache with LRU replacement policy that can be passed as a value for
      the grpc.ssl_session_cache option to a grpc.Channel. SSL session caches are used
      to store session tickets, which clients can present to resume previous TLS sessions
      with a server.
    """
    # Wrap the cygrpc-level LRU cache so it can be converted to a channel-arg
    # integer via SSLSessionCache.__int__.
    return SSLSessionCache(_cygrpc.SSLSessionCacheLRU(capacity))
+
+
class SSLSessionCache(object):
    """Wraps a TLS session cache for use in TLS session resumption.

    Pass an instance of this class as the value of the grpc.ssl_session_cache
    option when creating a Channel.
    """

    def __init__(self, cache):
        # The underlying cache object; exposed to callers only via __int__.
        self._cache = cache

    def __int__(self):
        # Channel options carry the cache as an integer value.
        return int(self._cache)
diff --git a/contrib/python/grpcio/py3/grpc/framework/__init__.py b/contrib/python/grpcio/py3/grpc/framework/__init__.py
new file mode 100644
index 0000000000..5fb4f3c3cf
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/framework/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/contrib/python/grpcio/py3/grpc/framework/common/__init__.py b/contrib/python/grpcio/py3/grpc/framework/common/__init__.py
new file mode 100644
index 0000000000..5fb4f3c3cf
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/framework/common/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/contrib/python/grpcio/py3/grpc/framework/common/cardinality.py b/contrib/python/grpcio/py3/grpc/framework/common/cardinality.py
new file mode 100644
index 0000000000..c98735622d
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/framework/common/cardinality.py
@@ -0,0 +1,26 @@
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Defines an enum for classifying RPC methods by streaming semantics."""
+
+import enum
+
+
@enum.unique
class Cardinality(enum.Enum):
    """Describes the streaming semantics of an RPC method."""

    # Single request message, single response message.
    UNARY_UNARY = 'request-unary/response-unary'
    # Single request message, stream of response messages.
    UNARY_STREAM = 'request-unary/response-streaming'
    # Stream of request messages, single response message.
    STREAM_UNARY = 'request-streaming/response-unary'
    # Stream of request messages, stream of response messages.
    STREAM_STREAM = 'request-streaming/response-streaming'
diff --git a/contrib/python/grpcio/py3/grpc/framework/common/style.py b/contrib/python/grpcio/py3/grpc/framework/common/style.py
new file mode 100644
index 0000000000..f6138d417f
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/framework/common/style.py
@@ -0,0 +1,24 @@
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Defines an enum for classifying RPC methods by control flow semantics."""
+
+import enum
+
+
@enum.unique
class Service(enum.Enum):
    """Describes the control flow style of RPC method implementation."""

    # The implementation is called inline and computes its result directly.
    INLINE = 'inline'
    # The implementation is driven by events/callbacks.
    EVENT = 'event'
diff --git a/contrib/python/grpcio/py3/grpc/framework/foundation/__init__.py b/contrib/python/grpcio/py3/grpc/framework/foundation/__init__.py
new file mode 100644
index 0000000000..5fb4f3c3cf
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/framework/foundation/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/contrib/python/grpcio/py3/grpc/framework/foundation/abandonment.py b/contrib/python/grpcio/py3/grpc/framework/foundation/abandonment.py
new file mode 100644
index 0000000000..660ce991c4
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/framework/foundation/abandonment.py
@@ -0,0 +1,22 @@
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Utilities for indicating abandonment of computation."""
+
+
class Abandoned(Exception):
    """Indicates that some computation is being abandoned.

    Abandoning a computation is different from returning a value or raising
    an exception indicating some operational or programming defect.
    """
diff --git a/contrib/python/grpcio/py3/grpc/framework/foundation/callable_util.py b/contrib/python/grpcio/py3/grpc/framework/foundation/callable_util.py
new file mode 100644
index 0000000000..0a638eb62e
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/framework/foundation/callable_util.py
@@ -0,0 +1,94 @@
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Utilities for working with callables."""
+
+from abc import ABC
+import collections
+import enum
+import functools
+import logging
+
+_LOGGER = logging.getLogger(__name__)
+
+
class Outcome(ABC):
    """A sum type describing the outcome of some call.

    Attributes:
      kind: One of Kind.RETURNED or Kind.RAISED respectively indicating that the
        call returned a value or raised an exception.
      return_value: The value returned by the call. Must be present if kind is
        Kind.RETURNED.
      exception: The exception raised by the call. Must be present if kind is
        Kind.RAISED.
    """

    # NOTE: the attributes above are documentation only; this base declares no
    # fields itself.  Concrete subclasses (e.g. _EasyOutcome below) supply
    # them.

    @enum.unique
    class Kind(enum.Enum):
        """Identifies the general kind of the outcome of some call."""

        # Sentinel values: only member identity matters, not the value itself.
        RETURNED = object()
        RAISED = object()
+
+
class _EasyOutcome(
        collections.namedtuple('_EasyOutcome',
                               ['kind', 'return_value', 'exception']), Outcome):
    """A trivial implementation of Outcome."""
    # The namedtuple base supplies the three attributes required by Outcome's
    # documented contract.
+
+
def _call_logging_exceptions(behavior, message, *args, **kwargs):
    """Invokes behavior, packaging its result or exception as an Outcome.

    Any exception raised by behavior is logged under the given message and
    captured in the returned Outcome rather than propagated.
    """
    try:
        return _EasyOutcome(Outcome.Kind.RETURNED, behavior(*args, **kwargs),
                            None)
    except Exception as e:  # pylint: disable=broad-except
        _LOGGER.exception(message)
        return _EasyOutcome(Outcome.Kind.RAISED, None, e)
+
+
def with_exceptions_logged(behavior, message):
    """Wraps a callable in a try-except that logs any exceptions it raises.

    Args:
      behavior: Any callable.
      message: A string to log if the behavior raises an exception.

    Returns:
      A callable that when executed invokes the given behavior. The returned
      callable takes the same arguments as the given behavior but returns an
      Outcome describing whether the given behavior returned a value or
      raised an exception.
    """

    @functools.wraps(behavior)
    def wrapped_behavior(*args, **kwargs):
        return _call_logging_exceptions(behavior, message, *args, **kwargs)

    return wrapped_behavior
+
+
def call_logging_exceptions(behavior, message, *args, **kwargs):
    """Calls a behavior in a try-except that logs any exceptions it raises.

    Args:
      behavior: Any callable.
      message: A string to log if the behavior raises an exception.
      *args: Positional arguments to pass to the given behavior.
      **kwargs: Keyword arguments to pass to the given behavior.

    Returns:
      An Outcome describing whether the given behavior returned a value or raised
      an exception.
    """
    # Thin public wrapper over the module-private implementation above.
    return _call_logging_exceptions(behavior, message, *args, **kwargs)
diff --git a/contrib/python/grpcio/py3/grpc/framework/foundation/future.py b/contrib/python/grpcio/py3/grpc/framework/foundation/future.py
new file mode 100644
index 0000000000..c7996aa8a5
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/framework/foundation/future.py
@@ -0,0 +1,219 @@
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""A Future interface.
+
+Python doesn't have a Future interface in its standard library. In the absence
+of such a standard, three separate, incompatible implementations
+(concurrent.futures.Future, ndb.Future, and asyncio.Future) have appeared. This
+interface attempts to be as compatible as possible with
+concurrent.futures.Future. From ndb.Future it adopts a traceback-object accessor
+method.
+
+Unlike the concrete and implemented Future classes listed above, the Future
+class defined in this module is an entirely abstract interface that anyone may
+implement and use.
+
+The one known incompatibility between this interface and the interface of
+concurrent.futures.Future is that this interface defines its own CancelledError
+and TimeoutError exceptions rather than raising the implementation-private
+concurrent.futures._base.CancelledError and the
+built-in-but-only-in-3.3-and-later TimeoutError.
+"""
+
+import abc
+
+
+class TimeoutError(Exception):
+ """Indicates that a particular call timed out."""
+
+
+class CancelledError(Exception):
+ """Indicates that the computation underlying a Future was cancelled."""
+
+
+class Future(abc.ABC):
+ """A representation of a computation in another control flow.
+
+ Computations represented by a Future may be yet to be begun, may be ongoing,
+ or may have already completed.
+ """
+
+ # NOTE(nathaniel): This isn't the return type that I would want to have if it
+ # were up to me. Were this interface being written from scratch, the return
+ # type of this method would probably be a sum type like:
+ #
+ # NOT_COMMENCED
+ # COMMENCED_AND_NOT_COMPLETED
+ # PARTIAL_RESULT<Partial_Result_Type>
+ # COMPLETED<Result_Type>
+ # UNCANCELLABLE
+ # NOT_IMMEDIATELY_DETERMINABLE
+ @abc.abstractmethod
+ def cancel(self):
+ """Attempts to cancel the computation.
+
+ This method does not block.
+
+ Returns:
+ True if the computation has not yet begun, will not be allowed to take
+ place, and determination of both was possible without blocking. False
+ under all other circumstances including but not limited to the
+ computation's already having begun, the computation's already having
+ finished, and the computation's having been scheduled for execution on a
+ remote system for which a determination of whether or not it commenced
+ before being cancelled cannot be made without blocking.
+ """
+ raise NotImplementedError()
+
+ # NOTE(nathaniel): Here too this isn't the return type that I'd want this
+ # method to have if it were up to me. I think I'd go with another sum type
+ # like:
+ #
+ # NOT_CANCELLED (this object's cancel method hasn't been called)
+ # NOT_COMMENCED
+ # COMMENCED_AND_NOT_COMPLETED
+ # PARTIAL_RESULT<Partial_Result_Type>
+ # COMPLETED<Result_Type>
+ # UNCANCELLABLE
+ # NOT_IMMEDIATELY_DETERMINABLE
+ #
+ # Notice how giving the cancel method the right semantics obviates most
+ # reasons for this method to exist.
+ @abc.abstractmethod
+ def cancelled(self):
+ """Describes whether the computation was cancelled.
+
+ This method does not block.
+
+ Returns:
+ True if the computation was cancelled any time before its result became
+ immediately available. False under all other circumstances including but
+ not limited to this object's cancel method not having been called and
+ the computation's result having become immediately available.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def running(self):
+ """Describes whether the computation is taking place.
+
+ This method does not block.
+
+ Returns:
+ True if the computation is scheduled to take place in the future or is
+ taking place now, or False if the computation took place in the past or
+ was cancelled.
+ """
+ raise NotImplementedError()
+
+ # NOTE(nathaniel): These aren't quite the semantics I'd like here either. I
+ # would rather this only returned True in cases in which the underlying
+ # computation completed successfully. A computation's having been cancelled
+ # conflicts with considering that computation "done".
+ @abc.abstractmethod
+ def done(self):
+ """Describes whether the computation has taken place.
+
+ This method does not block.
+
+ Returns:
+ True if the computation is known to have either completed or have been
+ unscheduled or interrupted. False if the computation may possibly be
+ executing or scheduled to execute later.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def result(self, timeout=None):
+ """Accesses the outcome of the computation or raises its exception.
+
+ This method may return immediately or may block.
+
+ Args:
+ timeout: The length of time in seconds to wait for the computation to
+ finish or be cancelled, or None if this method should block until the
+ computation has finished or is cancelled no matter how long that takes.
+
+ Returns:
+ The return value of the computation.
+
+ Raises:
+ TimeoutError: If a timeout value is passed and the computation does not
+ terminate within the allotted time.
+ CancelledError: If the computation was cancelled.
+ Exception: If the computation raised an exception, this call will raise
+ the same exception.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def exception(self, timeout=None):
+ """Return the exception raised by the computation.
+
+ This method may return immediately or may block.
+
+ Args:
+ timeout: The length of time in seconds to wait for the computation to
+ terminate or be cancelled, or None if this method should block until
+ the computation is terminated or is cancelled no matter how long that
+ takes.
+
+ Returns:
+ The exception raised by the computation, or None if the computation did
+ not raise an exception.
+
+ Raises:
+ TimeoutError: If a timeout value is passed and the computation does not
+ terminate within the allotted time.
+ CancelledError: If the computation was cancelled.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def traceback(self, timeout=None):
+ """Access the traceback of the exception raised by the computation.
+
+ This method may return immediately or may block.
+
+ Args:
+ timeout: The length of time in seconds to wait for the computation to
+ terminate or be cancelled, or None if this method should block until
+ the computation is terminated or is cancelled no matter how long that
+ takes.
+
+ Returns:
+ The traceback of the exception raised by the computation, or None if the
+ computation did not raise an exception.
+
+ Raises:
+ TimeoutError: If a timeout value is passed and the computation does not
+ terminate within the allotted time.
+ CancelledError: If the computation was cancelled.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def add_done_callback(self, fn):
+ """Adds a function to be called at completion of the computation.
+
+ The callback will be passed this Future object describing the outcome of
+ the computation.
+
+ If the computation has already completed, the callback will be called
+ immediately.
+
+ Args:
+ fn: A callable taking this Future object as its single parameter.
+ """
+ raise NotImplementedError()
diff --git a/contrib/python/grpcio/py3/grpc/framework/foundation/logging_pool.py b/contrib/python/grpcio/py3/grpc/framework/foundation/logging_pool.py
new file mode 100644
index 0000000000..53d2cd0082
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/framework/foundation/logging_pool.py
@@ -0,0 +1,71 @@
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""A thread pool that logs exceptions raised by tasks executed within it."""
+
+from concurrent import futures
+import logging
+
+_LOGGER = logging.getLogger(__name__)
+
+
+def _wrap(behavior):
+ """Wraps an arbitrary callable behavior in exception-logging."""
+
+ def _wrapping(*args, **kwargs):
+ try:
+ return behavior(*args, **kwargs)
+ except Exception:
+ _LOGGER.exception(
+ 'Unexpected exception from %s executed in logging pool!',
+ behavior)
+ raise
+
+ return _wrapping
+
+
+class _LoggingPool(object):
+ """An exception-logging futures.ThreadPoolExecutor-compatible thread pool."""
+
+ def __init__(self, backing_pool):
+ self._backing_pool = backing_pool
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ self._backing_pool.shutdown(wait=True)
+
+ def submit(self, fn, *args, **kwargs):
+ return self._backing_pool.submit(_wrap(fn), *args, **kwargs)
+
+ def map(self, func, *iterables, **kwargs):
+ return self._backing_pool.map(_wrap(func),
+ *iterables,
+ timeout=kwargs.get('timeout', None))
+
+ def shutdown(self, wait=True):
+ self._backing_pool.shutdown(wait=wait)
+
+
+def pool(max_workers):
+ """Creates a thread pool that logs exceptions raised by the tasks within it.
+
+ Args:
+ max_workers: The maximum number of worker threads to allow the pool.
+
+ Returns:
+ A futures.ThreadPoolExecutor-compatible thread pool that logs exceptions
+ raised by the tasks executed within it.
+ """
+ return _LoggingPool(futures.ThreadPoolExecutor(max_workers))
diff --git a/contrib/python/grpcio/py3/grpc/framework/foundation/stream.py b/contrib/python/grpcio/py3/grpc/framework/foundation/stream.py
new file mode 100644
index 0000000000..150a22435e
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/framework/foundation/stream.py
@@ -0,0 +1,43 @@
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Interfaces related to streams of values or objects."""
+
+import abc
+
+
+class Consumer(abc.ABC):
+ """Interface for consumers of finite streams of values or objects."""
+
+ @abc.abstractmethod
+ def consume(self, value):
+ """Accepts a value.
+
+ Args:
+ value: Any value accepted by this Consumer.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def terminate(self):
+ """Indicates to this Consumer that no more values will be supplied."""
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def consume_and_terminate(self, value):
+ """Supplies a value and signals that no more values will be supplied.
+
+ Args:
+ value: Any value accepted by this Consumer.
+ """
+ raise NotImplementedError()
diff --git a/contrib/python/grpcio/py3/grpc/framework/foundation/stream_util.py b/contrib/python/grpcio/py3/grpc/framework/foundation/stream_util.py
new file mode 100644
index 0000000000..1faaf29bd7
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/framework/foundation/stream_util.py
@@ -0,0 +1,148 @@
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Helpful utilities related to the stream module."""
+
+import logging
+import threading
+
+from grpc.framework.foundation import stream
+
+_NO_VALUE = object()
+_LOGGER = logging.getLogger(__name__)
+
+
+class TransformingConsumer(stream.Consumer):
+ """A stream.Consumer that passes a transformation of its input to another."""
+
+ def __init__(self, transformation, downstream):
+ self._transformation = transformation
+ self._downstream = downstream
+
+ def consume(self, value):
+ self._downstream.consume(self._transformation(value))
+
+ def terminate(self):
+ self._downstream.terminate()
+
+ def consume_and_terminate(self, value):
+ self._downstream.consume_and_terminate(self._transformation(value))
+
+
+class IterableConsumer(stream.Consumer):
+ """A Consumer that when iterated over emits the values it has consumed."""
+
+ def __init__(self):
+ self._condition = threading.Condition()
+ self._values = []
+ self._active = True
+
+ def consume(self, value):
+ with self._condition:
+ if self._active:
+ self._values.append(value)
+ self._condition.notify()
+
+ def terminate(self):
+ with self._condition:
+ self._active = False
+ self._condition.notify()
+
+ def consume_and_terminate(self, value):
+ with self._condition:
+ if self._active:
+ self._values.append(value)
+ self._active = False
+ self._condition.notify()
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ return self.next()
+
+ def next(self):
+ with self._condition:
+ while self._active and not self._values:
+ self._condition.wait()
+ if self._values:
+ return self._values.pop(0)
+ else:
+ raise StopIteration()
+
+
+class ThreadSwitchingConsumer(stream.Consumer):
+ """A Consumer decorator that affords serialization and asynchrony."""
+
+ def __init__(self, sink, pool):
+ self._lock = threading.Lock()
+ self._sink = sink
+ self._pool = pool
+ # True if self._spin has been submitted to the pool to be called once and
+ # that call has not yet returned, False otherwise.
+ self._spinning = False
+ self._values = []
+ self._active = True
+
+ def _spin(self, sink, value, terminate):
+ while True:
+ try:
+ if value is _NO_VALUE:
+ sink.terminate()
+ elif terminate:
+ sink.consume_and_terminate(value)
+ else:
+ sink.consume(value)
+ except Exception as e: # pylint:disable=broad-except
+ _LOGGER.exception(e)
+
+ with self._lock:
+ if terminate:
+ self._spinning = False
+ return
+ elif self._values:
+ value = self._values.pop(0)
+ terminate = not self._values and not self._active
+ elif not self._active:
+ value = _NO_VALUE
+ terminate = True
+ else:
+ self._spinning = False
+ return
+
+ def consume(self, value):
+ with self._lock:
+ if self._active:
+ if self._spinning:
+ self._values.append(value)
+ else:
+ self._pool.submit(self._spin, self._sink, value, False)
+ self._spinning = True
+
+ def terminate(self):
+ with self._lock:
+ if self._active:
+ self._active = False
+ if not self._spinning:
+ self._pool.submit(self._spin, self._sink, _NO_VALUE, True)
+ self._spinning = True
+
+ def consume_and_terminate(self, value):
+ with self._lock:
+ if self._active:
+ self._active = False
+ if self._spinning:
+ self._values.append(value)
+ else:
+ self._pool.submit(self._spin, self._sink, value, True)
+ self._spinning = True
diff --git a/contrib/python/grpcio/py3/grpc/framework/interfaces/__init__.py b/contrib/python/grpcio/py3/grpc/framework/interfaces/__init__.py
new file mode 100644
index 0000000000..5fb4f3c3cf
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/framework/interfaces/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/contrib/python/grpcio/py3/grpc/framework/interfaces/base/__init__.py b/contrib/python/grpcio/py3/grpc/framework/interfaces/base/__init__.py
new file mode 100644
index 0000000000..5fb4f3c3cf
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/framework/interfaces/base/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/contrib/python/grpcio/py3/grpc/framework/interfaces/base/base.py b/contrib/python/grpcio/py3/grpc/framework/interfaces/base/base.py
new file mode 100644
index 0000000000..8caee325c2
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/framework/interfaces/base/base.py
@@ -0,0 +1,325 @@
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""The base interface of RPC Framework.
+
+Implementations of this interface support the conduct of "operations":
+exchanges between two distinct ends of an arbitrary number of data payloads
+and metadata such as a name for the operation, initial and terminal metadata
+in each direction, and flow control. These operations may be used for transfers
+of data, remote procedure calls, status indication, or anything else
+applications choose.
+"""
+
+# threading is referenced from specification in this module.
+import abc
+import enum
+import threading # pylint: disable=unused-import
+
+# pylint: disable=too-many-arguments
+
+
+class NoSuchMethodError(Exception):
+ """Indicates that an unrecognized operation has been called.
+
+ Attributes:
+ code: A code value to communicate to the other side of the operation
+ along with indication of operation termination. May be None.
+ details: A details value to communicate to the other side of the
+ operation along with indication of operation termination. May be None.
+ """
+
+ def __init__(self, code, details):
+ """Constructor.
+
+ Args:
+ code: A code value to communicate to the other side of the operation
+ along with indication of operation termination. May be None.
+ details: A details value to communicate to the other side of the
+ operation along with indication of operation termination. May be None.
+ """
+ super(NoSuchMethodError, self).__init__()
+ self.code = code
+ self.details = details
+
+
+class Outcome(object):
+ """The outcome of an operation.
+
+ Attributes:
+ kind: A Kind value coarsely identifying how the operation terminated.
+ code: An application-specific code value or None if no such value was
+ provided.
+ details: An application-specific details value or None if no such value was
+ provided.
+ """
+
+ @enum.unique
+ class Kind(enum.Enum):
+ """Ways in which an operation can terminate."""
+
+ COMPLETED = 'completed'
+ CANCELLED = 'cancelled'
+ EXPIRED = 'expired'
+ LOCAL_SHUTDOWN = 'local shutdown'
+ REMOTE_SHUTDOWN = 'remote shutdown'
+ RECEPTION_FAILURE = 'reception failure'
+ TRANSMISSION_FAILURE = 'transmission failure'
+ LOCAL_FAILURE = 'local failure'
+ REMOTE_FAILURE = 'remote failure'
+
+
+class Completion(abc.ABC):
+ """An aggregate of the values exchanged upon operation completion.
+
+ Attributes:
+ terminal_metadata: A terminal metadata value for the operation.
+ code: A code value for the operation.
+ message: A message value for the operation.
+ """
+
+
+class OperationContext(abc.ABC):
+ """Provides operation-related information and action."""
+
+ @abc.abstractmethod
+ def outcome(self):
+ """Indicates the operation's outcome (or that the operation is ongoing).
+
+ Returns:
+ None if the operation is still active or the Outcome value for the
+ operation if it has terminated.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def add_termination_callback(self, callback):
+ """Adds a function to be called upon operation termination.
+
+ Args:
+ callback: A callable to be passed an Outcome value on operation
+ termination.
+
+ Returns:
+ None if the operation has not yet terminated and the passed callback will
+ later be called when it does terminate, or if the operation has already
+ terminated an Outcome value describing the operation termination and the
+ passed callback will not be called as a result of this method call.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def time_remaining(self):
+ """Describes the length of allowed time remaining for the operation.
+
+ Returns:
+ A nonnegative float indicating the length of allowed time in seconds
+ remaining for the operation to complete before it is considered to have
+ timed out. Zero is returned if the operation has terminated.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def cancel(self):
+ """Cancels the operation if the operation has not yet terminated."""
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def fail(self, exception):
+ """Indicates that the operation has failed.
+
+ Args:
+ exception: An exception germane to the operation failure. May be None.
+ """
+ raise NotImplementedError()
+
+
+class Operator(abc.ABC):
+ """An interface through which to participate in an operation."""
+
+ @abc.abstractmethod
+ def advance(self,
+ initial_metadata=None,
+ payload=None,
+ completion=None,
+ allowance=None):
+ """Progresses the operation.
+
+ Args:
+ initial_metadata: An initial metadata value. Only one may ever be
+ communicated in each direction for an operation, and they must be
+ communicated no later than either the first payload or the completion.
+ payload: A payload value.
+ completion: A Completion value. May only ever be non-None once in either
+ direction, and no payloads may be passed after it has been communicated.
+ allowance: A positive integer communicating the number of additional
+ payloads allowed to be passed by the remote side of the operation.
+ """
+ raise NotImplementedError()
+
+
+class ProtocolReceiver(abc.ABC):
+ """A means of receiving protocol values during an operation."""
+
+ @abc.abstractmethod
+ def context(self, protocol_context):
+ """Accepts the protocol context object for the operation.
+
+ Args:
+ protocol_context: The protocol context object for the operation.
+ """
+ raise NotImplementedError()
+
+
+class Subscription(abc.ABC):
+ """Describes customer code's interest in values from the other side.
+
+ Attributes:
+ kind: A Kind value describing the overall kind of this value.
+ termination_callback: A callable to be passed the Outcome associated with
+ the operation after it has terminated. Must be non-None if kind is
+ Kind.TERMINATION_ONLY. Must be None otherwise.
+ allowance: A callable behavior that accepts positive integers representing
+ the number of additional payloads allowed to be passed to the other side
+ of the operation. Must be None if kind is Kind.FULL. Must not be None
+ otherwise.
+ operator: An Operator to be passed values from the other side of the
+ operation. Must be non-None if kind is Kind.FULL. Must be None otherwise.
+ protocol_receiver: A ProtocolReceiver to be passed protocol objects as they
+ become available during the operation. Must be non-None if kind is
+ Kind.FULL.
+ """
+
+ @enum.unique
+ class Kind(enum.Enum):
+
+ NONE = 'none'
+ TERMINATION_ONLY = 'termination only'
+ FULL = 'full'
+
+
+class Servicer(abc.ABC):
+ """Interface for service implementations."""
+
+ @abc.abstractmethod
+ def service(self, group, method, context, output_operator):
+ """Services an operation.
+
+ Args:
+ group: The group identifier of the operation to be serviced.
+ method: The method identifier of the operation to be serviced.
+ context: An OperationContext object affording contextual information and
+ actions.
+ output_operator: An Operator that will accept output values of the
+ operation.
+
+ Returns:
+ A Subscription via which this object may or may not accept more values of
+ the operation.
+
+ Raises:
+ NoSuchMethodError: If this Servicer does not handle operations with the
+ given group and method.
+ abandonment.Abandoned: If the operation has been aborted and there no
+ longer is any reason to service the operation.
+ """
+ raise NotImplementedError()
+
+
+class End(abc.ABC):
+ """Common type for entry-point objects on both sides of an operation."""
+
+ @abc.abstractmethod
+ def start(self):
+ """Starts this object's service of operations."""
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def stop(self, grace):
+ """Stops this object's service of operations.
+
+ This object will refuse service of new operations as soon as this method is
+ called but operations under way at the time of the call may be given a
+ grace period during which they are allowed to finish.
+
+ Args:
+ grace: A duration of time in seconds to allow ongoing operations to
+ terminate before being forcefully terminated by the stopping of this
+ End. May be zero to terminate all ongoing operations and immediately
+ stop.
+
+ Returns:
+ A threading.Event that will be set to indicate all operations having
+ terminated and this End having completely stopped. The returned event
+ may not be set until after the full grace period (if some ongoing
+ operation continues for the full length of the period) or it may be set
+ much sooner (if for example this End had no operations in progress at
+ the time its stop method was called).
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def operate(self,
+ group,
+ method,
+ subscription,
+ timeout,
+ initial_metadata=None,
+ payload=None,
+ completion=None,
+ protocol_options=None):
+ """Commences an operation.
+
+ Args:
+ group: The group identifier of the invoked operation.
+ method: The method identifier of the invoked operation.
+ subscription: A Subscription to which the results of the operation will be
+ passed.
+ timeout: A length of time in seconds to allow for the operation.
+ initial_metadata: An initial metadata value to be sent to the other side
+ of the operation. May be None if the initial metadata will be later
+ passed via the returned operator or if there will be no initial metadata
+ passed at all.
+ payload: An initial payload for the operation.
+ completion: A Completion value indicating the end of transmission to the
+ other side of the operation.
+ protocol_options: A value specified by the provider of a Base interface
+ implementation affording custom state and behavior.
+
+ Returns:
+ A pair of objects affording information about the operation and action
+ continuing the operation. The first element of the returned pair is an
+ OperationContext for the operation and the second element of the
+ returned pair is an Operator to which operation values not passed in
+ this call should later be passed.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def operation_stats(self):
+ """Reports the number of terminated operations broken down by outcome.
+
+ Returns:
+ A dictionary from Outcome.Kind value to an integer identifying the number
+ of operations that terminated with that outcome kind.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def add_idle_action(self, action):
+ """Adds an action to be called when this End has no ongoing operations.
+
+ Args:
+ action: A callable that accepts no arguments.
+ """
+ raise NotImplementedError()
diff --git a/contrib/python/grpcio/py3/grpc/framework/interfaces/base/utilities.py b/contrib/python/grpcio/py3/grpc/framework/interfaces/base/utilities.py
new file mode 100644
index 0000000000..281db62b5d
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/framework/interfaces/base/utilities.py
@@ -0,0 +1,71 @@
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Utilities for use with the base interface of RPC Framework."""
+
+import collections
+
+from grpc.framework.interfaces.base import base
+
+
+class _Completion(base.Completion,
+ collections.namedtuple('_Completion', (
+ 'terminal_metadata',
+ 'code',
+ 'message',
+ ))):
+ """A trivial implementation of base.Completion."""
+
+
+class _Subscription(base.Subscription,
+ collections.namedtuple('_Subscription', (
+ 'kind',
+ 'termination_callback',
+ 'allowance',
+ 'operator',
+ 'protocol_receiver',
+ ))):
+ """A trivial implementation of base.Subscription."""
+
+
+_NONE_SUBSCRIPTION = _Subscription(base.Subscription.Kind.NONE, None, None,
+ None, None)
+
+
+def completion(terminal_metadata, code, message):
+ """Creates a base.Completion aggregating the given operation values.
+
+ Args:
+ terminal_metadata: A terminal metadata value for an operation.
+ code: A code value for an operation.
+ message: A message value for an operation.
+
+ Returns:
+ A base.Completion aggregating the given operation values.
+ """
+ return _Completion(terminal_metadata, code, message)
+
+
+def full_subscription(operator, protocol_receiver):
+ """Creates a "full" base.Subscription for the given base.Operator.
+
+ Args:
+ operator: A base.Operator to be used in an operation.
+ protocol_receiver: A base.ProtocolReceiver to be used in an operation.
+
+ Returns:
+ A base.Subscription of kind base.Subscription.Kind.FULL wrapping the given
+ base.Operator and base.ProtocolReceiver.
+ """
+ return _Subscription(base.Subscription.Kind.FULL, None, None, operator,
+ protocol_receiver)
diff --git a/contrib/python/grpcio/py3/grpc/framework/interfaces/face/__init__.py b/contrib/python/grpcio/py3/grpc/framework/interfaces/face/__init__.py
new file mode 100644
index 0000000000..5fb4f3c3cf
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/framework/interfaces/face/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/contrib/python/grpcio/py3/grpc/framework/interfaces/face/face.py b/contrib/python/grpcio/py3/grpc/framework/interfaces/face/face.py
new file mode 100644
index 0000000000..ed0de6a7de
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/framework/interfaces/face/face.py
@@ -0,0 +1,1049 @@
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Interfaces defining the Face layer of RPC Framework."""
+
+import abc
+import collections
+import enum
+
+# cardinality, style, abandonment, future, and stream are
+# referenced from specification in this module.
+from grpc.framework.common import cardinality # pylint: disable=unused-import
+from grpc.framework.common import style # pylint: disable=unused-import
+from grpc.framework.foundation import future # pylint: disable=unused-import
+from grpc.framework.foundation import stream # pylint: disable=unused-import
+
+# pylint: disable=too-many-arguments
+
+
+class NoSuchMethodError(Exception):
+ """Raised by customer code to indicate an unrecognized method.
+
+ Attributes:
+ group: The group of the unrecognized method.
+ method: The method identifier of the unrecognized method.
+ """
+
+ def __init__(self, group, method):
+ """Constructor.
+
+ Args:
+ group: The group identifier of the unrecognized RPC name.
+ method: The method identifier of the unrecognized RPC name.
+ """
+ super(NoSuchMethodError, self).__init__()
+ self.group = group
+ self.method = method
+
+ def __repr__(self):
+ return 'face.NoSuchMethodError(%s, %s)' % (
+ self.group,
+ self.method,
+ )
+
+
+class Abortion(
+ collections.namedtuple('Abortion', (
+ 'kind',
+ 'initial_metadata',
+ 'terminal_metadata',
+ 'code',
+ 'details',
+ ))):
+ """A value describing RPC abortion.
+
+ Attributes:
+ kind: A Kind value identifying how the RPC failed.
+ initial_metadata: The initial metadata from the other side of the RPC or
+ None if no initial metadata value was received.
+ terminal_metadata: The terminal metadata from the other side of the RPC or
+ None if no terminal metadata value was received.
+ code: The code value from the other side of the RPC or None if no code value
+ was received.
+ details: The details value from the other side of the RPC or None if no
+ details value was received.
+ """
+
+ @enum.unique
+ class Kind(enum.Enum):
+ """Types of RPC abortion."""
+
+ CANCELLED = 'cancelled'
+ EXPIRED = 'expired'
+ LOCAL_SHUTDOWN = 'local shutdown'
+ REMOTE_SHUTDOWN = 'remote shutdown'
+ NETWORK_FAILURE = 'network failure'
+ LOCAL_FAILURE = 'local failure'
+ REMOTE_FAILURE = 'remote failure'
+
+
+class AbortionError(Exception, metaclass=abc.ABCMeta):
+ """Common super type for exceptions indicating RPC abortion.
+
+ initial_metadata: The initial metadata from the other side of the RPC or
+ None if no initial metadata value was received.
+ terminal_metadata: The terminal metadata from the other side of the RPC or
+ None if no terminal metadata value was received.
+ code: The code value from the other side of the RPC or None if no code value
+ was received.
+ details: The details value from the other side of the RPC or None if no
+ details value was received.
+ """
+
+ def __init__(self, initial_metadata, terminal_metadata, code, details):
+ super(AbortionError, self).__init__()
+ self.initial_metadata = initial_metadata
+ self.terminal_metadata = terminal_metadata
+ self.code = code
+ self.details = details
+
+ def __str__(self):
+ return '%s(code=%s, details="%s")' % (self.__class__.__name__,
+ self.code, self.details)
+
+
+class CancellationError(AbortionError):
+ """Indicates that an RPC has been cancelled."""
+
+
+class ExpirationError(AbortionError):
+ """Indicates that an RPC has expired ("timed out")."""
+
+
+class LocalShutdownError(AbortionError):
+ """Indicates that an RPC has terminated due to local shutdown of RPCs."""
+
+
+class RemoteShutdownError(AbortionError):
+ """Indicates that an RPC has terminated due to remote shutdown of RPCs."""
+
+
+class NetworkError(AbortionError):
+ """Indicates that some error occurred on the network."""
+
+
+class LocalError(AbortionError):
+ """Indicates that an RPC has terminated due to a local defect."""
+
+
+class RemoteError(AbortionError):
+ """Indicates that an RPC has terminated due to a remote defect."""
+
+
+class RpcContext(abc.ABC):
+ """Provides RPC-related information and action."""
+
+ @abc.abstractmethod
+ def is_active(self):
+ """Describes whether the RPC is active or has terminated."""
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def time_remaining(self):
+ """Describes the length of allowed time remaining for the RPC.
+
+ Returns:
+ A nonnegative float indicating the length of allowed time in seconds
+ remaining for the RPC to complete before it is considered to have timed
+ out.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def add_abortion_callback(self, abortion_callback):
+ """Registers a callback to be called if the RPC is aborted.
+
+ Args:
+ abortion_callback: A callable to be called and passed an Abortion value
+ in the event of RPC abortion.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def cancel(self):
+ """Cancels the RPC.
+
+ Idempotent and has no effect if the RPC has already terminated.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def protocol_context(self):
+ """Accesses a custom object specified by an implementation provider.
+
+ Returns:
+ A value specified by the provider of a Face interface implementation
+ affording custom state and behavior.
+ """
+ raise NotImplementedError()
+
+
+class Call(RpcContext, metaclass=abc.ABCMeta):
+ """Invocation-side utility object for an RPC."""
+
+ @abc.abstractmethod
+ def initial_metadata(self):
+ """Accesses the initial metadata from the service-side of the RPC.
+
+ This method blocks until the value is available or is known not to have been
+ emitted from the service-side of the RPC.
+
+ Returns:
+ The initial metadata object emitted by the service-side of the RPC, or
+ None if there was no such value.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def terminal_metadata(self):
+ """Accesses the terminal metadata from the service-side of the RPC.
+
+ This method blocks until the value is available or is known not to have been
+ emitted from the service-side of the RPC.
+
+ Returns:
+ The terminal metadata object emitted by the service-side of the RPC, or
+ None if there was no such value.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def code(self):
+ """Accesses the code emitted by the service-side of the RPC.
+
+ This method blocks until the value is available or is known not to have been
+ emitted from the service-side of the RPC.
+
+ Returns:
+ The code object emitted by the service-side of the RPC, or None if there
+ was no such value.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def details(self):
+ """Accesses the details value emitted by the service-side of the RPC.
+
+ This method blocks until the value is available or is known not to have been
+ emitted from the service-side of the RPC.
+
+ Returns:
+ The details value emitted by the service-side of the RPC, or None if there
+ was no such value.
+ """
+ raise NotImplementedError()
+
+
+class ServicerContext(RpcContext, metaclass=abc.ABCMeta):
+ """A context object passed to method implementations."""
+
+ @abc.abstractmethod
+ def invocation_metadata(self):
+ """Accesses the metadata from the invocation-side of the RPC.
+
+ This method blocks until the value is available or is known not to have been
+ emitted from the invocation-side of the RPC.
+
+ Returns:
+ The metadata object emitted by the invocation-side of the RPC, or None if
+ there was no such value.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def initial_metadata(self, initial_metadata):
+ """Accepts the service-side initial metadata value of the RPC.
+
+ This method need not be called by method implementations if they have no
+ service-side initial metadata to transmit.
+
+ Args:
+ initial_metadata: The service-side initial metadata value of the RPC to
+ be transmitted to the invocation side of the RPC.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def terminal_metadata(self, terminal_metadata):
+ """Accepts the service-side terminal metadata value of the RPC.
+
+ This method need not be called by method implementations if they have no
+ service-side terminal metadata to transmit.
+
+ Args:
+ terminal_metadata: The service-side terminal metadata value of the RPC to
+ be transmitted to the invocation side of the RPC.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def code(self, code):
+ """Accepts the service-side code of the RPC.
+
+ This method need not be called by method implementations if they have no
+ code to transmit.
+
+ Args:
+ code: The code of the RPC to be transmitted to the invocation side of the
+ RPC.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def details(self, details):
+ """Accepts the service-side details of the RPC.
+
+ This method need not be called by method implementations if they have no
+ service-side details to transmit.
+
+ Args:
+ details: The service-side details value of the RPC to be transmitted to
+ the invocation side of the RPC.
+ """
+ raise NotImplementedError()
+
+
+class ResponseReceiver(abc.ABC):
+ """Invocation-side object used to accept the output of an RPC."""
+
+ @abc.abstractmethod
+ def initial_metadata(self, initial_metadata):
+ """Receives the initial metadata from the service-side of the RPC.
+
+ Args:
+ initial_metadata: The initial metadata object emitted from the
+ service-side of the RPC.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def response(self, response):
+ """Receives a response from the service-side of the RPC.
+
+ Args:
+ response: A response object emitted from the service-side of the RPC.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def complete(self, terminal_metadata, code, details):
+ """Receives the completion values emitted from the service-side of the RPC.
+
+ Args:
+ terminal_metadata: The terminal metadata object emitted from the
+ service-side of the RPC.
+ code: The code object emitted from the service-side of the RPC.
+ details: The details object emitted from the service-side of the RPC.
+ """
+ raise NotImplementedError()
+
+
+class UnaryUnaryMultiCallable(abc.ABC):
+ """Affords invoking a unary-unary RPC in any call style."""
+
+ @abc.abstractmethod
+ def __call__(self,
+ request,
+ timeout,
+ metadata=None,
+ with_call=False,
+ protocol_options=None):
+ """Synchronously invokes the underlying RPC.
+
+ Args:
+ request: The request value for the RPC.
+ timeout: A duration of time in seconds to allow for the RPC.
+ metadata: A metadata value to be passed to the service-side of
+ the RPC.
+ with_call: Whether or not to return a Call for the RPC in addition
+ to the response.
+ protocol_options: A value specified by the provider of a Face interface
+ implementation affording custom state and behavior.
+
+ Returns:
+ The response value for the RPC, and a Call for the RPC if with_call was
+ set to True at invocation.
+
+ Raises:
+ AbortionError: Indicating that the RPC was aborted.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def future(self, request, timeout, metadata=None, protocol_options=None):
+ """Asynchronously invokes the underlying RPC.
+
+ Args:
+ request: The request value for the RPC.
+ timeout: A duration of time in seconds to allow for the RPC.
+ metadata: A metadata value to be passed to the service-side of
+ the RPC.
+ protocol_options: A value specified by the provider of a Face interface
+ implementation affording custom state and behavior.
+
+ Returns:
+ An object that is both a Call for the RPC and a future.Future. In the
+ event of RPC completion, the returned Future's result value will be the
+ response value of the RPC. In the event of RPC abortion, the returned
+ Future's exception value will be an AbortionError.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def event(self,
+ request,
+ receiver,
+ abortion_callback,
+ timeout,
+ metadata=None,
+ protocol_options=None):
+ """Asynchronously invokes the underlying RPC.
+
+ Args:
+ request: The request value for the RPC.
+ receiver: A ResponseReceiver to be passed the response data of the RPC.
+ abortion_callback: A callback to be called and passed an Abortion value
+ in the event of RPC abortion.
+ timeout: A duration of time in seconds to allow for the RPC.
+ metadata: A metadata value to be passed to the service-side of
+ the RPC.
+ protocol_options: A value specified by the provider of a Face interface
+ implementation affording custom state and behavior.
+
+ Returns:
+ A Call for the RPC.
+ """
+ raise NotImplementedError()
+
+
+class UnaryStreamMultiCallable(abc.ABC):
+ """Affords invoking a unary-stream RPC in any call style."""
+
+ @abc.abstractmethod
+ def __call__(self, request, timeout, metadata=None, protocol_options=None):
+ """Invokes the underlying RPC.
+
+ Args:
+ request: The request value for the RPC.
+ timeout: A duration of time in seconds to allow for the RPC.
+ metadata: A metadata value to be passed to the service-side of
+ the RPC.
+ protocol_options: A value specified by the provider of a Face interface
+ implementation affording custom state and behavior.
+
+ Returns:
+ An object that is both a Call for the RPC and an iterator of response
+ values. Drawing response values from the returned iterator may raise
+ AbortionError indicating abortion of the RPC.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def event(self,
+ request,
+ receiver,
+ abortion_callback,
+ timeout,
+ metadata=None,
+ protocol_options=None):
+ """Asynchronously invokes the underlying RPC.
+
+ Args:
+ request: The request value for the RPC.
+ receiver: A ResponseReceiver to be passed the response data of the RPC.
+ abortion_callback: A callback to be called and passed an Abortion value
+ in the event of RPC abortion.
+ timeout: A duration of time in seconds to allow for the RPC.
+ metadata: A metadata value to be passed to the service-side of
+ the RPC.
+ protocol_options: A value specified by the provider of a Face interface
+ implementation affording custom state and behavior.
+
+ Returns:
+ A Call object for the RPC.
+ """
+ raise NotImplementedError()
+
+
+class StreamUnaryMultiCallable(abc.ABC):
+ """Affords invoking a stream-unary RPC in any call style."""
+
+ @abc.abstractmethod
+ def __call__(self,
+ request_iterator,
+ timeout,
+ metadata=None,
+ with_call=False,
+ protocol_options=None):
+ """Synchronously invokes the underlying RPC.
+
+ Args:
+ request_iterator: An iterator that yields request values for the RPC.
+ timeout: A duration of time in seconds to allow for the RPC.
+ metadata: A metadata value to be passed to the service-side of
+ the RPC.
+ with_call: Whether or not to return a Call for the RPC in addition
+ to the response.
+ protocol_options: A value specified by the provider of a Face interface
+ implementation affording custom state and behavior.
+
+ Returns:
+ The response value for the RPC, and a Call for the RPC if with_call was
+ set to True at invocation.
+
+ Raises:
+ AbortionError: Indicating that the RPC was aborted.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def future(self,
+ request_iterator,
+ timeout,
+ metadata=None,
+ protocol_options=None):
+ """Asynchronously invokes the underlying RPC.
+
+ Args:
+ request_iterator: An iterator that yields request values for the RPC.
+ timeout: A duration of time in seconds to allow for the RPC.
+ metadata: A metadata value to be passed to the service-side of
+ the RPC.
+ protocol_options: A value specified by the provider of a Face interface
+ implementation affording custom state and behavior.
+
+ Returns:
+ An object that is both a Call for the RPC and a future.Future. In the
+ event of RPC completion, the returned Future's result value will be the
+ response value of the RPC. In the event of RPC abortion, the returned
+ Future's exception value will be an AbortionError.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def event(self,
+ receiver,
+ abortion_callback,
+ timeout,
+ metadata=None,
+ protocol_options=None):
+ """Asynchronously invokes the underlying RPC.
+
+ Args:
+ receiver: A ResponseReceiver to be passed the response data of the RPC.
+ abortion_callback: A callback to be called and passed an Abortion value
+ in the event of RPC abortion.
+ timeout: A duration of time in seconds to allow for the RPC.
+ metadata: A metadata value to be passed to the service-side of
+ the RPC.
+ protocol_options: A value specified by the provider of a Face interface
+ implementation affording custom state and behavior.
+
+ Returns:
+ A single object that is both a Call object for the RPC and a
+ stream.Consumer to which the request values of the RPC should be passed.
+ """
+ raise NotImplementedError()
+
+
+class StreamStreamMultiCallable(abc.ABC):
+ """Affords invoking a stream-stream RPC in any call style."""
+
+ @abc.abstractmethod
+ def __call__(self,
+ request_iterator,
+ timeout,
+ metadata=None,
+ protocol_options=None):
+ """Invokes the underlying RPC.
+
+ Args:
+ request_iterator: An iterator that yields request values for the RPC.
+ timeout: A duration of time in seconds to allow for the RPC.
+ metadata: A metadata value to be passed to the service-side of
+ the RPC.
+ protocol_options: A value specified by the provider of a Face interface
+ implementation affording custom state and behavior.
+
+ Returns:
+ An object that is both a Call for the RPC and an iterator of response
+ values. Drawing response values from the returned iterator may raise
+ AbortionError indicating abortion of the RPC.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def event(self,
+ receiver,
+ abortion_callback,
+ timeout,
+ metadata=None,
+ protocol_options=None):
+ """Asynchronously invokes the underlying RPC.
+
+ Args:
+ receiver: A ResponseReceiver to be passed the response data of the RPC.
+ abortion_callback: A callback to be called and passed an Abortion value
+ in the event of RPC abortion.
+ timeout: A duration of time in seconds to allow for the RPC.
+ metadata: A metadata value to be passed to the service-side of
+ the RPC.
+ protocol_options: A value specified by the provider of a Face interface
+ implementation affording custom state and behavior.
+
+ Returns:
+ A single object that is both a Call object for the RPC and a
+ stream.Consumer to which the request values of the RPC should be passed.
+ """
+ raise NotImplementedError()
+
+
+class MethodImplementation(abc.ABC):
+ """A sum type that describes a method implementation.
+
+ Attributes:
+ cardinality: A cardinality.Cardinality value.
+ style: A style.Service value.
+ unary_unary_inline: The implementation of the method as a callable value
+ that takes a request value and a ServicerContext object and returns a
+ response value. Only non-None if cardinality is
+ cardinality.Cardinality.UNARY_UNARY and style is style.Service.INLINE.
+ unary_stream_inline: The implementation of the method as a callable value
+ that takes a request value and a ServicerContext object and returns an
+ iterator of response values. Only non-None if cardinality is
+ cardinality.Cardinality.UNARY_STREAM and style is style.Service.INLINE.
+ stream_unary_inline: The implementation of the method as a callable value
+ that takes an iterator of request values and a ServicerContext object and
+ returns a response value. Only non-None if cardinality is
+ cardinality.Cardinality.STREAM_UNARY and style is style.Service.INLINE.
+ stream_stream_inline: The implementation of the method as a callable value
+ that takes an iterator of request values and a ServicerContext object and
+ returns an iterator of response values. Only non-None if cardinality is
+ cardinality.Cardinality.STREAM_STREAM and style is style.Service.INLINE.
+ unary_unary_event: The implementation of the method as a callable value that
+ takes a request value, a response callback to which to pass the response
+ value of the RPC, and a ServicerContext. Only non-None if cardinality is
+ cardinality.Cardinality.UNARY_UNARY and style is style.Service.EVENT.
+ unary_stream_event: The implementation of the method as a callable value
+ that takes a request value, a stream.Consumer to which to pass the
+ response values of the RPC, and a ServicerContext. Only non-None if
+ cardinality is cardinality.Cardinality.UNARY_STREAM and style is
+ style.Service.EVENT.
+ stream_unary_event: The implementation of the method as a callable value
+ that takes a response callback to which to pass the response value of the
+ RPC and a ServicerContext and returns a stream.Consumer to which the
+ request values of the RPC should be passed. Only non-None if cardinality
+ is cardinality.Cardinality.STREAM_UNARY and style is style.Service.EVENT.
+ stream_stream_event: The implementation of the method as a callable value
+ that takes a stream.Consumer to which to pass the response values of the
+ RPC and a ServicerContext and returns a stream.Consumer to which the
+ request values of the RPC should be passed. Only non-None if cardinality
+ is cardinality.Cardinality.STREAM_STREAM and style is
+ style.Service.EVENT.
+ """
+
+
+class MultiMethodImplementation(abc.ABC):
+ """A general type able to service many methods."""
+
+ @abc.abstractmethod
+ def service(self, group, method, response_consumer, context):
+ """Services an RPC.
+
+ Args:
+ group: The group identifier of the RPC.
+ method: The method identifier of the RPC.
+ response_consumer: A stream.Consumer to be called to accept the response
+ values of the RPC.
+ context: a ServicerContext object.
+
+ Returns:
+ A stream.Consumer with which to accept the request values of the RPC. The
+ consumer returned from this method may or may not be invoked to
+ completion: in the case of RPC abortion, RPC Framework will simply stop
+ passing values to this object. Implementations must not assume that this
+ object will be called to completion of the request stream or even called
+ at all.
+
+ Raises:
+ abandonment.Abandoned: May or may not be raised when the RPC has been
+ aborted.
+ NoSuchMethodError: If this MultiMethod does not recognize the given group
+ and name for the RPC and is not able to service the RPC.
+ """
+ raise NotImplementedError()
+
+
+class GenericStub(abc.ABC):
+ """Affords RPC invocation via generic methods."""
+
+ @abc.abstractmethod
+ def blocking_unary_unary(self,
+ group,
+ method,
+ request,
+ timeout,
+ metadata=None,
+ with_call=False,
+ protocol_options=None):
+ """Invokes a unary-request-unary-response method.
+
+ This method blocks until either returning the response value of the RPC
+ (in the event of RPC completion) or raising an exception (in the event of
+ RPC abortion).
+
+ Args:
+ group: The group identifier of the RPC.
+ method: The method identifier of the RPC.
+ request: The request value for the RPC.
+ timeout: A duration of time in seconds to allow for the RPC.
+ metadata: A metadata value to be passed to the service-side of the RPC.
+ with_call: Whether or not to return a Call for the RPC in addition
+ to the response.
+ protocol_options: A value specified by the provider of a Face interface
+ implementation affording custom state and behavior.
+
+ Returns:
+ The response value for the RPC, and a Call for the RPC if with_call was
+ set to True at invocation.
+
+ Raises:
+ AbortionError: Indicating that the RPC was aborted.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def future_unary_unary(self,
+ group,
+ method,
+ request,
+ timeout,
+ metadata=None,
+ protocol_options=None):
+ """Invokes a unary-request-unary-response method.
+
+ Args:
+ group: The group identifier of the RPC.
+ method: The method identifier of the RPC.
+ request: The request value for the RPC.
+ timeout: A duration of time in seconds to allow for the RPC.
+ metadata: A metadata value to be passed to the service-side of the RPC.
+ protocol_options: A value specified by the provider of a Face interface
+ implementation affording custom state and behavior.
+
+ Returns:
+ An object that is both a Call for the RPC and a future.Future. In the
+ event of RPC completion, the returned Future's result value will be the
+ response value of the RPC. In the event of RPC abortion, the returned
+ Future's exception value will be an AbortionError.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def inline_unary_stream(self,
+ group,
+ method,
+ request,
+ timeout,
+ metadata=None,
+ protocol_options=None):
+ """Invokes a unary-request-stream-response method.
+
+ Args:
+ group: The group identifier of the RPC.
+ method: The method identifier of the RPC.
+ request: The request value for the RPC.
+ timeout: A duration of time in seconds to allow for the RPC.
+ metadata: A metadata value to be passed to the service-side of the RPC.
+ protocol_options: A value specified by the provider of a Face interface
+ implementation affording custom state and behavior.
+
+ Returns:
+ An object that is both a Call for the RPC and an iterator of response
+ values. Drawing response values from the returned iterator may raise
+ AbortionError indicating abortion of the RPC.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def blocking_stream_unary(self,
+ group,
+ method,
+ request_iterator,
+ timeout,
+ metadata=None,
+ with_call=False,
+ protocol_options=None):
+ """Invokes a stream-request-unary-response method.
+
+ This method blocks until either returning the response value of the RPC
+ (in the event of RPC completion) or raising an exception (in the event of
+ RPC abortion).
+
+ Args:
+ group: The group identifier of the RPC.
+ method: The method identifier of the RPC.
+ request_iterator: An iterator that yields request values for the RPC.
+ timeout: A duration of time in seconds to allow for the RPC.
+ metadata: A metadata value to be passed to the service-side of the RPC.
+ with_call: Whether or not to return a Call for the RPC in addition
+ to the response.
+ protocol_options: A value specified by the provider of a Face interface
+ implementation affording custom state and behavior.
+
+ Returns:
+ The response value for the RPC, and a Call for the RPC if with_call was
+ set to True at invocation.
+
+ Raises:
+ AbortionError: Indicating that the RPC was aborted.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def future_stream_unary(self,
+ group,
+ method,
+ request_iterator,
+ timeout,
+ metadata=None,
+ protocol_options=None):
+ """Invokes a stream-request-unary-response method.
+
+ Args:
+ group: The group identifier of the RPC.
+ method: The method identifier of the RPC.
+ request_iterator: An iterator that yields request values for the RPC.
+ timeout: A duration of time in seconds to allow for the RPC.
+ metadata: A metadata value to be passed to the service-side of the RPC.
+ protocol_options: A value specified by the provider of a Face interface
+ implementation affording custom state and behavior.
+
+ Returns:
+ An object that is both a Call for the RPC and a future.Future. In the
+ event of RPC completion, the returned Future's result value will be the
+ response value of the RPC. In the event of RPC abortion, the returned
+ Future's exception value will be an AbortionError.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def inline_stream_stream(self,
+ group,
+ method,
+ request_iterator,
+ timeout,
+ metadata=None,
+ protocol_options=None):
+ """Invokes a stream-request-stream-response method.
+
+ Args:
+ group: The group identifier of the RPC.
+ method: The method identifier of the RPC.
+ request_iterator: An iterator that yields request values for the RPC.
+ timeout: A duration of time in seconds to allow for the RPC.
+ metadata: A metadata value to be passed to the service-side of the RPC.
+ protocol_options: A value specified by the provider of a Face interface
+ implementation affording custom state and behavior.
+
+ Returns:
+ An object that is both a Call for the RPC and an iterator of response
+ values. Drawing response values from the returned iterator may raise
+ AbortionError indicating abortion of the RPC.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def event_unary_unary(self,
+ group,
+ method,
+ request,
+ receiver,
+ abortion_callback,
+ timeout,
+ metadata=None,
+ protocol_options=None):
+ """Event-driven invocation of a unary-request-unary-response method.
+
+ Args:
+ group: The group identifier of the RPC.
+ method: The method identifier of the RPC.
+ request: The request value for the RPC.
+ receiver: A ResponseReceiver to be passed the response data of the RPC.
+ abortion_callback: A callback to be called and passed an Abortion value
+ in the event of RPC abortion.
+ timeout: A duration of time in seconds to allow for the RPC.
+ metadata: A metadata value to be passed to the service-side of the RPC.
+ protocol_options: A value specified by the provider of a Face interface
+ implementation affording custom state and behavior.
+
+ Returns:
+ A Call for the RPC.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def event_unary_stream(self,
+ group,
+ method,
+ request,
+ receiver,
+ abortion_callback,
+ timeout,
+ metadata=None,
+ protocol_options=None):
+ """Event-driven invocation of a unary-request-stream-response method.
+
+ Args:
+ group: The group identifier of the RPC.
+ method: The method identifier of the RPC.
+ request: The request value for the RPC.
+ receiver: A ResponseReceiver to be passed the response data of the RPC.
+ abortion_callback: A callback to be called and passed an Abortion value
+ in the event of RPC abortion.
+ timeout: A duration of time in seconds to allow for the RPC.
+ metadata: A metadata value to be passed to the service-side of the RPC.
+ protocol_options: A value specified by the provider of a Face interface
+ implementation affording custom state and behavior.
+
+ Returns:
+ A Call for the RPC.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def event_stream_unary(self,
+ group,
+ method,
+ receiver,
+ abortion_callback,
+ timeout,
+ metadata=None,
+ protocol_options=None):
+ """Event-driven invocation of a unary-request-unary-response method.
+
+ Args:
+ group: The group identifier of the RPC.
+ method: The method identifier of the RPC.
+ receiver: A ResponseReceiver to be passed the response data of the RPC.
+ abortion_callback: A callback to be called and passed an Abortion value
+ in the event of RPC abortion.
+ timeout: A duration of time in seconds to allow for the RPC.
+ metadata: A metadata value to be passed to the service-side of the RPC.
+ protocol_options: A value specified by the provider of a Face interface
+ implementation affording custom state and behavior.
+
+ Returns:
+ A pair of a Call object for the RPC and a stream.Consumer to which the
+ request values of the RPC should be passed.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def event_stream_stream(self,
+ group,
+ method,
+ receiver,
+ abortion_callback,
+ timeout,
+ metadata=None,
+ protocol_options=None):
+ """Event-driven invocation of a unary-request-stream-response method.
+
+ Args:
+ group: The group identifier of the RPC.
+ method: The method identifier of the RPC.
+ receiver: A ResponseReceiver to be passed the response data of the RPC.
+ abortion_callback: A callback to be called and passed an Abortion value
+ in the event of RPC abortion.
+ timeout: A duration of time in seconds to allow for the RPC.
+ metadata: A metadata value to be passed to the service-side of the RPC.
+ protocol_options: A value specified by the provider of a Face interface
+ implementation affording custom state and behavior.
+
+ Returns:
+ A pair of a Call object for the RPC and a stream.Consumer to which the
+ request values of the RPC should be passed.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def unary_unary(self, group, method):
+ """Creates a UnaryUnaryMultiCallable for a unary-unary method.
+
+ Args:
+ group: The group identifier of the RPC.
+ method: The method identifier of the RPC.
+
+ Returns:
+ A UnaryUnaryMultiCallable value for the named unary-unary method.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def unary_stream(self, group, method):
+ """Creates a UnaryStreamMultiCallable for a unary-stream method.
+
+ Args:
+ group: The group identifier of the RPC.
+ method: The method identifier of the RPC.
+
+ Returns:
+ A UnaryStreamMultiCallable value for the named unary-stream method.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def stream_unary(self, group, method):
+ """Creates a StreamUnaryMultiCallable for a stream-unary method.
+
+ Args:
+ group: The group identifier of the RPC.
+ method: The method identifier of the RPC.
+
+ Returns:
+ A StreamUnaryMultiCallable value for the named stream-unary method.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def stream_stream(self, group, method):
+ """Creates a StreamStreamMultiCallable for a stream-stream method.
+
+ Args:
+ group: The group identifier of the RPC.
+ method: The method identifier of the RPC.
+
+ Returns:
+ A StreamStreamMultiCallable value for the named stream-stream method.
+ """
+ raise NotImplementedError()
+
+
+class DynamicStub(abc.ABC):
+ """Affords RPC invocation via attributes corresponding to afforded methods.
+
+ Instances of this type may be scoped to a single group so that attribute
+ access is unambiguous.
+
+ Instances of this type respond to attribute access as follows: if the
+ requested attribute is the name of a unary-unary method, the value of the
+ attribute will be a UnaryUnaryMultiCallable with which to invoke an RPC; if
+ the requested attribute is the name of a unary-stream method, the value of the
+ attribute will be a UnaryStreamMultiCallable with which to invoke an RPC; if
+ the requested attribute is the name of a stream-unary method, the value of the
+ attribute will be a StreamUnaryMultiCallable with which to invoke an RPC; and
+ if the requested attribute is the name of a stream-stream method, the value of
+ the attribute will be a StreamStreamMultiCallable with which to invoke an RPC.
+ """
diff --git a/contrib/python/grpcio/py3/grpc/framework/interfaces/face/utilities.py b/contrib/python/grpcio/py3/grpc/framework/interfaces/face/utilities.py
new file mode 100644
index 0000000000..f27bd67615
--- /dev/null
+++ b/contrib/python/grpcio/py3/grpc/framework/interfaces/face/utilities.py
@@ -0,0 +1,168 @@
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Utilities for RPC Framework's Face interface."""
+
+import collections
+
+# stream is referenced from specification in this module.
+from grpc.framework.common import cardinality
+from grpc.framework.common import style
+from grpc.framework.foundation import stream # pylint: disable=unused-import
+from grpc.framework.interfaces.face import face
+
+
+class _MethodImplementation(face.MethodImplementation,
+ collections.namedtuple('_MethodImplementation', [
+ 'cardinality',
+ 'style',
+ 'unary_unary_inline',
+ 'unary_stream_inline',
+ 'stream_unary_inline',
+ 'stream_stream_inline',
+ 'unary_unary_event',
+ 'unary_stream_event',
+ 'stream_unary_event',
+ 'stream_stream_event',
+ ])):
+ pass
+
+
+def unary_unary_inline(behavior):
+ """Creates an face.MethodImplementation for the given behavior.
+
+ Args:
+ behavior: The implementation of a unary-unary RPC method as a callable value
+ that takes a request value and an face.ServicerContext object and
+ returns a response value.
+
+ Returns:
+ An face.MethodImplementation derived from the given behavior.
+ """
+ return _MethodImplementation(cardinality.Cardinality.UNARY_UNARY,
+ style.Service.INLINE, behavior, None, None,
+ None, None, None, None, None)
+
+
+def unary_stream_inline(behavior):
+ """Creates an face.MethodImplementation for the given behavior.
+
+ Args:
+ behavior: The implementation of a unary-stream RPC method as a callable
+ value that takes a request value and an face.ServicerContext object and
+ returns an iterator of response values.
+
+ Returns:
+ An face.MethodImplementation derived from the given behavior.
+ """
+ return _MethodImplementation(cardinality.Cardinality.UNARY_STREAM,
+ style.Service.INLINE, None, behavior, None,
+ None, None, None, None, None)
+
+
+def stream_unary_inline(behavior):
+ """Creates an face.MethodImplementation for the given behavior.
+
+ Args:
+ behavior: The implementation of a stream-unary RPC method as a callable
+ value that takes an iterator of request values and an
+ face.ServicerContext object and returns a response value.
+
+ Returns:
+ An face.MethodImplementation derived from the given behavior.
+ """
+ return _MethodImplementation(cardinality.Cardinality.STREAM_UNARY,
+ style.Service.INLINE, None, None, behavior,
+ None, None, None, None, None)
+
+
+def stream_stream_inline(behavior):
+ """Creates an face.MethodImplementation for the given behavior.
+
+ Args:
+ behavior: The implementation of a stream-stream RPC method as a callable
+ value that takes an iterator of request values and an
+ face.ServicerContext object and returns an iterator of response values.
+
+ Returns:
+ An face.MethodImplementation derived from the given behavior.
+ """
+ return _MethodImplementation(cardinality.Cardinality.STREAM_STREAM,
+ style.Service.INLINE, None, None, None,
+ behavior, None, None, None, None)
+
+
+def unary_unary_event(behavior):
+ """Creates an face.MethodImplementation for the given behavior.
+
+ Args:
+ behavior: The implementation of a unary-unary RPC method as a callable
+ value that takes a request value, a response callback to which to pass
+ the response value of the RPC, and an face.ServicerContext.
+
+ Returns:
+ An face.MethodImplementation derived from the given behavior.
+ """
+ return _MethodImplementation(cardinality.Cardinality.UNARY_UNARY,
+ style.Service.EVENT, None, None, None, None,
+ behavior, None, None, None)
+
+
+def unary_stream_event(behavior):
+ """Creates an face.MethodImplementation for the given behavior.
+
+ Args:
+ behavior: The implementation of a unary-stream RPC method as a callable
+ value that takes a request value, a stream.Consumer to which to pass
+ the response values of the RPC, and an face.ServicerContext.
+
+ Returns:
+ An face.MethodImplementation derived from the given behavior.
+ """
+ return _MethodImplementation(cardinality.Cardinality.UNARY_STREAM,
+ style.Service.EVENT, None, None, None, None,
+ None, behavior, None, None)
+
+
+def stream_unary_event(behavior):
+ """Creates an face.MethodImplementation for the given behavior.
+
+ Args:
+ behavior: The implementation of a stream-unary RPC method as a callable
+ value that takes a response callback to which to pass the response value
+ of the RPC and an face.ServicerContext and returns a stream.Consumer to
+ which the request values of the RPC should be passed.
+
+ Returns:
+ An face.MethodImplementation derived from the given behavior.
+ """
+ return _MethodImplementation(cardinality.Cardinality.STREAM_UNARY,
+ style.Service.EVENT, None, None, None, None,
+ None, None, behavior, None)
+
+
+def stream_stream_event(behavior):
+ """Creates an face.MethodImplementation for the given behavior.
+
+ Args:
+ behavior: The implementation of a stream-stream RPC method as a callable
+ value that takes a stream.Consumer to which to pass the response values
+ of the RPC and an face.ServicerContext and returns a stream.Consumer to
+ which the request values of the RPC should be passed.
+
+ Returns:
+ An face.MethodImplementation derived from the given behavior.
+ """
+ return _MethodImplementation(cardinality.Cardinality.STREAM_STREAM,
+ style.Service.EVENT, None, None, None, None,
+ None, None, None, behavior)
diff --git a/contrib/python/grpcio/py3/ya.make b/contrib/python/grpcio/py3/ya.make
new file mode 100644
index 0000000000..6ac843ec53
--- /dev/null
+++ b/contrib/python/grpcio/py3/ya.make
@@ -0,0 +1,100 @@
+# Generated by devtools/yamaker from nixpkgs 22.11.
+
+PY3_LIBRARY()
+
+LICENSE(
+ Apache-2.0 AND
+ BSD-3-Clause AND
+ MPL-2.0 AND
+ Python-2.0
+)
+
+LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
+
+VERSION(1.54.2)
+
+ORIGINAL_SOURCE(mirror://pypi/g/grpcio/grpcio-1.54.2.tar.gz)
+
+PEERDIR(
+ contrib/libs/grpc
+ contrib/python/six
+)
+
+ADDINCL(
+ ${ARCADIA_BUILD_ROOT}/contrib/libs/grpc
+ contrib/libs/grpc
+ contrib/libs/grpc/include
+ FOR
+ cython
+ contrib/python/grpcio/py3
+)
+
+IF (SANITIZER_TYPE == undefined)
+ CXXFLAGS(-fno-sanitize=function)
+ENDIF()
+
+NO_COMPILER_WARNINGS()
+
+NO_LINT()
+
+PY_SRCS(
+ TOP_LEVEL
+ grpc/__init__.py
+ grpc/_auth.py
+ grpc/_channel.py
+ grpc/_common.py
+ grpc/_compression.py
+ grpc/_cython/__init__.py
+ grpc/_cython/_cygrpc/__init__.py
+ grpc/_cython/cygrpc.pyx
+ grpc/_grpcio_metadata.py
+ grpc/_interceptor.py
+ grpc/_plugin_wrapping.py
+ grpc/_runtime_protos.py
+ grpc/_server.py
+ grpc/_simple_stubs.py
+ grpc/_typing.py
+ grpc/_utilities.py
+ grpc/aio/__init__.py
+ grpc/aio/_base_call.py
+ grpc/aio/_base_channel.py
+ grpc/aio/_base_server.py
+ grpc/aio/_call.py
+ grpc/aio/_channel.py
+ grpc/aio/_interceptor.py
+ grpc/aio/_metadata.py
+ grpc/aio/_server.py
+ grpc/aio/_typing.py
+ grpc/aio/_utils.py
+ grpc/beta/__init__.py
+ grpc/beta/_client_adaptations.py
+ grpc/beta/_metadata.py
+ grpc/beta/_server_adaptations.py
+ grpc/beta/implementations.py
+ grpc/beta/interfaces.py
+ grpc/beta/utilities.py
+ grpc/experimental/__init__.py
+ grpc/experimental/aio/__init__.py
+ grpc/experimental/gevent.py
+ grpc/experimental/session_cache.py
+ grpc/framework/__init__.py
+ grpc/framework/common/__init__.py
+ grpc/framework/common/cardinality.py
+ grpc/framework/common/style.py
+ grpc/framework/foundation/__init__.py
+ grpc/framework/foundation/abandonment.py
+ grpc/framework/foundation/callable_util.py
+ grpc/framework/foundation/future.py
+ grpc/framework/foundation/logging_pool.py
+ grpc/framework/foundation/stream.py
+ grpc/framework/foundation/stream_util.py
+ grpc/framework/interfaces/__init__.py
+ grpc/framework/interfaces/base/__init__.py
+ grpc/framework/interfaces/base/base.py
+ grpc/framework/interfaces/base/utilities.py
+ grpc/framework/interfaces/face/__init__.py
+ grpc/framework/interfaces/face/face.py
+ grpc/framework/interfaces/face/utilities.py
+)
+
+END()
diff --git a/contrib/python/grpcio/ya.make b/contrib/python/grpcio/ya.make
new file mode 100644
index 0000000000..90333288f2
--- /dev/null
+++ b/contrib/python/grpcio/ya.make
@@ -0,0 +1,18 @@
+PY23_LIBRARY()
+
+LICENSE(Service-Py23-Proxy)
+
+IF (PYTHON2)
+ PEERDIR(contrib/python/grpcio/py2)
+ELSE()
+ PEERDIR(contrib/python/grpcio/py3)
+ENDIF()
+
+NO_LINT()
+
+END()
+
+RECURSE(
+ py2
+ py3
+)