author    monory <monory@yandex-team.ru>  2022-02-10 16:48:22 +0300
committer Daniil Cherednik <dcherednik@yandex-team.ru>  2022-02-10 16:48:22 +0300
commit    4c8dea05d4cd98e3c7740c7c524d05b0d88716f7 (patch)
tree      20b6dfddb38d1ee32ca3faf368808a870126c41d
parent    1d17d1551eecd4d143ecf2fb6fb05a9d71ccd6f5 (diff)
download  ydb-4c8dea05d4cd98e3c7740c7c524d05b0d88716f7.tar.gz

Restoring authorship annotation for <monory@yandex-team.ru>. Commit 1 of 2.
-rw-r--r--  contrib/python/tornado/tornado-4/.dist-info/METADATA  136
-rw-r--r--  contrib/python/tornado/tornado-4/.dist-info/top_level.txt  2
-rw-r--r--  contrib/python/tornado/tornado-4/tornado/__init__.py  58
-rw-r--r--  contrib/python/tornado/tornado-4/tornado/_locale_data.py  170
-rw-r--r--  contrib/python/tornado/tornado-4/tornado/auth.py  2308
-rw-r--r--  contrib/python/tornado/tornado-4/tornado/autoreload.py  668
-rw-r--r--  contrib/python/tornado/tornado-4/tornado/concurrent.py  1042
-rw-r--r--  contrib/python/tornado/tornado-4/tornado/curl_httpclient.py  1048
-rw-r--r--  contrib/python/tornado/tornado-4/tornado/escape.py  796
-rw-r--r--  contrib/python/tornado/tornado-4/tornado/gen.py  2608
-rw-r--r--  contrib/python/tornado/tornado-4/tornado/http1connection.py  1484
-rw-r--r--  contrib/python/tornado/tornado-4/tornado/httpclient.py  1356
-rw-r--r--  contrib/python/tornado/tornado-4/tornado/httpserver.py  650
-rw-r--r--  contrib/python/tornado/tornado-4/tornado/httputil.py  2040
-rw-r--r--  contrib/python/tornado/tornado-4/tornado/ioloop.py  2082
-rw-r--r--  contrib/python/tornado/tornado-4/tornado/iostream.py  3136
-rw-r--r--  contrib/python/tornado/tornado-4/tornado/locale.py  1042
-rw-r--r--  contrib/python/tornado/tornado-4/tornado/locks.py  1024
-rw-r--r--  contrib/python/tornado/tornado-4/tornado/log.py  580
-rw-r--r--  contrib/python/tornado/tornado-4/tornado/netutil.py  1062
-rw-r--r--  contrib/python/tornado/tornado-4/tornado/options.py  1188
-rw-r--r--  contrib/python/tornado/tornado-4/tornado/platform/asyncio.py  444
-rw-r--r--  contrib/python/tornado/tornado-4/tornado/platform/auto.py  118
-rw-r--r--  contrib/python/tornado/tornado-4/tornado/platform/caresresolver.py  158
-rw-r--r--  contrib/python/tornado/tornado-4/tornado/platform/common.py  226
-rw-r--r--  contrib/python/tornado/tornado-4/tornado/platform/epoll.py  52
-rw-r--r--  contrib/python/tornado/tornado-4/tornado/platform/interface.py  134
-rw-r--r--  contrib/python/tornado/tornado-4/tornado/platform/kqueue.py  182
-rw-r--r--  contrib/python/tornado/tornado-4/tornado/platform/posix.py  140
-rw-r--r--  contrib/python/tornado/tornado-4/tornado/platform/select.py  152
-rw-r--r--  contrib/python/tornado/tornado-4/tornado/platform/twisted.py  1182
-rw-r--r--  contrib/python/tornado/tornado-4/tornado/platform/windows.py  40
-rw-r--r--  contrib/python/tornado/tornado-4/tornado/platform/ya.make  2
-rw-r--r--  contrib/python/tornado/tornado-4/tornado/process.py  730
-rw-r--r--  contrib/python/tornado/tornado-4/tornado/queues.py  734
-rw-r--r--  contrib/python/tornado/tornado-4/tornado/routing.py  1250
-rw-r--r--  contrib/python/tornado/tornado-4/tornado/simple_httpclient.py  1134
-rw-r--r--  contrib/python/tornado/tornado-4/tornado/speedups.c  104
-rw-r--r--  contrib/python/tornado/tornado-4/tornado/stack_context.py  780
-rw-r--r--  contrib/python/tornado/tornado-4/tornado/tcpclient.py  448
-rw-r--r--  contrib/python/tornado/tornado-4/tornado/tcpserver.py  600
-rw-r--r--  contrib/python/tornado/tornado-4/tornado/template.py  1956
-rw-r--r--  contrib/python/tornado/tornado-4/tornado/testing.py  1482
-rw-r--r--  contrib/python/tornado/tornado-4/tornado/util.py  950
-rw-r--r--  contrib/python/tornado/tornado-4/tornado/web.py  6572
-rw-r--r--  contrib/python/tornado/tornado-4/tornado/websocket.py  2488
-rw-r--r--  contrib/python/tornado/tornado-4/tornado/wsgi.py  716
-rw-r--r--  contrib/python/tornado/tornado-4/tornado/ya.make  2
-rw-r--r--  contrib/python/tornado/tornado-4/ya.make  164
-rw-r--r--  contrib/python/tornado/ya.make  6
-rw-r--r--  contrib/python/ya.make  8
-rw-r--r--  ydb/tests/tools/ydb_serializable/lib/ya.make  2
52 files changed, 23718 insertions, 23718 deletions
diff --git a/contrib/python/tornado/tornado-4/.dist-info/METADATA b/contrib/python/tornado/tornado-4/.dist-info/METADATA
index 3c02bd9be2..eda127b0ff 100644
--- a/contrib/python/tornado/tornado-4/.dist-info/METADATA
+++ b/contrib/python/tornado/tornado-4/.dist-info/METADATA
@@ -1,68 +1,68 @@
-Metadata-Version: 1.1
-Name: tornado
-Version: 4.5.3
-Summary: Tornado is a Python web framework and asynchronous networking library, originally developed at FriendFeed.
-Home-page: http://www.tornadoweb.org/
-Author: Facebook
-Author-email: python-tornado@googlegroups.com
-License: http://www.apache.org/licenses/LICENSE-2.0
-Description: Tornado Web Server
- ==================
-
- .. image:: https://badges.gitter.im/Join%20Chat.svg
- :alt: Join the chat at https://gitter.im/tornadoweb/tornado
- :target: https://gitter.im/tornadoweb/tornado?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge
-
- `Tornado <http://www.tornadoweb.org>`_ is a Python web framework and
- asynchronous networking library, originally developed at `FriendFeed
- <http://friendfeed.com>`_. By using non-blocking network I/O, Tornado
- can scale to tens of thousands of open connections, making it ideal for
- `long polling <http://en.wikipedia.org/wiki/Push_technology#Long_Polling>`_,
- `WebSockets <http://en.wikipedia.org/wiki/WebSocket>`_, and other
- applications that require a long-lived connection to each user.
-
- Hello, world
- ------------
-
- Here is a simple "Hello, world" example web app for Tornado:
-
- .. code-block:: python
-
- import tornado.ioloop
- import tornado.web
-
- class MainHandler(tornado.web.RequestHandler):
- def get(self):
- self.write("Hello, world")
-
- def make_app():
- return tornado.web.Application([
- (r"/", MainHandler),
- ])
-
- if __name__ == "__main__":
- app = make_app()
- app.listen(8888)
- tornado.ioloop.IOLoop.current().start()
-
- This example does not use any of Tornado's asynchronous features; for
- that see this `simple chat room
- <https://github.com/tornadoweb/tornado/tree/stable/demos/chat>`_.
-
- Documentation
- -------------
-
- Documentation and links to additional resources are available at
- http://www.tornadoweb.org
-
-Platform: UNKNOWN
-Classifier: License :: OSI Approved :: Apache Software License
-Classifier: Programming Language :: Python :: 2
-Classifier: Programming Language :: Python :: 2.7
-Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.3
-Classifier: Programming Language :: Python :: 3.4
-Classifier: Programming Language :: Python :: 3.5
-Classifier: Programming Language :: Python :: 3.6
-Classifier: Programming Language :: Python :: Implementation :: CPython
-Classifier: Programming Language :: Python :: Implementation :: PyPy
+Metadata-Version: 1.1
+Name: tornado
+Version: 4.5.3
+Summary: Tornado is a Python web framework and asynchronous networking library, originally developed at FriendFeed.
+Home-page: http://www.tornadoweb.org/
+Author: Facebook
+Author-email: python-tornado@googlegroups.com
+License: http://www.apache.org/licenses/LICENSE-2.0
+Description: Tornado Web Server
+ ==================
+
+ .. image:: https://badges.gitter.im/Join%20Chat.svg
+ :alt: Join the chat at https://gitter.im/tornadoweb/tornado
+ :target: https://gitter.im/tornadoweb/tornado?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge
+
+ `Tornado <http://www.tornadoweb.org>`_ is a Python web framework and
+ asynchronous networking library, originally developed at `FriendFeed
+ <http://friendfeed.com>`_. By using non-blocking network I/O, Tornado
+ can scale to tens of thousands of open connections, making it ideal for
+ `long polling <http://en.wikipedia.org/wiki/Push_technology#Long_Polling>`_,
+ `WebSockets <http://en.wikipedia.org/wiki/WebSocket>`_, and other
+ applications that require a long-lived connection to each user.
+
+ Hello, world
+ ------------
+
+ Here is a simple "Hello, world" example web app for Tornado:
+
+ .. code-block:: python
+
+ import tornado.ioloop
+ import tornado.web
+
+ class MainHandler(tornado.web.RequestHandler):
+ def get(self):
+ self.write("Hello, world")
+
+ def make_app():
+ return tornado.web.Application([
+ (r"/", MainHandler),
+ ])
+
+ if __name__ == "__main__":
+ app = make_app()
+ app.listen(8888)
+ tornado.ioloop.IOLoop.current().start()
+
+ This example does not use any of Tornado's asynchronous features; for
+ that see this `simple chat room
+ <https://github.com/tornadoweb/tornado/tree/stable/demos/chat>`_.
+
+ Documentation
+ -------------
+
+ Documentation and links to additional resources are available at
+ http://www.tornadoweb.org
+
+Platform: UNKNOWN
+Classifier: License :: OSI Approved :: Apache Software License
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
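
The packaged description above ends by noting that the hello-world app uses none of Tornado's asynchronous features. For orientation, a minimal sketch of the coroutine style it alludes to, in the Tornado 4.x idiom this tree vendors (handler name and fetched URL are illustrative assumptions, not part of the package):

    import tornado.gen
    import tornado.httpclient
    import tornado.ioloop
    import tornado.web

    class AsyncHandler(tornado.web.RequestHandler):
        @tornado.gen.coroutine
        def get(self):
            # The yield suspends this handler without blocking the IOLoop,
            # so other connections keep being served during the fetch.
            client = tornado.httpclient.AsyncHTTPClient()
            response = yield client.fetch("http://www.tornadoweb.org/")
            self.write("fetched %d bytes" % len(response.body))

    if __name__ == "__main__":
        app = tornado.web.Application([(r"/", AsyncHandler)])
        app.listen(8888)
        tornado.ioloop.IOLoop.current().start()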
diff --git a/contrib/python/tornado/tornado-4/.dist-info/top_level.txt b/contrib/python/tornado/tornado-4/.dist-info/top_level.txt
index c3368dfa51..bf65ecf44d 100644
--- a/contrib/python/tornado/tornado-4/.dist-info/top_level.txt
+++ b/contrib/python/tornado/tornado-4/.dist-info/top_level.txt
@@ -1 +1 @@
-tornado
+tornado
diff --git a/contrib/python/tornado/tornado-4/tornado/__init__.py b/contrib/python/tornado/tornado-4/tornado/__init__.py
index fa71bf6133..07b1f2ae0e 100644
--- a/contrib/python/tornado/tornado-4/tornado/__init__.py
+++ b/contrib/python/tornado/tornado-4/tornado/__init__.py
@@ -1,29 +1,29 @@
-#!/usr/bin/env python
-#
-# Copyright 2009 Facebook
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""The Tornado web server and tools."""
-
-from __future__ import absolute_import, division, print_function
-
-# version is a human-readable version number.
-
-# version_info is a four-tuple for programmatic comparison. The first
-# three numbers are the components of the version number. The fourth
-# is zero for an official release, positive for a development branch,
-# or negative for a release candidate or beta (after the base version
-# number has been incremented)
-version = "4.5.3"
-version_info = (4, 5, 3, 0)
+#!/usr/bin/env python
+#
+# Copyright 2009 Facebook
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""The Tornado web server and tools."""
+
+from __future__ import absolute_import, division, print_function
+
+# version is a human-readable version number.
+
+# version_info is a four-tuple for programmatic comparison. The first
+# three numbers are the components of the version number. The fourth
+# is zero for an official release, positive for a development branch,
+# or negative for a release candidate or beta (after the base version
+# number has been incremented)
+version = "4.5.3"
+version_info = (4, 5, 3, 0)
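
The comments restored above distinguish the human-readable version string from the version_info four-tuple meant for programmatic comparison. A small sketch of the comparison that tuple enables (the 5.0 threshold is an arbitrary example, not something this commit defines):

    import tornado

    # Tuples compare element by element, so a release gate is one comparison.
    if tornado.version_info < (5, 0, 0, 0):
        print("Tornado 4.x code path:", tornado.version)
    else:
        print("Tornado 5+ code path:", tornado.version)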
diff --git a/contrib/python/tornado/tornado-4/tornado/_locale_data.py b/contrib/python/tornado/tornado-4/tornado/_locale_data.py
index 6fa2c29742..cee11e7deb 100644
--- a/contrib/python/tornado/tornado-4/tornado/_locale_data.py
+++ b/contrib/python/tornado/tornado-4/tornado/_locale_data.py
@@ -1,85 +1,85 @@
-#!/usr/bin/env python
-# coding: utf-8
-#
-# Copyright 2012 Facebook
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Data used by the tornado.locale module."""
-
-from __future__ import absolute_import, division, print_function
-
-LOCALE_NAMES = {
- "af_ZA": {"name_en": u"Afrikaans", "name": u"Afrikaans"},
- "am_ET": {"name_en": u"Amharic", "name": u"አማርኛ"},
- "ar_AR": {"name_en": u"Arabic", "name": u"العربية"},
- "bg_BG": {"name_en": u"Bulgarian", "name": u"Български"},
- "bn_IN": {"name_en": u"Bengali", "name": u"বাংলা"},
- "bs_BA": {"name_en": u"Bosnian", "name": u"Bosanski"},
- "ca_ES": {"name_en": u"Catalan", "name": u"Català"},
- "cs_CZ": {"name_en": u"Czech", "name": u"Čeština"},
- "cy_GB": {"name_en": u"Welsh", "name": u"Cymraeg"},
- "da_DK": {"name_en": u"Danish", "name": u"Dansk"},
- "de_DE": {"name_en": u"German", "name": u"Deutsch"},
- "el_GR": {"name_en": u"Greek", "name": u"Ελληνικά"},
- "en_GB": {"name_en": u"English (UK)", "name": u"English (UK)"},
- "en_US": {"name_en": u"English (US)", "name": u"English (US)"},
- "es_ES": {"name_en": u"Spanish (Spain)", "name": u"Español (España)"},
- "es_LA": {"name_en": u"Spanish", "name": u"Español"},
- "et_EE": {"name_en": u"Estonian", "name": u"Eesti"},
- "eu_ES": {"name_en": u"Basque", "name": u"Euskara"},
- "fa_IR": {"name_en": u"Persian", "name": u"فارسی"},
- "fi_FI": {"name_en": u"Finnish", "name": u"Suomi"},
- "fr_CA": {"name_en": u"French (Canada)", "name": u"Français (Canada)"},
- "fr_FR": {"name_en": u"French", "name": u"Français"},
- "ga_IE": {"name_en": u"Irish", "name": u"Gaeilge"},
- "gl_ES": {"name_en": u"Galician", "name": u"Galego"},
- "he_IL": {"name_en": u"Hebrew", "name": u"עברית"},
- "hi_IN": {"name_en": u"Hindi", "name": u"हिन्दी"},
- "hr_HR": {"name_en": u"Croatian", "name": u"Hrvatski"},
- "hu_HU": {"name_en": u"Hungarian", "name": u"Magyar"},
- "id_ID": {"name_en": u"Indonesian", "name": u"Bahasa Indonesia"},
- "is_IS": {"name_en": u"Icelandic", "name": u"Íslenska"},
- "it_IT": {"name_en": u"Italian", "name": u"Italiano"},
- "ja_JP": {"name_en": u"Japanese", "name": u"日本語"},
- "ko_KR": {"name_en": u"Korean", "name": u"한국어"},
- "lt_LT": {"name_en": u"Lithuanian", "name": u"Lietuvių"},
- "lv_LV": {"name_en": u"Latvian", "name": u"Latviešu"},
- "mk_MK": {"name_en": u"Macedonian", "name": u"Македонски"},
- "ml_IN": {"name_en": u"Malayalam", "name": u"മലയാളം"},
- "ms_MY": {"name_en": u"Malay", "name": u"Bahasa Melayu"},
- "nb_NO": {"name_en": u"Norwegian (bokmal)", "name": u"Norsk (bokmål)"},
- "nl_NL": {"name_en": u"Dutch", "name": u"Nederlands"},
- "nn_NO": {"name_en": u"Norwegian (nynorsk)", "name": u"Norsk (nynorsk)"},
- "pa_IN": {"name_en": u"Punjabi", "name": u"ਪੰਜਾਬੀ"},
- "pl_PL": {"name_en": u"Polish", "name": u"Polski"},
- "pt_BR": {"name_en": u"Portuguese (Brazil)", "name": u"Português (Brasil)"},
- "pt_PT": {"name_en": u"Portuguese (Portugal)", "name": u"Português (Portugal)"},
- "ro_RO": {"name_en": u"Romanian", "name": u"Română"},
- "ru_RU": {"name_en": u"Russian", "name": u"Русский"},
- "sk_SK": {"name_en": u"Slovak", "name": u"Slovenčina"},
- "sl_SI": {"name_en": u"Slovenian", "name": u"Slovenščina"},
- "sq_AL": {"name_en": u"Albanian", "name": u"Shqip"},
- "sr_RS": {"name_en": u"Serbian", "name": u"Српски"},
- "sv_SE": {"name_en": u"Swedish", "name": u"Svenska"},
- "sw_KE": {"name_en": u"Swahili", "name": u"Kiswahili"},
- "ta_IN": {"name_en": u"Tamil", "name": u"தமிழ்"},
- "te_IN": {"name_en": u"Telugu", "name": u"తెలుగు"},
- "th_TH": {"name_en": u"Thai", "name": u"ภาษาไทย"},
- "tl_PH": {"name_en": u"Filipino", "name": u"Filipino"},
- "tr_TR": {"name_en": u"Turkish", "name": u"Türkçe"},
- "uk_UA": {"name_en": u"Ukraini ", "name": u"Українська"},
- "vi_VN": {"name_en": u"Vietnamese", "name": u"Tiếng Việt"},
- "zh_CN": {"name_en": u"Chinese (Simplified)", "name": u"中文(简体)"},
- "zh_TW": {"name_en": u"Chinese (Traditional)", "name": u"中文(繁體)"},
-}
+#!/usr/bin/env python
+# coding: utf-8
+#
+# Copyright 2012 Facebook
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Data used by the tornado.locale module."""
+
+from __future__ import absolute_import, division, print_function
+
+LOCALE_NAMES = {
+ "af_ZA": {"name_en": u"Afrikaans", "name": u"Afrikaans"},
+ "am_ET": {"name_en": u"Amharic", "name": u"አማርኛ"},
+ "ar_AR": {"name_en": u"Arabic", "name": u"العربية"},
+ "bg_BG": {"name_en": u"Bulgarian", "name": u"Български"},
+ "bn_IN": {"name_en": u"Bengali", "name": u"বাংলা"},
+ "bs_BA": {"name_en": u"Bosnian", "name": u"Bosanski"},
+ "ca_ES": {"name_en": u"Catalan", "name": u"Català"},
+ "cs_CZ": {"name_en": u"Czech", "name": u"Čeština"},
+ "cy_GB": {"name_en": u"Welsh", "name": u"Cymraeg"},
+ "da_DK": {"name_en": u"Danish", "name": u"Dansk"},
+ "de_DE": {"name_en": u"German", "name": u"Deutsch"},
+ "el_GR": {"name_en": u"Greek", "name": u"Ελληνικά"},
+ "en_GB": {"name_en": u"English (UK)", "name": u"English (UK)"},
+ "en_US": {"name_en": u"English (US)", "name": u"English (US)"},
+ "es_ES": {"name_en": u"Spanish (Spain)", "name": u"Español (España)"},
+ "es_LA": {"name_en": u"Spanish", "name": u"Español"},
+ "et_EE": {"name_en": u"Estonian", "name": u"Eesti"},
+ "eu_ES": {"name_en": u"Basque", "name": u"Euskara"},
+ "fa_IR": {"name_en": u"Persian", "name": u"فارسی"},
+ "fi_FI": {"name_en": u"Finnish", "name": u"Suomi"},
+ "fr_CA": {"name_en": u"French (Canada)", "name": u"Français (Canada)"},
+ "fr_FR": {"name_en": u"French", "name": u"Français"},
+ "ga_IE": {"name_en": u"Irish", "name": u"Gaeilge"},
+ "gl_ES": {"name_en": u"Galician", "name": u"Galego"},
+ "he_IL": {"name_en": u"Hebrew", "name": u"עברית"},
+ "hi_IN": {"name_en": u"Hindi", "name": u"हिन्दी"},
+ "hr_HR": {"name_en": u"Croatian", "name": u"Hrvatski"},
+ "hu_HU": {"name_en": u"Hungarian", "name": u"Magyar"},
+ "id_ID": {"name_en": u"Indonesian", "name": u"Bahasa Indonesia"},
+ "is_IS": {"name_en": u"Icelandic", "name": u"Íslenska"},
+ "it_IT": {"name_en": u"Italian", "name": u"Italiano"},
+ "ja_JP": {"name_en": u"Japanese", "name": u"日本語"},
+ "ko_KR": {"name_en": u"Korean", "name": u"한국어"},
+ "lt_LT": {"name_en": u"Lithuanian", "name": u"Lietuvių"},
+ "lv_LV": {"name_en": u"Latvian", "name": u"Latviešu"},
+ "mk_MK": {"name_en": u"Macedonian", "name": u"Македонски"},
+ "ml_IN": {"name_en": u"Malayalam", "name": u"മലയാളം"},
+ "ms_MY": {"name_en": u"Malay", "name": u"Bahasa Melayu"},
+ "nb_NO": {"name_en": u"Norwegian (bokmal)", "name": u"Norsk (bokmål)"},
+ "nl_NL": {"name_en": u"Dutch", "name": u"Nederlands"},
+ "nn_NO": {"name_en": u"Norwegian (nynorsk)", "name": u"Norsk (nynorsk)"},
+ "pa_IN": {"name_en": u"Punjabi", "name": u"ਪੰਜਾਬੀ"},
+ "pl_PL": {"name_en": u"Polish", "name": u"Polski"},
+ "pt_BR": {"name_en": u"Portuguese (Brazil)", "name": u"Português (Brasil)"},
+ "pt_PT": {"name_en": u"Portuguese (Portugal)", "name": u"Português (Portugal)"},
+ "ro_RO": {"name_en": u"Romanian", "name": u"Română"},
+ "ru_RU": {"name_en": u"Russian", "name": u"Русский"},
+ "sk_SK": {"name_en": u"Slovak", "name": u"Slovenčina"},
+ "sl_SI": {"name_en": u"Slovenian", "name": u"Slovenščina"},
+ "sq_AL": {"name_en": u"Albanian", "name": u"Shqip"},
+ "sr_RS": {"name_en": u"Serbian", "name": u"Српски"},
+ "sv_SE": {"name_en": u"Swedish", "name": u"Svenska"},
+ "sw_KE": {"name_en": u"Swahili", "name": u"Kiswahili"},
+ "ta_IN": {"name_en": u"Tamil", "name": u"தமிழ்"},
+ "te_IN": {"name_en": u"Telugu", "name": u"తెలుగు"},
+ "th_TH": {"name_en": u"Thai", "name": u"ภาษาไทย"},
+ "tl_PH": {"name_en": u"Filipino", "name": u"Filipino"},
+ "tr_TR": {"name_en": u"Turkish", "name": u"Türkçe"},
+ "uk_UA": {"name_en": u"Ukraini ", "name": u"Українська"},
+ "vi_VN": {"name_en": u"Vietnamese", "name": u"Tiếng Việt"},
+ "zh_CN": {"name_en": u"Chinese (Simplified)", "name": u"中文(简体)"},
+ "zh_TW": {"name_en": u"Chinese (Traditional)", "name": u"中文(繁體)"},
+}
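
LOCALE_NAMES above maps locale codes to English and native-script display names for the tornado.locale module. A minimal sketch of reading the mapping directly (the lookup key is chosen arbitrarily from the entries shown above):

    from tornado._locale_data import LOCALE_NAMES

    # Each entry carries both an English and a native-script name.
    info = LOCALE_NAMES["ru_RU"]
    print(info["name_en"], "/", info["name"])  # Russian / Русский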
diff --git a/contrib/python/tornado/tornado-4/tornado/auth.py b/contrib/python/tornado/tornado-4/tornado/auth.py
index f02d289808..edd1801731 100644
--- a/contrib/python/tornado/tornado-4/tornado/auth.py
+++ b/contrib/python/tornado/tornado-4/tornado/auth.py
@@ -1,1154 +1,1154 @@
-#!/usr/bin/env python
-#
-# Copyright 2009 Facebook
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""This module contains implementations of various third-party
-authentication schemes.
-
-All the classes in this file are class mixins designed to be used with
-the `tornado.web.RequestHandler` class. They are used in two ways:
-
-* On a login handler, use methods such as ``authenticate_redirect()``,
- ``authorize_redirect()``, and ``get_authenticated_user()`` to
- establish the user's identity and store authentication tokens to your
- database and/or cookies.
-* In non-login handlers, use methods such as ``facebook_request()``
- or ``twitter_request()`` to use the authentication tokens to make
- requests to the respective services.
-
-They all take slightly different arguments due to the fact all these
-services implement authentication and authorization slightly differently.
-See the individual service classes below for complete documentation.
-
-Example usage for Google OAuth:
-
-.. testcode::
-
- class GoogleOAuth2LoginHandler(tornado.web.RequestHandler,
- tornado.auth.GoogleOAuth2Mixin):
- @tornado.gen.coroutine
- def get(self):
- if self.get_argument('code', False):
- user = yield self.get_authenticated_user(
- redirect_uri='http://your.site.com/auth/google',
- code=self.get_argument('code'))
- # Save the user with e.g. set_secure_cookie
- else:
- yield self.authorize_redirect(
- redirect_uri='http://your.site.com/auth/google',
- client_id=self.settings['google_oauth']['key'],
- scope=['profile', 'email'],
- response_type='code',
- extra_params={'approval_prompt': 'auto'})
-
-.. testoutput::
- :hide:
-
-
-.. versionchanged:: 4.0
- All of the callback interfaces in this module are now guaranteed
- to run their callback with an argument of ``None`` on error.
- Previously some functions would do this while others would simply
- terminate the request on their own. This change also ensures that
- errors are more consistently reported through the ``Future`` interfaces.
-"""
-
-from __future__ import absolute_import, division, print_function
-
-import base64
-import binascii
-import functools
-import hashlib
-import hmac
-import time
-import uuid
-
-from tornado.concurrent import TracebackFuture, return_future, chain_future
-from tornado import gen
-from tornado import httpclient
-from tornado import escape
-from tornado.httputil import url_concat
-from tornado.log import gen_log
-from tornado.stack_context import ExceptionStackContext
-from tornado.util import unicode_type, ArgReplacer, PY3
-
-if PY3:
- import urllib.parse as urlparse
- import urllib.parse as urllib_parse
- long = int
-else:
- import urlparse
- import urllib as urllib_parse
-
-
-class AuthError(Exception):
- pass
-
-
-def _auth_future_to_callback(callback, future):
- try:
- result = future.result()
- except AuthError as e:
- gen_log.warning(str(e))
- result = None
- callback(result)
-
-
-def _auth_return_future(f):
- """Similar to tornado.concurrent.return_future, but uses the auth
- module's legacy callback interface.
-
- Note that when using this decorator the ``callback`` parameter
- inside the function will actually be a future.
- """
- replacer = ArgReplacer(f, 'callback')
-
- @functools.wraps(f)
- def wrapper(*args, **kwargs):
- future = TracebackFuture()
- callback, args, kwargs = replacer.replace(future, args, kwargs)
- if callback is not None:
- future.add_done_callback(
- functools.partial(_auth_future_to_callback, callback))
-
- def handle_exception(typ, value, tb):
- if future.done():
- return False
- else:
- future.set_exc_info((typ, value, tb))
- return True
- with ExceptionStackContext(handle_exception):
- f(*args, **kwargs)
- return future
- return wrapper
-
-
-class OpenIdMixin(object):
- """Abstract implementation of OpenID and Attribute Exchange.
-
- Class attributes:
-
- * ``_OPENID_ENDPOINT``: the identity provider's URI.
- """
- @return_future
- def authenticate_redirect(self, callback_uri=None,
- ax_attrs=["name", "email", "language", "username"],
- callback=None):
- """Redirects to the authentication URL for this service.
-
- After authentication, the service will redirect back to the given
- callback URI with additional parameters including ``openid.mode``.
-
- We request the given attributes for the authenticated user by
- default (name, email, language, and username). If you don't need
- all those attributes for your app, you can request fewer with
- the ax_attrs keyword argument.
-
- .. versionchanged:: 3.1
- Returns a `.Future` and takes an optional callback. These are
- not strictly necessary as this method is synchronous,
- but they are supplied for consistency with
- `OAuthMixin.authorize_redirect`.
- """
- callback_uri = callback_uri or self.request.uri
- args = self._openid_args(callback_uri, ax_attrs=ax_attrs)
- self.redirect(self._OPENID_ENDPOINT + "?" + urllib_parse.urlencode(args))
- callback()
-
- @_auth_return_future
- def get_authenticated_user(self, callback, http_client=None):
- """Fetches the authenticated user data upon redirect.
-
- This method should be called by the handler that receives the
- redirect from the `authenticate_redirect()` method (which is
- often the same as the one that calls it; in that case you would
- call `get_authenticated_user` if the ``openid.mode`` parameter
- is present and `authenticate_redirect` if it is not).
-
- The result of this method will generally be used to set a cookie.
- """
- # Verify the OpenID response via direct request to the OP
- args = dict((k, v[-1]) for k, v in self.request.arguments.items())
- args["openid.mode"] = u"check_authentication"
- url = self._OPENID_ENDPOINT
- if http_client is None:
- http_client = self.get_auth_http_client()
- http_client.fetch(url, functools.partial(
- self._on_authentication_verified, callback),
- method="POST", body=urllib_parse.urlencode(args))
-
- def _openid_args(self, callback_uri, ax_attrs=[], oauth_scope=None):
- url = urlparse.urljoin(self.request.full_url(), callback_uri)
- args = {
- "openid.ns": "http://specs.openid.net/auth/2.0",
- "openid.claimed_id":
- "http://specs.openid.net/auth/2.0/identifier_select",
- "openid.identity":
- "http://specs.openid.net/auth/2.0/identifier_select",
- "openid.return_to": url,
- "openid.realm": urlparse.urljoin(url, '/'),
- "openid.mode": "checkid_setup",
- }
- if ax_attrs:
- args.update({
- "openid.ns.ax": "http://openid.net/srv/ax/1.0",
- "openid.ax.mode": "fetch_request",
- })
- ax_attrs = set(ax_attrs)
- required = []
- if "name" in ax_attrs:
- ax_attrs -= set(["name", "firstname", "fullname", "lastname"])
- required += ["firstname", "fullname", "lastname"]
- args.update({
- "openid.ax.type.firstname":
- "http://axschema.org/namePerson/first",
- "openid.ax.type.fullname":
- "http://axschema.org/namePerson",
- "openid.ax.type.lastname":
- "http://axschema.org/namePerson/last",
- })
- known_attrs = {
- "email": "http://axschema.org/contact/email",
- "language": "http://axschema.org/pref/language",
- "username": "http://axschema.org/namePerson/friendly",
- }
- for name in ax_attrs:
- args["openid.ax.type." + name] = known_attrs[name]
- required.append(name)
- args["openid.ax.required"] = ",".join(required)
- if oauth_scope:
- args.update({
- "openid.ns.oauth":
- "http://specs.openid.net/extensions/oauth/1.0",
- "openid.oauth.consumer": self.request.host.split(":")[0],
- "openid.oauth.scope": oauth_scope,
- })
- return args
-
- def _on_authentication_verified(self, future, response):
- if response.error or b"is_valid:true" not in response.body:
- future.set_exception(AuthError(
- "Invalid OpenID response: %s" % (response.error or
- response.body)))
- return
-
- # Make sure we got back at least an email from attribute exchange
- ax_ns = None
- for name in self.request.arguments:
- if name.startswith("openid.ns.") and \
- self.get_argument(name) == u"http://openid.net/srv/ax/1.0":
- ax_ns = name[10:]
- break
-
- def get_ax_arg(uri):
- if not ax_ns:
- return u""
- prefix = "openid." + ax_ns + ".type."
- ax_name = None
- for name in self.request.arguments.keys():
- if self.get_argument(name) == uri and name.startswith(prefix):
- part = name[len(prefix):]
- ax_name = "openid." + ax_ns + ".value." + part
- break
- if not ax_name:
- return u""
- return self.get_argument(ax_name, u"")
-
- email = get_ax_arg("http://axschema.org/contact/email")
- name = get_ax_arg("http://axschema.org/namePerson")
- first_name = get_ax_arg("http://axschema.org/namePerson/first")
- last_name = get_ax_arg("http://axschema.org/namePerson/last")
- username = get_ax_arg("http://axschema.org/namePerson/friendly")
- locale = get_ax_arg("http://axschema.org/pref/language").lower()
- user = dict()
- name_parts = []
- if first_name:
- user["first_name"] = first_name
- name_parts.append(first_name)
- if last_name:
- user["last_name"] = last_name
- name_parts.append(last_name)
- if name:
- user["name"] = name
- elif name_parts:
- user["name"] = u" ".join(name_parts)
- elif email:
- user["name"] = email.split("@")[0]
- if email:
- user["email"] = email
- if locale:
- user["locale"] = locale
- if username:
- user["username"] = username
- claimed_id = self.get_argument("openid.claimed_id", None)
- if claimed_id:
- user["claimed_id"] = claimed_id
- future.set_result(user)
-
- def get_auth_http_client(self):
- """Returns the `.AsyncHTTPClient` instance to be used for auth requests.
-
- May be overridden by subclasses to use an HTTP client other than
- the default.
- """
- return httpclient.AsyncHTTPClient()
-
-
-class OAuthMixin(object):
- """Abstract implementation of OAuth 1.0 and 1.0a.
-
- See `TwitterMixin` below for an example implementation.
-
- Class attributes:
-
- * ``_OAUTH_AUTHORIZE_URL``: The service's OAuth authorization url.
- * ``_OAUTH_ACCESS_TOKEN_URL``: The service's OAuth access token url.
- * ``_OAUTH_VERSION``: May be either "1.0" or "1.0a".
- * ``_OAUTH_NO_CALLBACKS``: Set this to True if the service requires
- advance registration of callbacks.
-
- Subclasses must also override the `_oauth_get_user_future` and
- `_oauth_consumer_token` methods.
- """
- @return_future
- def authorize_redirect(self, callback_uri=None, extra_params=None,
- http_client=None, callback=None):
- """Redirects the user to obtain OAuth authorization for this service.
-
- The ``callback_uri`` may be omitted if you have previously
- registered a callback URI with the third-party service. For
- some services (including Friendfeed), you must use a
- previously-registered callback URI and cannot specify a
- callback via this method.
-
- This method sets a cookie called ``_oauth_request_token`` which is
- subsequently used (and cleared) in `get_authenticated_user` for
- security purposes.
-
- Note that this method is asynchronous, although it calls
- `.RequestHandler.finish` for you so it may not be necessary
- to pass a callback or use the `.Future` it returns. However,
- if this method is called from a function decorated with
- `.gen.coroutine`, you must call it with ``yield`` to keep the
- response from being closed prematurely.
-
- .. versionchanged:: 3.1
- Now returns a `.Future` and takes an optional callback, for
- compatibility with `.gen.coroutine`.
- """
- if callback_uri and getattr(self, "_OAUTH_NO_CALLBACKS", False):
- raise Exception("This service does not support oauth_callback")
- if http_client is None:
- http_client = self.get_auth_http_client()
- if getattr(self, "_OAUTH_VERSION", "1.0a") == "1.0a":
- http_client.fetch(
- self._oauth_request_token_url(callback_uri=callback_uri,
- extra_params=extra_params),
- functools.partial(
- self._on_request_token,
- self._OAUTH_AUTHORIZE_URL,
- callback_uri,
- callback))
- else:
- http_client.fetch(
- self._oauth_request_token_url(),
- functools.partial(
- self._on_request_token, self._OAUTH_AUTHORIZE_URL,
- callback_uri,
- callback))
-
- @_auth_return_future
- def get_authenticated_user(self, callback, http_client=None):
- """Gets the OAuth authorized user and access token.
-
- This method should be called from the handler for your
- OAuth callback URL to complete the registration process. We run the
- callback with the authenticated user dictionary. This dictionary
- will contain an ``access_key`` which can be used to make authorized
- requests to this service on behalf of the user. The dictionary will
- also contain other fields such as ``name``, depending on the service
- used.
- """
- future = callback
- request_key = escape.utf8(self.get_argument("oauth_token"))
- oauth_verifier = self.get_argument("oauth_verifier", None)
- request_cookie = self.get_cookie("_oauth_request_token")
- if not request_cookie:
- future.set_exception(AuthError(
- "Missing OAuth request token cookie"))
- return
- self.clear_cookie("_oauth_request_token")
- cookie_key, cookie_secret = [base64.b64decode(escape.utf8(i)) for i in request_cookie.split("|")]
- if cookie_key != request_key:
- future.set_exception(AuthError(
- "Request token does not match cookie"))
- return
- token = dict(key=cookie_key, secret=cookie_secret)
- if oauth_verifier:
- token["verifier"] = oauth_verifier
- if http_client is None:
- http_client = self.get_auth_http_client()
- http_client.fetch(self._oauth_access_token_url(token),
- functools.partial(self._on_access_token, callback))
-
- def _oauth_request_token_url(self, callback_uri=None, extra_params=None):
- consumer_token = self._oauth_consumer_token()
- url = self._OAUTH_REQUEST_TOKEN_URL
- args = dict(
- oauth_consumer_key=escape.to_basestring(consumer_token["key"]),
- oauth_signature_method="HMAC-SHA1",
- oauth_timestamp=str(int(time.time())),
- oauth_nonce=escape.to_basestring(binascii.b2a_hex(uuid.uuid4().bytes)),
- oauth_version="1.0",
- )
- if getattr(self, "_OAUTH_VERSION", "1.0a") == "1.0a":
- if callback_uri == "oob":
- args["oauth_callback"] = "oob"
- elif callback_uri:
- args["oauth_callback"] = urlparse.urljoin(
- self.request.full_url(), callback_uri)
- if extra_params:
- args.update(extra_params)
- signature = _oauth10a_signature(consumer_token, "GET", url, args)
- else:
- signature = _oauth_signature(consumer_token, "GET", url, args)
-
- args["oauth_signature"] = signature
- return url + "?" + urllib_parse.urlencode(args)
-
- def _on_request_token(self, authorize_url, callback_uri, callback,
- response):
- if response.error:
- raise Exception("Could not get request token: %s" % response.error)
- request_token = _oauth_parse_response(response.body)
- data = (base64.b64encode(escape.utf8(request_token["key"])) + b"|" +
- base64.b64encode(escape.utf8(request_token["secret"])))
- self.set_cookie("_oauth_request_token", data)
- args = dict(oauth_token=request_token["key"])
- if callback_uri == "oob":
- self.finish(authorize_url + "?" + urllib_parse.urlencode(args))
- callback()
- return
- elif callback_uri:
- args["oauth_callback"] = urlparse.urljoin(
- self.request.full_url(), callback_uri)
- self.redirect(authorize_url + "?" + urllib_parse.urlencode(args))
- callback()
-
- def _oauth_access_token_url(self, request_token):
- consumer_token = self._oauth_consumer_token()
- url = self._OAUTH_ACCESS_TOKEN_URL
- args = dict(
- oauth_consumer_key=escape.to_basestring(consumer_token["key"]),
- oauth_token=escape.to_basestring(request_token["key"]),
- oauth_signature_method="HMAC-SHA1",
- oauth_timestamp=str(int(time.time())),
- oauth_nonce=escape.to_basestring(binascii.b2a_hex(uuid.uuid4().bytes)),
- oauth_version="1.0",
- )
- if "verifier" in request_token:
- args["oauth_verifier"] = request_token["verifier"]
-
- if getattr(self, "_OAUTH_VERSION", "1.0a") == "1.0a":
- signature = _oauth10a_signature(consumer_token, "GET", url, args,
- request_token)
- else:
- signature = _oauth_signature(consumer_token, "GET", url, args,
- request_token)
-
- args["oauth_signature"] = signature
- return url + "?" + urllib_parse.urlencode(args)
-
- def _on_access_token(self, future, response):
- if response.error:
- future.set_exception(AuthError("Could not fetch access token"))
- return
-
- access_token = _oauth_parse_response(response.body)
- self._oauth_get_user_future(access_token).add_done_callback(
- functools.partial(self._on_oauth_get_user, access_token, future))
-
- def _oauth_consumer_token(self):
- """Subclasses must override this to return their OAuth consumer keys.
-
- The return value should be a `dict` with keys ``key`` and ``secret``.
- """
- raise NotImplementedError()
-
- @return_future
- def _oauth_get_user_future(self, access_token, callback):
- """Subclasses must override this to get basic information about the
- user.
-
- Should return a `.Future` whose result is a dictionary
- containing information about the user, which may have been
- retrieved by using ``access_token`` to make a request to the
- service.
-
- The access token will be added to the returned dictionary to make
- the result of `get_authenticated_user`.
-
- For backwards compatibility, the callback-based ``_oauth_get_user``
- method is also supported.
- """
- # By default, call the old-style _oauth_get_user, but new code
- # should override this method instead.
- self._oauth_get_user(access_token, callback)
-
- def _oauth_get_user(self, access_token, callback):
- raise NotImplementedError()
-
- def _on_oauth_get_user(self, access_token, future, user_future):
- if user_future.exception() is not None:
- future.set_exception(user_future.exception())
- return
- user = user_future.result()
- if not user:
- future.set_exception(AuthError("Error getting user"))
- return
- user["access_token"] = access_token
- future.set_result(user)
-
- def _oauth_request_parameters(self, url, access_token, parameters={},
- method="GET"):
- """Returns the OAuth parameters as a dict for the given request.
-
- parameters should include all POST arguments and query string arguments
- that will be sent with the request.
- """
- consumer_token = self._oauth_consumer_token()
- base_args = dict(
- oauth_consumer_key=escape.to_basestring(consumer_token["key"]),
- oauth_token=escape.to_basestring(access_token["key"]),
- oauth_signature_method="HMAC-SHA1",
- oauth_timestamp=str(int(time.time())),
- oauth_nonce=escape.to_basestring(binascii.b2a_hex(uuid.uuid4().bytes)),
- oauth_version="1.0",
- )
- args = {}
- args.update(base_args)
- args.update(parameters)
- if getattr(self, "_OAUTH_VERSION", "1.0a") == "1.0a":
- signature = _oauth10a_signature(consumer_token, method, url, args,
- access_token)
- else:
- signature = _oauth_signature(consumer_token, method, url, args,
- access_token)
- base_args["oauth_signature"] = escape.to_basestring(signature)
- return base_args
-
- def get_auth_http_client(self):
- """Returns the `.AsyncHTTPClient` instance to be used for auth requests.
-
- May be overridden by subclasses to use an HTTP client other than
- the default.
- """
- return httpclient.AsyncHTTPClient()
-
-
-class OAuth2Mixin(object):
- """Abstract implementation of OAuth 2.0.
-
- See `FacebookGraphMixin` or `GoogleOAuth2Mixin` below for example
- implementations.
-
- Class attributes:
-
- * ``_OAUTH_AUTHORIZE_URL``: The service's authorization url.
- * ``_OAUTH_ACCESS_TOKEN_URL``: The service's access token url.
- """
- @return_future
- def authorize_redirect(self, redirect_uri=None, client_id=None,
- client_secret=None, extra_params=None,
- callback=None, scope=None, response_type="code"):
- """Redirects the user to obtain OAuth authorization for this service.
-
- Some providers require that you register a redirect URL with
- your application instead of passing one via this method. You
- should call this method to log the user in, and then call
- ``get_authenticated_user`` in the handler for your
- redirect URL to complete the authorization process.
-
- .. versionchanged:: 3.1
- Returns a `.Future` and takes an optional callback. These are
- not strictly necessary as this method is synchronous,
- but they are supplied for consistency with
- `OAuthMixin.authorize_redirect`.
- """
- args = {
- "redirect_uri": redirect_uri,
- "client_id": client_id,
- "response_type": response_type
- }
- if extra_params:
- args.update(extra_params)
- if scope:
- args['scope'] = ' '.join(scope)
- self.redirect(
- url_concat(self._OAUTH_AUTHORIZE_URL, args))
- callback()
-
- def _oauth_request_token_url(self, redirect_uri=None, client_id=None,
- client_secret=None, code=None,
- extra_params=None):
- url = self._OAUTH_ACCESS_TOKEN_URL
- args = dict(
- redirect_uri=redirect_uri,
- code=code,
- client_id=client_id,
- client_secret=client_secret,
- )
- if extra_params:
- args.update(extra_params)
- return url_concat(url, args)
-
- @_auth_return_future
- def oauth2_request(self, url, callback, access_token=None,
- post_args=None, **args):
- """Fetches the given URL auth an OAuth2 access token.
-
- If the request is a POST, ``post_args`` should be provided. Query
- string arguments should be given as keyword arguments.
-
- Example usage:
-
- .. testcode::
-
- class MainHandler(tornado.web.RequestHandler,
- tornado.auth.FacebookGraphMixin):
- @tornado.web.authenticated
- @tornado.gen.coroutine
- def get(self):
- new_entry = yield self.oauth2_request(
- "https://graph.facebook.com/me/feed",
- post_args={"message": "I am posting from my Tornado application!"},
- access_token=self.current_user["access_token"])
-
- if not new_entry:
- # Call failed; perhaps missing permission?
- yield self.authorize_redirect()
- return
- self.finish("Posted a message!")
-
- .. testoutput::
- :hide:
-
- .. versionadded:: 4.3
- """
- all_args = {}
- if access_token:
- all_args["access_token"] = access_token
- all_args.update(args)
-
- if all_args:
- url += "?" + urllib_parse.urlencode(all_args)
- callback = functools.partial(self._on_oauth2_request, callback)
- http = self.get_auth_http_client()
- if post_args is not None:
- http.fetch(url, method="POST", body=urllib_parse.urlencode(post_args),
- callback=callback)
- else:
- http.fetch(url, callback=callback)
-
- def _on_oauth2_request(self, future, response):
- if response.error:
- future.set_exception(AuthError("Error response %s fetching %s" %
- (response.error, response.request.url)))
- return
-
- future.set_result(escape.json_decode(response.body))
-
- def get_auth_http_client(self):
- """Returns the `.AsyncHTTPClient` instance to be used for auth requests.
-
- May be overridden by subclasses to use an HTTP client other than
- the default.
-
- .. versionadded:: 4.3
- """
- return httpclient.AsyncHTTPClient()
-
-
-class TwitterMixin(OAuthMixin):
- """Twitter OAuth authentication.
-
- To authenticate with Twitter, register your application with
- Twitter at http://twitter.com/apps. Then copy your Consumer Key
- and Consumer Secret to the application
- `~tornado.web.Application.settings` ``twitter_consumer_key`` and
- ``twitter_consumer_secret``. Use this mixin on the handler for the
- URL you registered as your application's callback URL.
-
- When your application is set up, you can use this mixin like this
- to authenticate the user with Twitter and get access to their stream:
-
- .. testcode::
-
- class TwitterLoginHandler(tornado.web.RequestHandler,
- tornado.auth.TwitterMixin):
- @tornado.gen.coroutine
- def get(self):
- if self.get_argument("oauth_token", None):
- user = yield self.get_authenticated_user()
- # Save the user using e.g. set_secure_cookie()
- else:
- yield self.authorize_redirect()
-
- .. testoutput::
- :hide:
-
- The user object returned by `~OAuthMixin.get_authenticated_user`
- includes the attributes ``username``, ``name``, ``access_token``,
- and all of the custom Twitter user attributes described at
- https://dev.twitter.com/docs/api/1.1/get/users/show
- """
- _OAUTH_REQUEST_TOKEN_URL = "https://api.twitter.com/oauth/request_token"
- _OAUTH_ACCESS_TOKEN_URL = "https://api.twitter.com/oauth/access_token"
- _OAUTH_AUTHORIZE_URL = "https://api.twitter.com/oauth/authorize"
- _OAUTH_AUTHENTICATE_URL = "https://api.twitter.com/oauth/authenticate"
- _OAUTH_NO_CALLBACKS = False
- _TWITTER_BASE_URL = "https://api.twitter.com/1.1"
-
- @return_future
- def authenticate_redirect(self, callback_uri=None, callback=None):
- """Just like `~OAuthMixin.authorize_redirect`, but
- auto-redirects if authorized.
-
- This is generally the right interface to use if you are using
- Twitter for single-sign on.
-
- .. versionchanged:: 3.1
- Now returns a `.Future` and takes an optional callback, for
- compatibility with `.gen.coroutine`.
- """
- http = self.get_auth_http_client()
- http.fetch(self._oauth_request_token_url(callback_uri=callback_uri),
- functools.partial(
- self._on_request_token, self._OAUTH_AUTHENTICATE_URL,
- None, callback))
-
- @_auth_return_future
- def twitter_request(self, path, callback=None, access_token=None,
- post_args=None, **args):
- """Fetches the given API path, e.g., ``statuses/user_timeline/btaylor``
-
- The path should not include the format or API version number.
- (we automatically use JSON format and API version 1).
-
- If the request is a POST, ``post_args`` should be provided. Query
- string arguments should be given as keyword arguments.
-
- All the Twitter methods are documented at http://dev.twitter.com/
-
- Many methods require an OAuth access token which you can
- obtain through `~OAuthMixin.authorize_redirect` and
- `~OAuthMixin.get_authenticated_user`. The user returned through that
- process includes an 'access_token' attribute that can be used
- to make authenticated requests via this method. Example
- usage:
-
- .. testcode::
-
- class MainHandler(tornado.web.RequestHandler,
- tornado.auth.TwitterMixin):
- @tornado.web.authenticated
- @tornado.gen.coroutine
- def get(self):
- new_entry = yield self.twitter_request(
- "/statuses/update",
- post_args={"status": "Testing Tornado Web Server"},
- access_token=self.current_user["access_token"])
- if not new_entry:
- # Call failed; perhaps missing permission?
- yield self.authorize_redirect()
- return
- self.finish("Posted a message!")
-
- .. testoutput::
- :hide:
-
- """
- if path.startswith('http:') or path.startswith('https:'):
- # Raw urls are useful for e.g. search which doesn't follow the
- # usual pattern: http://search.twitter.com/search.json
- url = path
- else:
- url = self._TWITTER_BASE_URL + path + ".json"
- # Add the OAuth resource request signature if we have credentials
- if access_token:
- all_args = {}
- all_args.update(args)
- all_args.update(post_args or {})
- method = "POST" if post_args is not None else "GET"
- oauth = self._oauth_request_parameters(
- url, access_token, all_args, method=method)
- args.update(oauth)
- if args:
- url += "?" + urllib_parse.urlencode(args)
- http = self.get_auth_http_client()
- http_callback = functools.partial(self._on_twitter_request, callback)
- if post_args is not None:
- http.fetch(url, method="POST", body=urllib_parse.urlencode(post_args),
- callback=http_callback)
- else:
- http.fetch(url, callback=http_callback)
-
- def _on_twitter_request(self, future, response):
- if response.error:
- future.set_exception(AuthError(
- "Error response %s fetching %s" % (response.error,
- response.request.url)))
- return
- future.set_result(escape.json_decode(response.body))
-
- def _oauth_consumer_token(self):
- self.require_setting("twitter_consumer_key", "Twitter OAuth")
- self.require_setting("twitter_consumer_secret", "Twitter OAuth")
- return dict(
- key=self.settings["twitter_consumer_key"],
- secret=self.settings["twitter_consumer_secret"])
-
- @gen.coroutine
- def _oauth_get_user_future(self, access_token):
- user = yield self.twitter_request(
- "/account/verify_credentials",
- access_token=access_token)
- if user:
- user["username"] = user["screen_name"]
- raise gen.Return(user)
-
-
-class GoogleOAuth2Mixin(OAuth2Mixin):
- """Google authentication using OAuth2.
-
- In order to use, register your application with Google and copy the
- relevant parameters to your application settings.
-
- * Go to the Google Dev Console at http://console.developers.google.com
- * Select a project, or create a new one.
- * In the sidebar on the left, select APIs & Auth.
- * In the list of APIs, find the Google+ API service and set it to ON.
- * In the sidebar on the left, select Credentials.
- * In the OAuth section of the page, select Create New Client ID.
- * Set the Redirect URI to point to your auth handler
- * Copy the "Client secret" and "Client ID" to the application settings as
- {"google_oauth": {"key": CLIENT_ID, "secret": CLIENT_SECRET}}
-
- .. versionadded:: 3.2
- """
- _OAUTH_AUTHORIZE_URL = "https://accounts.google.com/o/oauth2/auth"
- _OAUTH_ACCESS_TOKEN_URL = "https://accounts.google.com/o/oauth2/token"
- _OAUTH_USERINFO_URL = "https://www.googleapis.com/oauth2/v1/userinfo"
- _OAUTH_NO_CALLBACKS = False
- _OAUTH_SETTINGS_KEY = 'google_oauth'
-
- @_auth_return_future
- def get_authenticated_user(self, redirect_uri, code, callback):
- """Handles the login for the Google user, returning an access token.
-
- The result is a dictionary containing an ``access_token`` field
- ([among others](https://developers.google.com/identity/protocols/OAuth2WebServer#handlingtheresponse)).
- Unlike other ``get_authenticated_user`` methods in this package,
- this method does not return any additional information about the user.
- The returned access token can be used with `OAuth2Mixin.oauth2_request`
- to request additional information (perhaps from
- ``https://www.googleapis.com/oauth2/v2/userinfo``)
-
- Example usage:
-
- .. testcode::
-
- class GoogleOAuth2LoginHandler(tornado.web.RequestHandler,
- tornado.auth.GoogleOAuth2Mixin):
- @tornado.gen.coroutine
- def get(self):
- if self.get_argument('code', False):
- access = yield self.get_authenticated_user(
- redirect_uri='http://your.site.com/auth/google',
- code=self.get_argument('code'))
- user = yield self.oauth2_request(
- "https://www.googleapis.com/oauth2/v1/userinfo",
- access_token=access["access_token"])
- # Save the user and access token with
- # e.g. set_secure_cookie.
- else:
- yield self.authorize_redirect(
- redirect_uri='http://your.site.com/auth/google',
- client_id=self.settings['google_oauth']['key'],
- scope=['profile', 'email'],
- response_type='code',
- extra_params={'approval_prompt': 'auto'})
-
- .. testoutput::
- :hide:
-
- """
- http = self.get_auth_http_client()
- body = urllib_parse.urlencode({
- "redirect_uri": redirect_uri,
- "code": code,
- "client_id": self.settings[self._OAUTH_SETTINGS_KEY]['key'],
- "client_secret": self.settings[self._OAUTH_SETTINGS_KEY]['secret'],
- "grant_type": "authorization_code",
- })
-
- http.fetch(self._OAUTH_ACCESS_TOKEN_URL,
- functools.partial(self._on_access_token, callback),
- method="POST", headers={'Content-Type': 'application/x-www-form-urlencoded'}, body=body)
-
- def _on_access_token(self, future, response):
- """Callback function for the exchange to the access token."""
- if response.error:
- future.set_exception(AuthError('Google auth error: %s' % str(response)))
- return
-
- args = escape.json_decode(response.body)
- future.set_result(args)
-
-
-class FacebookGraphMixin(OAuth2Mixin):
- """Facebook authentication using the new Graph API and OAuth2."""
- _OAUTH_ACCESS_TOKEN_URL = "https://graph.facebook.com/oauth/access_token?"
- _OAUTH_AUTHORIZE_URL = "https://www.facebook.com/dialog/oauth?"
- _OAUTH_NO_CALLBACKS = False
- _FACEBOOK_BASE_URL = "https://graph.facebook.com"
-
- @_auth_return_future
- def get_authenticated_user(self, redirect_uri, client_id, client_secret,
- code, callback, extra_fields=None):
- """Handles the login for the Facebook user, returning a user object.
-
- Example usage:
-
- .. testcode::
-
- class FacebookGraphLoginHandler(tornado.web.RequestHandler,
- tornado.auth.FacebookGraphMixin):
- @tornado.gen.coroutine
- def get(self):
- if self.get_argument("code", False):
- user = yield self.get_authenticated_user(
- redirect_uri='/auth/facebookgraph/',
- client_id=self.settings["facebook_api_key"],
- client_secret=self.settings["facebook_secret"],
- code=self.get_argument("code"))
- # Save the user with e.g. set_secure_cookie
- else:
- yield self.authorize_redirect(
- redirect_uri='/auth/facebookgraph/',
- client_id=self.settings["facebook_api_key"],
- extra_params={"scope": "read_stream,offline_access"})
-
- .. testoutput::
- :hide:
-
- This method returns a dictionary which may contain the following fields:
-
- * ``access_token``, a string which may be passed to `facebook_request`
- * ``session_expires``, an integer encoded as a string representing
- the time until the access token expires in seconds. This field should
- be used like ``int(user['session_expires'])``; in a future version of
- Tornado it will change from a string to an integer.
- * ``id``, ``name``, ``first_name``, ``last_name``, ``locale``, ``picture``,
- ``link``, plus any fields named in the ``extra_fields`` argument. These
- fields are copied from the Facebook graph API `user object <https://developers.facebook.com/docs/graph-api/reference/user>`_
-
- .. versionchanged:: 4.5
- The ``session_expires`` field was updated to support changes made to the
- Facebook API in March 2017.
- """
- http = self.get_auth_http_client()
- args = {
- "redirect_uri": redirect_uri,
- "code": code,
- "client_id": client_id,
- "client_secret": client_secret,
- }
-
- fields = set(['id', 'name', 'first_name', 'last_name',
- 'locale', 'picture', 'link'])
- if extra_fields:
- fields.update(extra_fields)
-
- http.fetch(self._oauth_request_token_url(**args),
- functools.partial(self._on_access_token, redirect_uri, client_id,
- client_secret, callback, fields))
-
- def _on_access_token(self, redirect_uri, client_id, client_secret,
- future, fields, response):
- if response.error:
- future.set_exception(AuthError('Facebook auth error: %s' % str(response)))
- return
-
- args = escape.json_decode(response.body)
- session = {
- "access_token": args.get("access_token"),
- "expires_in": args.get("expires_in")
- }
-
- self.facebook_request(
- path="/me",
- callback=functools.partial(
- self._on_get_user_info, future, session, fields),
- access_token=session["access_token"],
- appsecret_proof=hmac.new(key=client_secret.encode('utf8'),
- msg=session["access_token"].encode('utf8'),
- digestmod=hashlib.sha256).hexdigest(),
- fields=",".join(fields)
- )
-
- def _on_get_user_info(self, future, session, fields, user):
- if user is None:
- future.set_result(None)
- return
-
- fieldmap = {}
- for field in fields:
- fieldmap[field] = user.get(field)
-
- # session_expires is converted to str for compatibility with
- # older versions in which the server used url-encoding and
- # this code simply returned the string verbatim.
- # This should change in Tornado 5.0.
- fieldmap.update({"access_token": session["access_token"],
- "session_expires": str(session.get("expires_in"))})
- future.set_result(fieldmap)
-
- @_auth_return_future
- def facebook_request(self, path, callback, access_token=None,
- post_args=None, **args):
- """Fetches the given relative API path, e.g., "/btaylor/picture"
-
- If the request is a POST, ``post_args`` should be provided. Query
- string arguments should be given as keyword arguments.
-
- An introduction to the Facebook Graph API can be found at
- http://developers.facebook.com/docs/api
-
- Many methods require an OAuth access token which you can
- obtain through `~OAuth2Mixin.authorize_redirect` and
- `get_authenticated_user`. The user returned through that
- process includes an ``access_token`` attribute that can be
- used to make authenticated requests via this method.
-
- Example usage:
-
-        .. testcode::
-
- class MainHandler(tornado.web.RequestHandler,
- tornado.auth.FacebookGraphMixin):
- @tornado.web.authenticated
- @tornado.gen.coroutine
- def get(self):
- new_entry = yield self.facebook_request(
- "/me/feed",
- post_args={"message": "I am posting from my Tornado application!"},
- access_token=self.current_user["access_token"])
-
- if not new_entry:
- # Call failed; perhaps missing permission?
- yield self.authorize_redirect()
- return
- self.finish("Posted a message!")
-
- .. testoutput::
- :hide:
-
- The given path is relative to ``self._FACEBOOK_BASE_URL``,
- by default "https://graph.facebook.com".
-
- This method is a wrapper around `OAuth2Mixin.oauth2_request`;
- the only difference is that this method takes a relative path,
- while ``oauth2_request`` takes a complete url.
-
- .. versionchanged:: 3.1
- Added the ability to override ``self._FACEBOOK_BASE_URL``.
- """
- url = self._FACEBOOK_BASE_URL + path
- # Thanks to the _auth_return_future decorator, our "callback"
- # argument is a Future, which we cannot pass as a callback to
- # oauth2_request. Instead, have oauth2_request return a
- # future and chain them together.
- oauth_future = self.oauth2_request(url, access_token=access_token,
- post_args=post_args, **args)
- chain_future(oauth_future, callback)
-
-
-def _oauth_signature(consumer_token, method, url, parameters={}, token=None):
- """Calculates the HMAC-SHA1 OAuth signature for the given request.
-
- See http://oauth.net/core/1.0/#signing_process
- """
- parts = urlparse.urlparse(url)
- scheme, netloc, path = parts[:3]
- normalized_url = scheme.lower() + "://" + netloc.lower() + path
-
- base_elems = []
- base_elems.append(method.upper())
- base_elems.append(normalized_url)
- base_elems.append("&".join("%s=%s" % (k, _oauth_escape(str(v)))
- for k, v in sorted(parameters.items())))
- base_string = "&".join(_oauth_escape(e) for e in base_elems)
-
- key_elems = [escape.utf8(consumer_token["secret"])]
- key_elems.append(escape.utf8(token["secret"] if token else ""))
- key = b"&".join(key_elems)
-
- hash = hmac.new(key, escape.utf8(base_string), hashlib.sha1)
- return binascii.b2a_base64(hash.digest())[:-1]
-
-
-def _oauth10a_signature(consumer_token, method, url, parameters={}, token=None):
- """Calculates the HMAC-SHA1 OAuth 1.0a signature for the given request.
-
- See http://oauth.net/core/1.0a/#signing_process
- """
- parts = urlparse.urlparse(url)
- scheme, netloc, path = parts[:3]
- normalized_url = scheme.lower() + "://" + netloc.lower() + path
-
- base_elems = []
- base_elems.append(method.upper())
- base_elems.append(normalized_url)
- base_elems.append("&".join("%s=%s" % (k, _oauth_escape(str(v)))
- for k, v in sorted(parameters.items())))
-
- base_string = "&".join(_oauth_escape(e) for e in base_elems)
- key_elems = [escape.utf8(urllib_parse.quote(consumer_token["secret"], safe='~'))]
- key_elems.append(escape.utf8(urllib_parse.quote(token["secret"], safe='~') if token else ""))
- key = b"&".join(key_elems)
-
- hash = hmac.new(key, escape.utf8(base_string), hashlib.sha1)
- return binascii.b2a_base64(hash.digest())[:-1]
-
-
-def _oauth_escape(val):
- if isinstance(val, unicode_type):
- val = val.encode("utf-8")
- return urllib_parse.quote(val, safe="~")
-
-
-def _oauth_parse_response(body):
- # I can't find an officially-defined encoding for oauth responses and
- # have never seen anyone use non-ascii. Leave the response in a byte
- # string for python 2, and use utf8 on python 3.
- body = escape.native_str(body)
- p = urlparse.parse_qs(body, keep_blank_values=False)
- token = dict(key=p["oauth_token"][0], secret=p["oauth_token_secret"][0])
-
- # Add the extra parameters the Provider included to the token
- special = ("oauth_token", "oauth_token_secret")
- token.update((k, p[k][0]) for k in p if k not in special)
- return token
+#!/usr/bin/env python
+#
+# Copyright 2009 Facebook
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""This module contains implementations of various third-party
+authentication schemes.
+
+All the classes in this file are class mixins designed to be used with
+the `tornado.web.RequestHandler` class. They are used in two ways:
+
+* On a login handler, use methods such as ``authenticate_redirect()``,
+ ``authorize_redirect()``, and ``get_authenticated_user()`` to
+ establish the user's identity and store authentication tokens to your
+ database and/or cookies.
+* In non-login handlers, use methods such as ``facebook_request()``
+ or ``twitter_request()`` to use the authentication tokens to make
+ requests to the respective services.
+
+They all take slightly different arguments because each of these services
+implements authentication and authorization slightly differently.
+See the individual service classes below for complete documentation.
+
+Example usage for Google OAuth:
+
+.. testcode::
+
+ class GoogleOAuth2LoginHandler(tornado.web.RequestHandler,
+ tornado.auth.GoogleOAuth2Mixin):
+ @tornado.gen.coroutine
+ def get(self):
+ if self.get_argument('code', False):
+ user = yield self.get_authenticated_user(
+ redirect_uri='http://your.site.com/auth/google',
+ code=self.get_argument('code'))
+ # Save the user with e.g. set_secure_cookie
+ else:
+ yield self.authorize_redirect(
+ redirect_uri='http://your.site.com/auth/google',
+ client_id=self.settings['google_oauth']['key'],
+ scope=['profile', 'email'],
+ response_type='code',
+ extra_params={'approval_prompt': 'auto'})
+
+.. testoutput::
+ :hide:
+
+
+.. versionchanged:: 4.0
+ All of the callback interfaces in this module are now guaranteed
+ to run their callback with an argument of ``None`` on error.
+ Previously some functions would do this while others would simply
+ terminate the request on their own. This change also ensures that
+ errors are more consistently reported through the ``Future`` interfaces.
+"""
+
+from __future__ import absolute_import, division, print_function
+
+import base64
+import binascii
+import functools
+import hashlib
+import hmac
+import time
+import uuid
+
+from tornado.concurrent import TracebackFuture, return_future, chain_future
+from tornado import gen
+from tornado import httpclient
+from tornado import escape
+from tornado.httputil import url_concat
+from tornado.log import gen_log
+from tornado.stack_context import ExceptionStackContext
+from tornado.util import unicode_type, ArgReplacer, PY3
+
+if PY3:
+ import urllib.parse as urlparse
+ import urllib.parse as urllib_parse
+ long = int
+else:
+ import urlparse
+ import urllib as urllib_parse
+
+
+class AuthError(Exception):
+ pass
+
+
+def _auth_future_to_callback(callback, future):
+ try:
+ result = future.result()
+ except AuthError as e:
+ gen_log.warning(str(e))
+ result = None
+ callback(result)
+
+
+def _auth_return_future(f):
+ """Similar to tornado.concurrent.return_future, but uses the auth
+ module's legacy callback interface.
+
+ Note that when using this decorator the ``callback`` parameter
+ inside the function will actually be a future.
+ """
+ replacer = ArgReplacer(f, 'callback')
+
+ @functools.wraps(f)
+ def wrapper(*args, **kwargs):
+ future = TracebackFuture()
+ callback, args, kwargs = replacer.replace(future, args, kwargs)
+ if callback is not None:
+ future.add_done_callback(
+ functools.partial(_auth_future_to_callback, callback))
+
+ def handle_exception(typ, value, tb):
+ if future.done():
+ return False
+ else:
+ future.set_exc_info((typ, value, tb))
+ return True
+ with ExceptionStackContext(handle_exception):
+ f(*args, **kwargs)
+ return future
+ return wrapper
+
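+# A minimal sketch (hypothetical method and caller, not part of this module)
+# of the dual interface the decorator above produces:
+#
+#     class Example(object):
+#         @_auth_return_future
+#         def get_thing(self, callback):
+#             # Inside the body, ``callback`` is really the wrapper's
+#             # Future; resolving it completes the operation.
+#             callback.set_result("thing")
+#
+#     # Callers may yield the returned Future, or pass callback= (legacy):
+#     #     thing = yield example.get_thing()
+#     #     example.get_thing(callback=on_thing)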
+
+class OpenIdMixin(object):
+ """Abstract implementation of OpenID and Attribute Exchange.
+
+ Class attributes:
+
+ * ``_OPENID_ENDPOINT``: the identity provider's URI.
+ """
+ @return_future
+ def authenticate_redirect(self, callback_uri=None,
+ ax_attrs=["name", "email", "language", "username"],
+ callback=None):
+ """Redirects to the authentication URL for this service.
+
+ After authentication, the service will redirect back to the given
+ callback URI with additional parameters including ``openid.mode``.
+
+ We request the given attributes for the authenticated user by
+ default (name, email, language, and username). If you don't need
+ all those attributes for your app, you can request fewer with
+        the ``ax_attrs`` keyword argument.
+
+ .. versionchanged:: 3.1
+ Returns a `.Future` and takes an optional callback. These are
+ not strictly necessary as this method is synchronous,
+ but they are supplied for consistency with
+ `OAuthMixin.authorize_redirect`.
+ """
+ callback_uri = callback_uri or self.request.uri
+ args = self._openid_args(callback_uri, ax_attrs=ax_attrs)
+ self.redirect(self._OPENID_ENDPOINT + "?" + urllib_parse.urlencode(args))
+ callback()
+
+ @_auth_return_future
+ def get_authenticated_user(self, callback, http_client=None):
+ """Fetches the authenticated user data upon redirect.
+
+ This method should be called by the handler that receives the
+ redirect from the `authenticate_redirect()` method (which is
+ often the same as the one that calls it; in that case you would
+ call `get_authenticated_user` if the ``openid.mode`` parameter
+ is present and `authenticate_redirect` if it is not).
+
+ The result of this method will generally be used to set a cookie.
+ """
+ # Verify the OpenID response via direct request to the OP
+ args = dict((k, v[-1]) for k, v in self.request.arguments.items())
+ args["openid.mode"] = u"check_authentication"
+ url = self._OPENID_ENDPOINT
+ if http_client is None:
+ http_client = self.get_auth_http_client()
+ http_client.fetch(url, functools.partial(
+ self._on_authentication_verified, callback),
+ method="POST", body=urllib_parse.urlencode(args))
+
+ def _openid_args(self, callback_uri, ax_attrs=[], oauth_scope=None):
+ url = urlparse.urljoin(self.request.full_url(), callback_uri)
+ args = {
+ "openid.ns": "http://specs.openid.net/auth/2.0",
+ "openid.claimed_id":
+ "http://specs.openid.net/auth/2.0/identifier_select",
+ "openid.identity":
+ "http://specs.openid.net/auth/2.0/identifier_select",
+ "openid.return_to": url,
+ "openid.realm": urlparse.urljoin(url, '/'),
+ "openid.mode": "checkid_setup",
+ }
+ if ax_attrs:
+ args.update({
+ "openid.ns.ax": "http://openid.net/srv/ax/1.0",
+ "openid.ax.mode": "fetch_request",
+ })
+ ax_attrs = set(ax_attrs)
+ required = []
+ if "name" in ax_attrs:
+ ax_attrs -= set(["name", "firstname", "fullname", "lastname"])
+ required += ["firstname", "fullname", "lastname"]
+ args.update({
+ "openid.ax.type.firstname":
+ "http://axschema.org/namePerson/first",
+ "openid.ax.type.fullname":
+ "http://axschema.org/namePerson",
+ "openid.ax.type.lastname":
+ "http://axschema.org/namePerson/last",
+ })
+ known_attrs = {
+ "email": "http://axschema.org/contact/email",
+ "language": "http://axschema.org/pref/language",
+ "username": "http://axschema.org/namePerson/friendly",
+ }
+ for name in ax_attrs:
+ args["openid.ax.type." + name] = known_attrs[name]
+ required.append(name)
+ args["openid.ax.required"] = ",".join(required)
+ if oauth_scope:
+ args.update({
+ "openid.ns.oauth":
+ "http://specs.openid.net/extensions/oauth/1.0",
+ "openid.oauth.consumer": self.request.host.split(":")[0],
+ "openid.oauth.scope": oauth_scope,
+ })
+ return args
+
+ def _on_authentication_verified(self, future, response):
+ if response.error or b"is_valid:true" not in response.body:
+ future.set_exception(AuthError(
+ "Invalid OpenID response: %s" % (response.error or
+ response.body)))
+ return
+
+ # Make sure we got back at least an email from attribute exchange
+ ax_ns = None
+ for name in self.request.arguments:
+ if name.startswith("openid.ns.") and \
+ self.get_argument(name) == u"http://openid.net/srv/ax/1.0":
+ ax_ns = name[10:]
+ break
+
+ def get_ax_arg(uri):
+ if not ax_ns:
+ return u""
+ prefix = "openid." + ax_ns + ".type."
+ ax_name = None
+ for name in self.request.arguments.keys():
+ if self.get_argument(name) == uri and name.startswith(prefix):
+ part = name[len(prefix):]
+ ax_name = "openid." + ax_ns + ".value." + part
+ break
+ if not ax_name:
+ return u""
+ return self.get_argument(ax_name, u"")
+
+ email = get_ax_arg("http://axschema.org/contact/email")
+ name = get_ax_arg("http://axschema.org/namePerson")
+ first_name = get_ax_arg("http://axschema.org/namePerson/first")
+ last_name = get_ax_arg("http://axschema.org/namePerson/last")
+ username = get_ax_arg("http://axschema.org/namePerson/friendly")
+ locale = get_ax_arg("http://axschema.org/pref/language").lower()
+ user = dict()
+ name_parts = []
+ if first_name:
+ user["first_name"] = first_name
+ name_parts.append(first_name)
+ if last_name:
+ user["last_name"] = last_name
+ name_parts.append(last_name)
+ if name:
+ user["name"] = name
+ elif name_parts:
+ user["name"] = u" ".join(name_parts)
+ elif email:
+ user["name"] = email.split("@")[0]
+ if email:
+ user["email"] = email
+ if locale:
+ user["locale"] = locale
+ if username:
+ user["username"] = username
+ claimed_id = self.get_argument("openid.claimed_id", None)
+ if claimed_id:
+ user["claimed_id"] = claimed_id
+ future.set_result(user)
+
+ def get_auth_http_client(self):
+ """Returns the `.AsyncHTTPClient` instance to be used for auth requests.
+
+ May be overridden by subclasses to use an HTTP client other than
+ the default.
+ """
+ return httpclient.AsyncHTTPClient()
+
+
+class OAuthMixin(object):
+ """Abstract implementation of OAuth 1.0 and 1.0a.
+
+ See `TwitterMixin` below for an example implementation.
+
+ Class attributes:
+
+ * ``_OAUTH_AUTHORIZE_URL``: The service's OAuth authorization url.
+ * ``_OAUTH_ACCESS_TOKEN_URL``: The service's OAuth access token url.
+ * ``_OAUTH_VERSION``: May be either "1.0" or "1.0a".
+ * ``_OAUTH_NO_CALLBACKS``: Set this to True if the service requires
+ advance registration of callbacks.
+
+ Subclasses must also override the `_oauth_get_user_future` and
+ `_oauth_consumer_token` methods.
+ """
+ @return_future
+ def authorize_redirect(self, callback_uri=None, extra_params=None,
+ http_client=None, callback=None):
+ """Redirects the user to obtain OAuth authorization for this service.
+
+ The ``callback_uri`` may be omitted if you have previously
+ registered a callback URI with the third-party service. For
+ some services (including Friendfeed), you must use a
+ previously-registered callback URI and cannot specify a
+ callback via this method.
+
+ This method sets a cookie called ``_oauth_request_token`` which is
+ subsequently used (and cleared) in `get_authenticated_user` for
+ security purposes.
+
+ Note that this method is asynchronous, although it calls
+ `.RequestHandler.finish` for you so it may not be necessary
+ to pass a callback or use the `.Future` it returns. However,
+ if this method is called from a function decorated with
+ `.gen.coroutine`, you must call it with ``yield`` to keep the
+ response from being closed prematurely.
+
+ .. versionchanged:: 3.1
+ Now returns a `.Future` and takes an optional callback, for
+ compatibility with `.gen.coroutine`.
+ """
+ if callback_uri and getattr(self, "_OAUTH_NO_CALLBACKS", False):
+ raise Exception("This service does not support oauth_callback")
+ if http_client is None:
+ http_client = self.get_auth_http_client()
+ if getattr(self, "_OAUTH_VERSION", "1.0a") == "1.0a":
+ http_client.fetch(
+ self._oauth_request_token_url(callback_uri=callback_uri,
+ extra_params=extra_params),
+ functools.partial(
+ self._on_request_token,
+ self._OAUTH_AUTHORIZE_URL,
+ callback_uri,
+ callback))
+ else:
+ http_client.fetch(
+ self._oauth_request_token_url(),
+ functools.partial(
+ self._on_request_token, self._OAUTH_AUTHORIZE_URL,
+ callback_uri,
+ callback))
+
+ @_auth_return_future
+ def get_authenticated_user(self, callback, http_client=None):
+ """Gets the OAuth authorized user and access token.
+
+ This method should be called from the handler for your
+ OAuth callback URL to complete the registration process. We run the
+ callback with the authenticated user dictionary. This dictionary
+ will contain an ``access_key`` which can be used to make authorized
+ requests to this service on behalf of the user. The dictionary will
+ also contain other fields such as ``name``, depending on the service
+ used.
+ """
+        # Thanks to the _auth_return_future decorator, ``callback`` is
+        # actually the Future we are expected to resolve.
+        future = callback
+ request_key = escape.utf8(self.get_argument("oauth_token"))
+ oauth_verifier = self.get_argument("oauth_verifier", None)
+ request_cookie = self.get_cookie("_oauth_request_token")
+ if not request_cookie:
+ future.set_exception(AuthError(
+ "Missing OAuth request token cookie"))
+ return
+ self.clear_cookie("_oauth_request_token")
+ cookie_key, cookie_secret = [base64.b64decode(escape.utf8(i)) for i in request_cookie.split("|")]
+ if cookie_key != request_key:
+ future.set_exception(AuthError(
+ "Request token does not match cookie"))
+ return
+ token = dict(key=cookie_key, secret=cookie_secret)
+ if oauth_verifier:
+ token["verifier"] = oauth_verifier
+ if http_client is None:
+ http_client = self.get_auth_http_client()
+ http_client.fetch(self._oauth_access_token_url(token),
+ functools.partial(self._on_access_token, callback))
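+    # The ``_oauth_request_token`` cookie consumed above is written by
+    # ``_on_request_token`` as base64(key) + b"|" + base64(secret); a
+    # standalone illustration of that round trip:
+    #
+    #     data = base64.b64encode(b"key") + b"|" + base64.b64encode(b"secret")
+    #     key, secret = [base64.b64decode(p) for p in data.split(b"|")]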
+
+ def _oauth_request_token_url(self, callback_uri=None, extra_params=None):
+ consumer_token = self._oauth_consumer_token()
+ url = self._OAUTH_REQUEST_TOKEN_URL
+ args = dict(
+ oauth_consumer_key=escape.to_basestring(consumer_token["key"]),
+ oauth_signature_method="HMAC-SHA1",
+ oauth_timestamp=str(int(time.time())),
+ oauth_nonce=escape.to_basestring(binascii.b2a_hex(uuid.uuid4().bytes)),
+ oauth_version="1.0",
+ )
+ if getattr(self, "_OAUTH_VERSION", "1.0a") == "1.0a":
+ if callback_uri == "oob":
+ args["oauth_callback"] = "oob"
+ elif callback_uri:
+ args["oauth_callback"] = urlparse.urljoin(
+ self.request.full_url(), callback_uri)
+ if extra_params:
+ args.update(extra_params)
+ signature = _oauth10a_signature(consumer_token, "GET", url, args)
+ else:
+ signature = _oauth_signature(consumer_token, "GET", url, args)
+
+ args["oauth_signature"] = signature
+ return url + "?" + urllib_parse.urlencode(args)
+
+ def _on_request_token(self, authorize_url, callback_uri, callback,
+ response):
+ if response.error:
+ raise Exception("Could not get request token: %s" % response.error)
+ request_token = _oauth_parse_response(response.body)
+ data = (base64.b64encode(escape.utf8(request_token["key"])) + b"|" +
+ base64.b64encode(escape.utf8(request_token["secret"])))
+ self.set_cookie("_oauth_request_token", data)
+ args = dict(oauth_token=request_token["key"])
+ if callback_uri == "oob":
+ self.finish(authorize_url + "?" + urllib_parse.urlencode(args))
+ callback()
+ return
+ elif callback_uri:
+ args["oauth_callback"] = urlparse.urljoin(
+ self.request.full_url(), callback_uri)
+ self.redirect(authorize_url + "?" + urllib_parse.urlencode(args))
+ callback()
+
+ def _oauth_access_token_url(self, request_token):
+ consumer_token = self._oauth_consumer_token()
+ url = self._OAUTH_ACCESS_TOKEN_URL
+ args = dict(
+ oauth_consumer_key=escape.to_basestring(consumer_token["key"]),
+ oauth_token=escape.to_basestring(request_token["key"]),
+ oauth_signature_method="HMAC-SHA1",
+ oauth_timestamp=str(int(time.time())),
+ oauth_nonce=escape.to_basestring(binascii.b2a_hex(uuid.uuid4().bytes)),
+ oauth_version="1.0",
+ )
+ if "verifier" in request_token:
+ args["oauth_verifier"] = request_token["verifier"]
+
+ if getattr(self, "_OAUTH_VERSION", "1.0a") == "1.0a":
+ signature = _oauth10a_signature(consumer_token, "GET", url, args,
+ request_token)
+ else:
+ signature = _oauth_signature(consumer_token, "GET", url, args,
+ request_token)
+
+ args["oauth_signature"] = signature
+ return url + "?" + urllib_parse.urlencode(args)
+
+ def _on_access_token(self, future, response):
+ if response.error:
+ future.set_exception(AuthError("Could not fetch access token"))
+ return
+
+ access_token = _oauth_parse_response(response.body)
+ self._oauth_get_user_future(access_token).add_done_callback(
+ functools.partial(self._on_oauth_get_user, access_token, future))
+
+ def _oauth_consumer_token(self):
+ """Subclasses must override this to return their OAuth consumer keys.
+
+ The return value should be a `dict` with keys ``key`` and ``secret``.
+ """
+ raise NotImplementedError()
+
+ @return_future
+ def _oauth_get_user_future(self, access_token, callback):
+ """Subclasses must override this to get basic information about the
+ user.
+
+ Should return a `.Future` whose result is a dictionary
+ containing information about the user, which may have been
+ retrieved by using ``access_token`` to make a request to the
+ service.
+
+ The access token will be added to the returned dictionary to make
+ the result of `get_authenticated_user`.
+
+ For backwards compatibility, the callback-based ``_oauth_get_user``
+ method is also supported.
+ """
+ # By default, call the old-style _oauth_get_user, but new code
+ # should override this method instead.
+ self._oauth_get_user(access_token, callback)
+
+ def _oauth_get_user(self, access_token, callback):
+ raise NotImplementedError()
+
+ def _on_oauth_get_user(self, access_token, future, user_future):
+ if user_future.exception() is not None:
+ future.set_exception(user_future.exception())
+ return
+ user = user_future.result()
+ if not user:
+ future.set_exception(AuthError("Error getting user"))
+ return
+ user["access_token"] = access_token
+ future.set_result(user)
+
+ def _oauth_request_parameters(self, url, access_token, parameters={},
+ method="GET"):
+ """Returns the OAuth parameters as a dict for the given request.
+
+ parameters should include all POST arguments and query string arguments
+ that will be sent with the request.
+ """
+ consumer_token = self._oauth_consumer_token()
+ base_args = dict(
+ oauth_consumer_key=escape.to_basestring(consumer_token["key"]),
+ oauth_token=escape.to_basestring(access_token["key"]),
+ oauth_signature_method="HMAC-SHA1",
+ oauth_timestamp=str(int(time.time())),
+ oauth_nonce=escape.to_basestring(binascii.b2a_hex(uuid.uuid4().bytes)),
+ oauth_version="1.0",
+ )
+ args = {}
+ args.update(base_args)
+ args.update(parameters)
+ if getattr(self, "_OAUTH_VERSION", "1.0a") == "1.0a":
+ signature = _oauth10a_signature(consumer_token, method, url, args,
+ access_token)
+ else:
+ signature = _oauth_signature(consumer_token, method, url, args,
+ access_token)
+ base_args["oauth_signature"] = escape.to_basestring(signature)
+ return base_args
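+    # Illustration (hypothetical URL and token): the returned dict carries
+    # only the ``oauth_*`` protocol parameters; the request parameters are
+    # folded into the signature but must still be sent with the request:
+    #
+    #     oauth = self._oauth_request_parameters(
+    #         "https://example.com/api", dict(key="tk", secret="ts"),
+    #         parameters={"q": "tornado"})
+    #     # -> {"oauth_consumer_key": ..., "oauth_nonce": ...,
+    #     #     "oauth_signature": ..., ...}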
+
+ def get_auth_http_client(self):
+ """Returns the `.AsyncHTTPClient` instance to be used for auth requests.
+
+ May be overridden by subclasses to use an HTTP client other than
+ the default.
+ """
+ return httpclient.AsyncHTTPClient()
+
+
+class OAuth2Mixin(object):
+ """Abstract implementation of OAuth 2.0.
+
+ See `FacebookGraphMixin` or `GoogleOAuth2Mixin` below for example
+ implementations.
+
+ Class attributes:
+
+ * ``_OAUTH_AUTHORIZE_URL``: The service's authorization url.
+ * ``_OAUTH_ACCESS_TOKEN_URL``: The service's access token url.
+ """
+ @return_future
+ def authorize_redirect(self, redirect_uri=None, client_id=None,
+ client_secret=None, extra_params=None,
+ callback=None, scope=None, response_type="code"):
+ """Redirects the user to obtain OAuth authorization for this service.
+
+ Some providers require that you register a redirect URL with
+ your application instead of passing one via this method. You
+ should call this method to log the user in, and then call
+ ``get_authenticated_user`` in the handler for your
+ redirect URL to complete the authorization process.
+
+ .. versionchanged:: 3.1
+ Returns a `.Future` and takes an optional callback. These are
+ not strictly necessary as this method is synchronous,
+ but they are supplied for consistency with
+ `OAuthMixin.authorize_redirect`.
+ """
+ args = {
+ "redirect_uri": redirect_uri,
+ "client_id": client_id,
+ "response_type": response_type
+ }
+ if extra_params:
+ args.update(extra_params)
+ if scope:
+ args['scope'] = ' '.join(scope)
+ self.redirect(
+ url_concat(self._OAUTH_AUTHORIZE_URL, args))
+ callback()
+
+ def _oauth_request_token_url(self, redirect_uri=None, client_id=None,
+ client_secret=None, code=None,
+ extra_params=None):
+ url = self._OAUTH_ACCESS_TOKEN_URL
+ args = dict(
+ redirect_uri=redirect_uri,
+ code=code,
+ client_id=client_id,
+ client_secret=client_secret,
+ )
+ if extra_params:
+ args.update(extra_params)
+ return url_concat(url, args)
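+    # For example (placeholder values), the result is simply the access token
+    # endpoint with the arguments appended as a query string:
+    #
+    #     self._oauth_request_token_url(redirect_uri="https://app.example.com/cb",
+    #                                   client_id="abc", client_secret="s3cr3t",
+    #                                   code="xyz")
+    #     # -> _OAUTH_ACCESS_TOKEN_URL + "?redirect_uri=...&code=xyz&..."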
+
+ @_auth_return_future
+ def oauth2_request(self, url, callback, access_token=None,
+ post_args=None, **args):
+ """Fetches the given URL auth an OAuth2 access token.
+
+ If the request is a POST, ``post_args`` should be provided. Query
+ string arguments should be given as keyword arguments.
+
+ Example usage:
+
+        .. testcode::
+
+ class MainHandler(tornado.web.RequestHandler,
+ tornado.auth.FacebookGraphMixin):
+ @tornado.web.authenticated
+ @tornado.gen.coroutine
+ def get(self):
+ new_entry = yield self.oauth2_request(
+ "https://graph.facebook.com/me/feed",
+ post_args={"message": "I am posting from my Tornado application!"},
+ access_token=self.current_user["access_token"])
+
+ if not new_entry:
+ # Call failed; perhaps missing permission?
+ yield self.authorize_redirect()
+ return
+ self.finish("Posted a message!")
+
+ .. testoutput::
+ :hide:
+
+ .. versionadded:: 4.3
+ """
+ all_args = {}
+ if access_token:
+ all_args["access_token"] = access_token
+ all_args.update(args)
+
+ if all_args:
+ url += "?" + urllib_parse.urlencode(all_args)
+ callback = functools.partial(self._on_oauth2_request, callback)
+ http = self.get_auth_http_client()
+ if post_args is not None:
+ http.fetch(url, method="POST", body=urllib_parse.urlencode(post_args),
+ callback=callback)
+ else:
+ http.fetch(url, callback=callback)
+
+ def _on_oauth2_request(self, future, response):
+ if response.error:
+ future.set_exception(AuthError("Error response %s fetching %s" %
+ (response.error, response.request.url)))
+ return
+
+ future.set_result(escape.json_decode(response.body))
+
+ def get_auth_http_client(self):
+ """Returns the `.AsyncHTTPClient` instance to be used for auth requests.
+
+ May be overridden by subclasses to use an HTTP client other than
+ the default.
+
+ .. versionadded:: 4.3
+ """
+ return httpclient.AsyncHTTPClient()
+
+
+class TwitterMixin(OAuthMixin):
+ """Twitter OAuth authentication.
+
+ To authenticate with Twitter, register your application with
+ Twitter at http://twitter.com/apps. Then copy your Consumer Key
+ and Consumer Secret to the application
+ `~tornado.web.Application.settings` ``twitter_consumer_key`` and
+ ``twitter_consumer_secret``. Use this mixin on the handler for the
+ URL you registered as your application's callback URL.
+
+ When your application is set up, you can use this mixin like this
+ to authenticate the user with Twitter and get access to their stream:
+
+ .. testcode::
+
+ class TwitterLoginHandler(tornado.web.RequestHandler,
+ tornado.auth.TwitterMixin):
+ @tornado.gen.coroutine
+ def get(self):
+ if self.get_argument("oauth_token", None):
+ user = yield self.get_authenticated_user()
+ # Save the user using e.g. set_secure_cookie()
+ else:
+ yield self.authorize_redirect()
+
+ .. testoutput::
+ :hide:
+
+ The user object returned by `~OAuthMixin.get_authenticated_user`
+ includes the attributes ``username``, ``name``, ``access_token``,
+ and all of the custom Twitter user attributes described at
+ https://dev.twitter.com/docs/api/1.1/get/users/show
+ """
+ _OAUTH_REQUEST_TOKEN_URL = "https://api.twitter.com/oauth/request_token"
+ _OAUTH_ACCESS_TOKEN_URL = "https://api.twitter.com/oauth/access_token"
+ _OAUTH_AUTHORIZE_URL = "https://api.twitter.com/oauth/authorize"
+ _OAUTH_AUTHENTICATE_URL = "https://api.twitter.com/oauth/authenticate"
+ _OAUTH_NO_CALLBACKS = False
+ _TWITTER_BASE_URL = "https://api.twitter.com/1.1"
+
+ @return_future
+ def authenticate_redirect(self, callback_uri=None, callback=None):
+ """Just like `~OAuthMixin.authorize_redirect`, but
+ auto-redirects if authorized.
+
+ This is generally the right interface to use if you are using
+ Twitter for single-sign on.
+
+ .. versionchanged:: 3.1
+ Now returns a `.Future` and takes an optional callback, for
+ compatibility with `.gen.coroutine`.
+ """
+ http = self.get_auth_http_client()
+ http.fetch(self._oauth_request_token_url(callback_uri=callback_uri),
+ functools.partial(
+ self._on_request_token, self._OAUTH_AUTHENTICATE_URL,
+ None, callback))
+
+ @_auth_return_future
+ def twitter_request(self, path, callback=None, access_token=None,
+ post_args=None, **args):
+ """Fetches the given API path, e.g., ``statuses/user_timeline/btaylor``
+
+        The path should not include the format or API version number
+        (we automatically use JSON format and API version 1.1).
+
+ If the request is a POST, ``post_args`` should be provided. Query
+ string arguments should be given as keyword arguments.
+
+ All the Twitter methods are documented at http://dev.twitter.com/
+
+ Many methods require an OAuth access token which you can
+ obtain through `~OAuthMixin.authorize_redirect` and
+ `~OAuthMixin.get_authenticated_user`. The user returned through that
+        process includes an ``access_token`` attribute that can be used
+ to make authenticated requests via this method. Example
+ usage:
+
+ .. testcode::
+
+ class MainHandler(tornado.web.RequestHandler,
+ tornado.auth.TwitterMixin):
+ @tornado.web.authenticated
+ @tornado.gen.coroutine
+ def get(self):
+ new_entry = yield self.twitter_request(
+ "/statuses/update",
+ post_args={"status": "Testing Tornado Web Server"},
+ access_token=self.current_user["access_token"])
+ if not new_entry:
+ # Call failed; perhaps missing permission?
+ yield self.authorize_redirect()
+ return
+ self.finish("Posted a message!")
+
+ .. testoutput::
+ :hide:
+
+ """
+ if path.startswith('http:') or path.startswith('https:'):
+ # Raw urls are useful for e.g. search which doesn't follow the
+ # usual pattern: http://search.twitter.com/search.json
+ url = path
+ else:
+ url = self._TWITTER_BASE_URL + path + ".json"
+ # Add the OAuth resource request signature if we have credentials
+ if access_token:
+ all_args = {}
+ all_args.update(args)
+ all_args.update(post_args or {})
+ method = "POST" if post_args is not None else "GET"
+ oauth = self._oauth_request_parameters(
+ url, access_token, all_args, method=method)
+ args.update(oauth)
+ if args:
+ url += "?" + urllib_parse.urlencode(args)
+ http = self.get_auth_http_client()
+ http_callback = functools.partial(self._on_twitter_request, callback)
+ if post_args is not None:
+ http.fetch(url, method="POST", body=urllib_parse.urlencode(post_args),
+ callback=http_callback)
+ else:
+ http.fetch(url, callback=http_callback)
+
+ def _on_twitter_request(self, future, response):
+ if response.error:
+ future.set_exception(AuthError(
+ "Error response %s fetching %s" % (response.error,
+ response.request.url)))
+ return
+ future.set_result(escape.json_decode(response.body))
+
+ def _oauth_consumer_token(self):
+ self.require_setting("twitter_consumer_key", "Twitter OAuth")
+ self.require_setting("twitter_consumer_secret", "Twitter OAuth")
+ return dict(
+ key=self.settings["twitter_consumer_key"],
+ secret=self.settings["twitter_consumer_secret"])
+
+ @gen.coroutine
+ def _oauth_get_user_future(self, access_token):
+ user = yield self.twitter_request(
+ "/account/verify_credentials",
+ access_token=access_token)
+ if user:
+ user["username"] = user["screen_name"]
+ raise gen.Return(user)
+
+
+class GoogleOAuth2Mixin(OAuth2Mixin):
+ """Google authentication using OAuth2.
+
+ In order to use, register your application with Google and copy the
+ relevant parameters to your application settings.
+
+ * Go to the Google Dev Console at http://console.developers.google.com
+ * Select a project, or create a new one.
+ * In the sidebar on the left, select APIs & Auth.
+ * In the list of APIs, find the Google+ API service and set it to ON.
+ * In the sidebar on the left, select Credentials.
+ * In the OAuth section of the page, select Create New Client ID.
+ * Set the Redirect URI to point to your auth handler
+ * Copy the "Client secret" and "Client ID" to the application settings as
+ {"google_oauth": {"key": CLIENT_ID, "secret": CLIENT_SECRET}}
+
+ .. versionadded:: 3.2
+ """
+ _OAUTH_AUTHORIZE_URL = "https://accounts.google.com/o/oauth2/auth"
+ _OAUTH_ACCESS_TOKEN_URL = "https://accounts.google.com/o/oauth2/token"
+ _OAUTH_USERINFO_URL = "https://www.googleapis.com/oauth2/v1/userinfo"
+ _OAUTH_NO_CALLBACKS = False
+ _OAUTH_SETTINGS_KEY = 'google_oauth'
+
+ @_auth_return_future
+ def get_authenticated_user(self, redirect_uri, code, callback):
+ """Handles the login for the Google user, returning an access token.
+
+ The result is a dictionary containing an ``access_token`` field
+        (among others; see https://developers.google.com/identity/protocols/OAuth2WebServer#handlingtheresponse).
+ Unlike other ``get_authenticated_user`` methods in this package,
+ this method does not return any additional information about the user.
+ The returned access token can be used with `OAuth2Mixin.oauth2_request`
+ to request additional information (perhaps from
+        ``https://www.googleapis.com/oauth2/v2/userinfo``).
+
+ Example usage:
+
+ .. testcode::
+
+ class GoogleOAuth2LoginHandler(tornado.web.RequestHandler,
+ tornado.auth.GoogleOAuth2Mixin):
+ @tornado.gen.coroutine
+ def get(self):
+ if self.get_argument('code', False):
+ access = yield self.get_authenticated_user(
+ redirect_uri='http://your.site.com/auth/google',
+ code=self.get_argument('code'))
+ user = yield self.oauth2_request(
+ "https://www.googleapis.com/oauth2/v1/userinfo",
+ access_token=access["access_token"])
+ # Save the user and access token with
+ # e.g. set_secure_cookie.
+ else:
+ yield self.authorize_redirect(
+ redirect_uri='http://your.site.com/auth/google',
+ client_id=self.settings['google_oauth']['key'],
+ scope=['profile', 'email'],
+ response_type='code',
+ extra_params={'approval_prompt': 'auto'})
+
+ .. testoutput::
+ :hide:
+
+ """
+ http = self.get_auth_http_client()
+ body = urllib_parse.urlencode({
+ "redirect_uri": redirect_uri,
+ "code": code,
+ "client_id": self.settings[self._OAUTH_SETTINGS_KEY]['key'],
+ "client_secret": self.settings[self._OAUTH_SETTINGS_KEY]['secret'],
+ "grant_type": "authorization_code",
+ })
+
+ http.fetch(self._OAUTH_ACCESS_TOKEN_URL,
+ functools.partial(self._on_access_token, callback),
+ method="POST", headers={'Content-Type': 'application/x-www-form-urlencoded'}, body=body)
+
+ def _on_access_token(self, future, response):
+ """Callback function for the exchange to the access token."""
+ if response.error:
+ future.set_exception(AuthError('Google auth error: %s' % str(response)))
+ return
+
+ args = escape.json_decode(response.body)
+ future.set_result(args)
+
+
+class FacebookGraphMixin(OAuth2Mixin):
+ """Facebook authentication using the new Graph API and OAuth2."""
+ _OAUTH_ACCESS_TOKEN_URL = "https://graph.facebook.com/oauth/access_token?"
+ _OAUTH_AUTHORIZE_URL = "https://www.facebook.com/dialog/oauth?"
+ _OAUTH_NO_CALLBACKS = False
+ _FACEBOOK_BASE_URL = "https://graph.facebook.com"
+
+ @_auth_return_future
+ def get_authenticated_user(self, redirect_uri, client_id, client_secret,
+ code, callback, extra_fields=None):
+ """Handles the login for the Facebook user, returning a user object.
+
+ Example usage:
+
+ .. testcode::
+
+ class FacebookGraphLoginHandler(tornado.web.RequestHandler,
+ tornado.auth.FacebookGraphMixin):
+ @tornado.gen.coroutine
+ def get(self):
+ if self.get_argument("code", False):
+ user = yield self.get_authenticated_user(
+ redirect_uri='/auth/facebookgraph/',
+ client_id=self.settings["facebook_api_key"],
+ client_secret=self.settings["facebook_secret"],
+ code=self.get_argument("code"))
+ # Save the user with e.g. set_secure_cookie
+ else:
+ yield self.authorize_redirect(
+ redirect_uri='/auth/facebookgraph/',
+ client_id=self.settings["facebook_api_key"],
+ extra_params={"scope": "read_stream,offline_access"})
+
+ .. testoutput::
+ :hide:
+
+ This method returns a dictionary which may contain the following fields:
+
+ * ``access_token``, a string which may be passed to `facebook_request`
+ * ``session_expires``, an integer encoded as a string representing
+ the time until the access token expires in seconds. This field should
+ be used like ``int(user['session_expires'])``; in a future version of
+ Tornado it will change from a string to an integer.
+ * ``id``, ``name``, ``first_name``, ``last_name``, ``locale``, ``picture``,
+ ``link``, plus any fields named in the ``extra_fields`` argument. These
+ fields are copied from the Facebook graph API `user object <https://developers.facebook.com/docs/graph-api/reference/user>`_
+
+ .. versionchanged:: 4.5
+ The ``session_expires`` field was updated to support changes made to the
+ Facebook API in March 2017.
+ """
+ http = self.get_auth_http_client()
+ args = {
+ "redirect_uri": redirect_uri,
+ "code": code,
+ "client_id": client_id,
+ "client_secret": client_secret,
+ }
+
+ fields = set(['id', 'name', 'first_name', 'last_name',
+ 'locale', 'picture', 'link'])
+ if extra_fields:
+ fields.update(extra_fields)
+
+ http.fetch(self._oauth_request_token_url(**args),
+ functools.partial(self._on_access_token, redirect_uri, client_id,
+ client_secret, callback, fields))
+
+ def _on_access_token(self, redirect_uri, client_id, client_secret,
+ future, fields, response):
+ if response.error:
+ future.set_exception(AuthError('Facebook auth error: %s' % str(response)))
+ return
+
+ args = escape.json_decode(response.body)
+ session = {
+ "access_token": args.get("access_token"),
+ "expires_in": args.get("expires_in")
+ }
+
+ self.facebook_request(
+ path="/me",
+ callback=functools.partial(
+ self._on_get_user_info, future, session, fields),
+ access_token=session["access_token"],
+ appsecret_proof=hmac.new(key=client_secret.encode('utf8'),
+ msg=session["access_token"].encode('utf8'),
+ digestmod=hashlib.sha256).hexdigest(),
+ fields=",".join(fields)
+ )
+
+ def _on_get_user_info(self, future, session, fields, user):
+ if user is None:
+ future.set_result(None)
+ return
+
+ fieldmap = {}
+ for field in fields:
+ fieldmap[field] = user.get(field)
+
+ # session_expires is converted to str for compatibility with
+ # older versions in which the server used url-encoding and
+ # this code simply returned the string verbatim.
+ # This should change in Tornado 5.0.
+ fieldmap.update({"access_token": session["access_token"],
+ "session_expires": str(session.get("expires_in"))})
+ future.set_result(fieldmap)
+
+ @_auth_return_future
+ def facebook_request(self, path, callback, access_token=None,
+ post_args=None, **args):
+ """Fetches the given relative API path, e.g., "/btaylor/picture"
+
+ If the request is a POST, ``post_args`` should be provided. Query
+ string arguments should be given as keyword arguments.
+
+ An introduction to the Facebook Graph API can be found at
+ http://developers.facebook.com/docs/api
+
+ Many methods require an OAuth access token which you can
+ obtain through `~OAuth2Mixin.authorize_redirect` and
+ `get_authenticated_user`. The user returned through that
+ process includes an ``access_token`` attribute that can be
+ used to make authenticated requests via this method.
+
+ Example usage:
+
+        .. testcode::
+
+ class MainHandler(tornado.web.RequestHandler,
+ tornado.auth.FacebookGraphMixin):
+ @tornado.web.authenticated
+ @tornado.gen.coroutine
+ def get(self):
+ new_entry = yield self.facebook_request(
+ "/me/feed",
+ post_args={"message": "I am posting from my Tornado application!"},
+ access_token=self.current_user["access_token"])
+
+ if not new_entry:
+ # Call failed; perhaps missing permission?
+ yield self.authorize_redirect()
+ return
+ self.finish("Posted a message!")
+
+ .. testoutput::
+ :hide:
+
+ The given path is relative to ``self._FACEBOOK_BASE_URL``,
+ by default "https://graph.facebook.com".
+
+ This method is a wrapper around `OAuth2Mixin.oauth2_request`;
+ the only difference is that this method takes a relative path,
+ while ``oauth2_request`` takes a complete url.
+
+ .. versionchanged:: 3.1
+ Added the ability to override ``self._FACEBOOK_BASE_URL``.
+ """
+ url = self._FACEBOOK_BASE_URL + path
+ # Thanks to the _auth_return_future decorator, our "callback"
+ # argument is a Future, which we cannot pass as a callback to
+ # oauth2_request. Instead, have oauth2_request return a
+ # future and chain them together.
+ oauth_future = self.oauth2_request(url, access_token=access_token,
+ post_args=post_args, **args)
+ chain_future(oauth_future, callback)
+
+
+def _oauth_signature(consumer_token, method, url, parameters={}, token=None):
+ """Calculates the HMAC-SHA1 OAuth signature for the given request.
+
+ See http://oauth.net/core/1.0/#signing_process
+ """
+ parts = urlparse.urlparse(url)
+ scheme, netloc, path = parts[:3]
+ normalized_url = scheme.lower() + "://" + netloc.lower() + path
+
+ base_elems = []
+ base_elems.append(method.upper())
+ base_elems.append(normalized_url)
+ base_elems.append("&".join("%s=%s" % (k, _oauth_escape(str(v)))
+ for k, v in sorted(parameters.items())))
+ base_string = "&".join(_oauth_escape(e) for e in base_elems)
+
+ key_elems = [escape.utf8(consumer_token["secret"])]
+ key_elems.append(escape.utf8(token["secret"] if token else ""))
+ key = b"&".join(key_elems)
+
+ hash = hmac.new(key, escape.utf8(base_string), hashlib.sha1)
+ return binascii.b2a_base64(hash.digest())[:-1]
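+# A quick illustration of the helper above (hypothetical tokens): the signing
+# key is simply "<consumer secret>&<token secret>":
+#
+#     sig = _oauth_signature(dict(key="ck", secret="cs"), "GET",
+#                            "https://example.com/api", {"q": "x"},
+#                            token=dict(key="tk", secret="ts"))
+#     # ``sig`` is the base64-encoded HMAC-SHA1 digest keyed by b"cs&ts"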
+
+
+def _oauth10a_signature(consumer_token, method, url, parameters={}, token=None):
+ """Calculates the HMAC-SHA1 OAuth 1.0a signature for the given request.
+
+ See http://oauth.net/core/1.0a/#signing_process
+ """
+ parts = urlparse.urlparse(url)
+ scheme, netloc, path = parts[:3]
+ normalized_url = scheme.lower() + "://" + netloc.lower() + path
+
+ base_elems = []
+ base_elems.append(method.upper())
+ base_elems.append(normalized_url)
+ base_elems.append("&".join("%s=%s" % (k, _oauth_escape(str(v)))
+ for k, v in sorted(parameters.items())))
+
+ base_string = "&".join(_oauth_escape(e) for e in base_elems)
+ key_elems = [escape.utf8(urllib_parse.quote(consumer_token["secret"], safe='~'))]
+ key_elems.append(escape.utf8(urllib_parse.quote(token["secret"], safe='~') if token else ""))
+ key = b"&".join(key_elems)
+
+ hash = hmac.new(key, escape.utf8(base_string), hashlib.sha1)
+ return binascii.b2a_base64(hash.digest())[:-1]
+
+
+def _oauth_escape(val):
+ if isinstance(val, unicode_type):
+ val = val.encode("utf-8")
+ return urllib_parse.quote(val, safe="~")
+
+
+def _oauth_parse_response(body):
+ # I can't find an officially-defined encoding for oauth responses and
+ # have never seen anyone use non-ascii. Leave the response in a byte
+ # string for python 2, and use utf8 on python 3.
+ body = escape.native_str(body)
+ p = urlparse.parse_qs(body, keep_blank_values=False)
+ token = dict(key=p["oauth_token"][0], secret=p["oauth_token_secret"][0])
+
+ # Add the extra parameters the Provider included to the token
+ special = ("oauth_token", "oauth_token_secret")
+ token.update((k, p[k][0]) for k in p if k not in special)
+ return token
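+# For example, a provider response body of
+# b"oauth_token=abc&oauth_token_secret=def&screen_name=ex" parses to
+# {"key": "abc", "secret": "def", "screen_name": "ex"}.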
diff --git a/contrib/python/tornado/tornado-4/tornado/autoreload.py b/contrib/python/tornado/tornado-4/tornado/autoreload.py
index 60571efe71..fb4a7d9b83 100644
--- a/contrib/python/tornado/tornado-4/tornado/autoreload.py
+++ b/contrib/python/tornado/tornado-4/tornado/autoreload.py
@@ -1,334 +1,334 @@
-#!/usr/bin/env python
-#
-# Copyright 2009 Facebook
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Automatically restart the server when a source file is modified.
-
-Most applications should not access this module directly. Instead,
-pass the keyword argument ``autoreload=True`` to the
-`tornado.web.Application` constructor (or ``debug=True``, which
-enables this setting and several others). This will enable autoreload
-mode as well as checking for changes to templates and static
-resources. Note that restarting is a destructive operation and any
-requests in progress will be aborted when the process restarts. (If
-you want to disable autoreload while using other debug-mode features,
-pass both ``debug=True`` and ``autoreload=False``).
-
-This module can also be used as a command-line wrapper around scripts
-such as unit test runners. See the `main` method for details.
-
-The command-line wrapper and Application debug modes can be used together.
-This combination is encouraged as the wrapper catches syntax errors and
-other import-time failures, while debug mode catches changes once
-the server has started.
-
-This module depends on `.IOLoop`, so it will not work in WSGI applications
-or on Google App Engine. It also will not work correctly when `.HTTPServer`'s
-multi-process mode is used.
-
-Reloading loses any Python interpreter command-line arguments (e.g. ``-u``)
-because it re-executes Python using ``sys.executable`` and ``sys.argv``.
-Additionally, modifying these variables will cause reloading to behave
-incorrectly.
-
-"""
-
-from __future__ import absolute_import, division, print_function
-
-import os
-import sys
-
-# sys.path handling
-# -----------------
-#
-# If a module is run with "python -m", the current directory (i.e. "")
-# is automatically prepended to sys.path, but not if it is run as
-# "path/to/file.py". The processing for "-m" rewrites the former to
-# the latter, so subsequent executions won't have the same path as the
-# original.
-#
-# Conversely, when run as path/to/file.py, the directory containing
-# file.py gets added to the path, which can cause confusion as imports
-# may become relative in spite of the future import.
-#
-# We address the former problem by setting the $PYTHONPATH environment
-# variable before re-execution so the new process will see the correct
-# path. We attempt to address the latter problem when tornado.autoreload
-# is run as __main__, although we can't fix the general case because
-# we cannot reliably reconstruct the original command line
-# (http://bugs.python.org/issue14208).
-
-if __name__ == "__main__":
- # This sys.path manipulation must come before our imports (as much
- # as possible - if we introduced a tornado.sys or tornado.os
- # module we'd be in trouble), or else our imports would become
- # relative again despite the future import.
- #
- # There is a separate __main__ block at the end of the file to call main().
- if sys.path[0] == os.path.dirname(__file__):
- del sys.path[0]
-
-import functools
-import logging
-import os
-import pkgutil # type: ignore
-import sys
-import traceback
-import types
-import subprocess
-import weakref
-
-from tornado import ioloop
-from tornado.log import gen_log
-from tornado import process
-from tornado.util import exec_in
-
-try:
- import signal
-except ImportError:
- signal = None
-
-# os.execv is broken on Windows and can't properly parse command line
-# arguments and executable name if they contain whitespace. subprocess
-# fixes that behavior.
-_has_execv = sys.platform != 'win32'
-
-_watched_files = set()
-_reload_hooks = []
-_reload_attempted = False
-_io_loops = weakref.WeakKeyDictionary() # type: ignore
-
-
-def start(io_loop=None, check_time=500):
- """Begins watching source files for changes.
-
- .. versionchanged:: 4.1
- The ``io_loop`` argument is deprecated.
- """
- io_loop = io_loop or ioloop.IOLoop.current()
- if io_loop in _io_loops:
- return
- _io_loops[io_loop] = True
- if len(_io_loops) > 1:
- gen_log.warning("tornado.autoreload started more than once in the same process")
- modify_times = {}
- callback = functools.partial(_reload_on_update, modify_times)
- scheduler = ioloop.PeriodicCallback(callback, check_time, io_loop=io_loop)
- scheduler.start()
-
-
-def wait():
- """Wait for a watched file to change, then restart the process.
-
- Intended to be used at the end of scripts like unit test runners,
- to run the tests again after any source file changes (but see also
- the command-line interface in `main`)
- """
- io_loop = ioloop.IOLoop()
- start(io_loop)
- io_loop.start()
-
-
-def watch(filename):
- """Add a file to the watch list.
-
- All imported modules are watched by default.
- """
- _watched_files.add(filename)
-
-
-def add_reload_hook(fn):
- """Add a function to be called before reloading the process.
-
- Note that for open file and socket handles it is generally
- preferable to set the ``FD_CLOEXEC`` flag (using `fcntl` or
- ``tornado.platform.auto.set_close_exec``) instead
- of using a reload hook to close them.
- """
- _reload_hooks.append(fn)
-
-
-def _reload_on_update(modify_times):
- if _reload_attempted:
- # We already tried to reload and it didn't work, so don't try again.
- return
- if process.task_id() is not None:
- # We're in a child process created by fork_processes. If child
- # processes restarted themselves, they'd all restart and then
- # all call fork_processes again.
- return
- for module in list(sys.modules.values()):
- # Some modules play games with sys.modules (e.g. email/__init__.py
- # in the standard library), and occasionally this can cause strange
- # failures in getattr. Just ignore anything that's not an ordinary
- # module.
- if not isinstance(module, types.ModuleType):
- continue
- path = getattr(module, "__file__", None)
- if not path:
- continue
- if path.endswith(".pyc") or path.endswith(".pyo"):
- path = path[:-1]
- _check_file(modify_times, path)
- for path in _watched_files:
- _check_file(modify_times, path)
-
-
-def _check_file(modify_times, path):
- try:
- modified = os.stat(path).st_mtime
- except Exception:
- return
- if path not in modify_times:
- modify_times[path] = modified
- return
- if modify_times[path] != modified:
- gen_log.info("%s modified; restarting server", path)
- _reload()
-
-
-def _reload():
- global _reload_attempted
- _reload_attempted = True
- for fn in _reload_hooks:
- fn()
- if hasattr(signal, "setitimer"):
- # Clear the alarm signal set by
- # ioloop.set_blocking_log_threshold so it doesn't fire
- # after the exec.
- signal.setitimer(signal.ITIMER_REAL, 0, 0)
- # sys.path fixes: see comments at top of file. If sys.path[0] is an empty
- # string, we were (probably) invoked with -m and the effective path
- # is about to change on re-exec. Add the current directory to $PYTHONPATH
- # to ensure that the new process sees the same path we did.
- path_prefix = '.' + os.pathsep
- if (sys.path[0] == '' and
- not os.environ.get("PYTHONPATH", "").startswith(path_prefix)):
- os.environ["PYTHONPATH"] = (path_prefix +
- os.environ.get("PYTHONPATH", ""))
- if not _has_execv:
- subprocess.Popen([sys.executable] + sys.argv)
- sys.exit(0)
- else:
- try:
- os.execv(sys.executable, [sys.executable] + sys.argv)
- except OSError:
- # Mac OS X versions prior to 10.6 do not support execv in
- # a process that contains multiple threads. Instead of
- # re-executing in the current process, start a new one
- # and cause the current process to exit. This isn't
- # ideal since the new process is detached from the parent
- # terminal and thus cannot easily be killed with ctrl-C,
- # but it's better than not being able to autoreload at
- # all.
- # Unfortunately the errno returned in this case does not
- # appear to be consistent, so we can't easily check for
- # this error specifically.
- os.spawnv(os.P_NOWAIT, sys.executable,
- [sys.executable] + sys.argv)
- # At this point the IOLoop has been closed and finally
- # blocks will experience errors if we allow the stack to
- # unwind, so just exit uncleanly.
- os._exit(0)
-
-
-_USAGE = """\
-Usage:
- python -m tornado.autoreload -m module.to.run [args...]
- python -m tornado.autoreload path/to/script.py [args...]
-"""
-
-
-def main():
- """Command-line wrapper to re-run a script whenever its source changes.
-
- Scripts may be specified by filename or module name::
-
- python -m tornado.autoreload -m tornado.test.runtests
- python -m tornado.autoreload tornado/test/runtests.py
-
- Running a script with this wrapper is similar to calling
- `tornado.autoreload.wait` at the end of the script, but this wrapper
- can catch import-time problems like syntax errors that would otherwise
- prevent the script from reaching its call to `wait`.
- """
- original_argv = sys.argv
- sys.argv = sys.argv[:]
- if len(sys.argv) >= 3 and sys.argv[1] == "-m":
- mode = "module"
- module = sys.argv[2]
- del sys.argv[1:3]
- elif len(sys.argv) >= 2:
- mode = "script"
- script = sys.argv[1]
- sys.argv = sys.argv[1:]
- else:
- print(_USAGE, file=sys.stderr)
- sys.exit(1)
-
- try:
- if mode == "module":
- import runpy
- runpy.run_module(module, run_name="__main__", alter_sys=True)
- elif mode == "script":
- with open(script) as f:
- # Execute the script in our namespace instead of creating
- # a new one so that something that tries to import __main__
- # (e.g. the unittest module) will see names defined in the
- # script instead of just those defined in this module.
- global __file__
- __file__ = script
- # If __package__ is defined, imports may be incorrectly
- # interpreted as relative to this module.
- global __package__
- del __package__
- exec_in(f.read(), globals(), globals())
- except SystemExit as e:
- logging.basicConfig()
- gen_log.info("Script exited with status %s", e.code)
- except Exception as e:
- logging.basicConfig()
- gen_log.warning("Script exited with uncaught exception", exc_info=True)
- # If an exception occurred at import time, the file with the error
- # never made it into sys.modules and so we won't know to watch it.
- # Just to make sure we've covered everything, walk the stack trace
- # from the exception and watch every file.
- for (filename, lineno, name, line) in traceback.extract_tb(sys.exc_info()[2]):
- watch(filename)
- if isinstance(e, SyntaxError):
- # SyntaxErrors are special: their innermost stack frame is fake
- # so extract_tb won't see it and we have to get the filename
- # from the exception object.
- watch(e.filename)
- else:
- logging.basicConfig()
- gen_log.info("Script exited normally")
- # restore sys.argv so subsequent executions will include autoreload
- sys.argv = original_argv
-
- if mode == 'module':
- # runpy did a fake import of the module as __main__, but now it's
- # no longer in sys.modules. Figure out where it is and watch it.
- loader = pkgutil.get_loader(module)
- if loader is not None:
- watch(loader.get_filename())
-
- wait()
-
-
-if __name__ == "__main__":
- # See also the other __main__ block at the top of the file, which modifies
- # sys.path before our imports
- main()
+#!/usr/bin/env python
+#
+# Copyright 2009 Facebook
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Automatically restart the server when a source file is modified.
+
+Most applications should not access this module directly. Instead,
+pass the keyword argument ``autoreload=True`` to the
+`tornado.web.Application` constructor (or ``debug=True``, which
+enables this setting and several others). This will enable autoreload
+mode as well as checking for changes to templates and static
+resources. Note that restarting is a destructive operation and any
+requests in progress will be aborted when the process restarts. (If
+you want to disable autoreload while using other debug-mode features,
+pass both ``debug=True`` and ``autoreload=False``).
+
+This module can also be used as a command-line wrapper around scripts
+such as unit test runners. See the `main` method for details.
+
+The command-line wrapper and Application debug modes can be used together.
+This combination is encouraged as the wrapper catches syntax errors and
+other import-time failures, while debug mode catches changes once
+the server has started.
+
+This module depends on `.IOLoop`, so it will not work in WSGI applications
+and Google App Engine. It also will not work correctly when `.HTTPServer`'s
+multi-process mode is used.
+
+Reloading loses any Python interpreter command-line arguments (e.g. ``-u``)
+because it re-executes Python using ``sys.executable`` and ``sys.argv``.
+Additionally, modifying these variables will cause reloading to behave
+incorrectly.
+
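+For example, a minimal application (the handler and port here are
+illustrative) enables autoreload via ``debug=True``::
+
+ import tornado.ioloop
+ import tornado.web
+
+ class MainHandler(tornado.web.RequestHandler):
+ def get(self):
+ self.write("Hello, world")
+
+ app = tornado.web.Application([(r"/", MainHandler)], debug=True)
+ app.listen(8888)
+ tornado.ioloop.IOLoop.current().start()
+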
+"""
+
+from __future__ import absolute_import, division, print_function
+
+import os
+import sys
+
+# sys.path handling
+# -----------------
+#
+# If a module is run with "python -m", the current directory (i.e. "")
+# is automatically prepended to sys.path, but not if it is run as
+# "path/to/file.py". The processing for "-m" rewrites the former to
+# the latter, so subsequent executions won't have the same path as the
+# original.
+#
+# Conversely, when run as path/to/file.py, the directory containing
+# file.py gets added to the path, which can cause confusion as imports
+# may become relative in spite of the future import.
+#
+# We address the former problem by setting the $PYTHONPATH environment
+# variable before re-execution so the new process will see the correct
+# path. We attempt to address the latter problem when tornado.autoreload
+# is run as __main__, although we can't fix the general case because
+# we cannot reliably reconstruct the original command line
+# (http://bugs.python.org/issue14208).
+
+if __name__ == "__main__":
+ # This sys.path manipulation must come before our imports (as much
+ # as possible - if we introduced a tornado.sys or tornado.os
+ # module we'd be in trouble), or else our imports would become
+ # relative again despite the future import.
+ #
+ # There is a separate __main__ block at the end of the file to call main().
+ if sys.path[0] == os.path.dirname(__file__):
+ del sys.path[0]
+
+import functools
+import logging
+import os
+import pkgutil # type: ignore
+import sys
+import traceback
+import types
+import subprocess
+import weakref
+
+from tornado import ioloop
+from tornado.log import gen_log
+from tornado import process
+from tornado.util import exec_in
+
+try:
+ import signal
+except ImportError:
+ signal = None
+
+# os.execv is broken on Windows and can't properly parse command line
+# arguments and executable name if they contain whitespace. subprocess
+# fixes that behavior.
+_has_execv = sys.platform != 'win32'
+
+_watched_files = set()
+_reload_hooks = []
+_reload_attempted = False
+_io_loops = weakref.WeakKeyDictionary() # type: ignore
+
+
+def start(io_loop=None, check_time=500):
+ """Begins watching source files for changes.
+
+ .. versionchanged:: 4.1
+ The ``io_loop`` argument is deprecated.
+ """
+ io_loop = io_loop or ioloop.IOLoop.current()
+ if io_loop in _io_loops:
+ return
+ _io_loops[io_loop] = True
+ if len(_io_loops) > 1:
+ gen_log.warning("tornado.autoreload started more than once in the same process")
+ modify_times = {}
+ callback = functools.partial(_reload_on_update, modify_times)
+ scheduler = ioloop.PeriodicCallback(callback, check_time, io_loop=io_loop)
+ scheduler.start()
+
+
+def wait():
+ """Wait for a watched file to change, then restart the process.
+
+ Intended to be used at the end of scripts like unit test runners,
+ to run the tests again after any source file changes (but see also
+ the command-line interface in `main`).
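+
+ A sketch of the intended pattern (``run_tests`` is a stand-in for
+ whatever work the script normally does)::
+
+ run_tests()
+ tornado.autoreload.wait()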
+ """
+ io_loop = ioloop.IOLoop()
+ start(io_loop)
+ io_loop.start()
+
+
+def watch(filename):
+ """Add a file to the watch list.
+
+ All imported modules are watched by default.
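+
+ Use it to watch files that are not Python modules, for example
+ (the path is illustrative)::
+
+ tornado.autoreload.watch("myapp/config.yaml")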
+ """
+ _watched_files.add(filename)
+
+
+def add_reload_hook(fn):
+ """Add a function to be called before reloading the process.
+
+ Note that for open file and socket handles it is generally
+ preferable to set the ``FD_CLOEXEC`` flag (using `fcntl` or
+ ``tornado.platform.auto.set_close_exec``) instead
+ of using a reload hook to close them.
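+
+ A minimal sketch (the hook body is illustrative)::
+
+ def before_reload():
+ print("about to reload")
+
+ tornado.autoreload.add_reload_hook(before_reload)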
+ """
+ _reload_hooks.append(fn)
+
+
+def _reload_on_update(modify_times):
+ if _reload_attempted:
+ # We already tried to reload and it didn't work, so don't try again.
+ return
+ if process.task_id() is not None:
+ # We're in a child process created by fork_processes. If child
+ # processes restarted themselves, they'd all restart and then
+ # all call fork_processes again.
+ return
+ for module in list(sys.modules.values()):
+ # Some modules play games with sys.modules (e.g. email/__init__.py
+ # in the standard library), and occasionally this can cause strange
+ # failures in getattr. Just ignore anything that's not an ordinary
+ # module.
+ if not isinstance(module, types.ModuleType):
+ continue
+ path = getattr(module, "__file__", None)
+ if not path:
+ continue
+ if path.endswith(".pyc") or path.endswith(".pyo"):
+ path = path[:-1]
+ _check_file(modify_times, path)
+ for path in _watched_files:
+ _check_file(modify_times, path)
+
+
+def _check_file(modify_times, path):
+ try:
+ modified = os.stat(path).st_mtime
+ except Exception:
+ return
+ if path not in modify_times:
+ modify_times[path] = modified
+ return
+ if modify_times[path] != modified:
+ gen_log.info("%s modified; restarting server", path)
+ _reload()
+
+
+def _reload():
+ global _reload_attempted
+ _reload_attempted = True
+ for fn in _reload_hooks:
+ fn()
+ if hasattr(signal, "setitimer"):
+ # Clear the alarm signal set by
+ # ioloop.set_blocking_log_threshold so it doesn't fire
+ # after the exec.
+ signal.setitimer(signal.ITIMER_REAL, 0, 0)
+ # sys.path fixes: see comments at top of file. If sys.path[0] is an empty
+ # string, we were (probably) invoked with -m and the effective path
+ # is about to change on re-exec. Add the current directory to $PYTHONPATH
+ # to ensure that the new process sees the same path we did.
+ path_prefix = '.' + os.pathsep
+ if (sys.path[0] == '' and
+ not os.environ.get("PYTHONPATH", "").startswith(path_prefix)):
+ os.environ["PYTHONPATH"] = (path_prefix +
+ os.environ.get("PYTHONPATH", ""))
+ if not _has_execv:
+ subprocess.Popen([sys.executable] + sys.argv)
+ sys.exit(0)
+ else:
+ try:
+ os.execv(sys.executable, [sys.executable] + sys.argv)
+ except OSError:
+ # Mac OS X versions prior to 10.6 do not support execv in
+ # a process that contains multiple threads. Instead of
+ # re-executing in the current process, start a new one
+ # and cause the current process to exit. This isn't
+ # ideal since the new process is detached from the parent
+ # terminal and thus cannot easily be killed with ctrl-C,
+ # but it's better than not being able to autoreload at
+ # all.
+ # Unfortunately the errno returned in this case does not
+ # appear to be consistent, so we can't easily check for
+ # this error specifically.
+ os.spawnv(os.P_NOWAIT, sys.executable,
+ [sys.executable] + sys.argv)
+ # At this point the IOLoop has been closed and finally
+ # blocks will experience errors if we allow the stack to
+ # unwind, so just exit uncleanly.
+ os._exit(0)
+
+
+_USAGE = """\
+Usage:
+ python -m tornado.autoreload -m module.to.run [args...]
+ python -m tornado.autoreload path/to/script.py [args...]
+"""
+
+
+def main():
+ """Command-line wrapper to re-run a script whenever its source changes.
+
+ Scripts may be specified by filename or module name::
+
+ python -m tornado.autoreload -m tornado.test.runtests
+ python -m tornado.autoreload tornado/test/runtests.py
+
+ Running a script with this wrapper is similar to calling
+ `tornado.autoreload.wait` at the end of the script, but this wrapper
+ can catch import-time problems like syntax errors that would otherwise
+ prevent the script from reaching its call to `wait`.
+ """
+ original_argv = sys.argv
+ sys.argv = sys.argv[:]
+ if len(sys.argv) >= 3 and sys.argv[1] == "-m":
+ mode = "module"
+ module = sys.argv[2]
+ del sys.argv[1:3]
+ elif len(sys.argv) >= 2:
+ mode = "script"
+ script = sys.argv[1]
+ sys.argv = sys.argv[1:]
+ else:
+ print(_USAGE, file=sys.stderr)
+ sys.exit(1)
+
+ try:
+ if mode == "module":
+ import runpy
+ runpy.run_module(module, run_name="__main__", alter_sys=True)
+ elif mode == "script":
+ with open(script) as f:
+ # Execute the script in our namespace instead of creating
+ # a new one so that something that tries to import __main__
+ # (e.g. the unittest module) will see names defined in the
+ # script instead of just those defined in this module.
+ global __file__
+ __file__ = script
+ # If __package__ is defined, imports may be incorrectly
+ # interpreted as relative to this module.
+ global __package__
+ del __package__
+ exec_in(f.read(), globals(), globals())
+ except SystemExit as e:
+ logging.basicConfig()
+ gen_log.info("Script exited with status %s", e.code)
+ except Exception as e:
+ logging.basicConfig()
+ gen_log.warning("Script exited with uncaught exception", exc_info=True)
+ # If an exception occurred at import time, the file with the error
+ # never made it into sys.modules and so we won't know to watch it.
+ # Just to make sure we've covered everything, walk the stack trace
+ # from the exception and watch every file.
+ for (filename, lineno, name, line) in traceback.extract_tb(sys.exc_info()[2]):
+ watch(filename)
+ if isinstance(e, SyntaxError):
+ # SyntaxErrors are special: their innermost stack frame is fake
+ # so extract_tb won't see it and we have to get the filename
+ # from the exception object.
+ watch(e.filename)
+ else:
+ logging.basicConfig()
+ gen_log.info("Script exited normally")
+ # restore sys.argv so subsequent executions will include autoreload
+ sys.argv = original_argv
+
+ if mode == 'module':
+ # runpy did a fake import of the module as __main__, but now it's
+ # no longer in sys.modules. Figure out where it is and watch it.
+ loader = pkgutil.get_loader(module)
+ if loader is not None:
+ watch(loader.get_filename())
+
+ wait()
+
+
+if __name__ == "__main__":
+ # See also the other __main__ block at the top of the file, which modifies
+ # sys.path before our imports
+ main()
diff --git a/contrib/python/tornado/tornado-4/tornado/concurrent.py b/contrib/python/tornado/tornado-4/tornado/concurrent.py
index 667e6b1788..dc82ff0009 100644
--- a/contrib/python/tornado/tornado-4/tornado/concurrent.py
+++ b/contrib/python/tornado/tornado-4/tornado/concurrent.py
@@ -1,521 +1,521 @@
-#!/usr/bin/env python
-#
-# Copyright 2012 Facebook
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Utilities for working with threads and ``Futures``.
-
-``Futures`` are a pattern for concurrent programming introduced in
-Python 3.2 in the `concurrent.futures` package. This package defines
-a mostly-compatible `Future` class designed for use from coroutines,
-as well as some utility functions for interacting with the
-`concurrent.futures` package.
-"""
-from __future__ import absolute_import, division, print_function
-
-import functools
-import platform
-import textwrap
-import traceback
-import sys
-
-from tornado.log import app_log
-from tornado.stack_context import ExceptionStackContext, wrap
-from tornado.util import raise_exc_info, ArgReplacer, is_finalizing
-
-try:
- from concurrent import futures
-except ImportError:
- futures = None
-
-try:
- import typing
-except ImportError:
- typing = None
-
-
-# Can the garbage collector handle cycles that include __del__ methods?
-# This is true in cpython beginning with version 3.4 (PEP 442).
-_GC_CYCLE_FINALIZERS = (platform.python_implementation() == 'CPython' and
- sys.version_info >= (3, 4))
-
-
-class ReturnValueIgnoredError(Exception):
- pass
-
-# This class and the associated code in the Future class are derived
-# from the Trollius project, a backport of asyncio to Python 2.x - 3.x.
-
-
-class _TracebackLogger(object):
- """Helper to log a traceback upon destruction if not cleared.
-
- This solves a nasty problem with Futures and Tasks that have an
- exception set: if nobody asks for the exception, the exception is
- never logged. This violates the Zen of Python: 'Errors should
- never pass silently. Unless explicitly silenced.'
-
- However, we don't want to log the exception as soon as
- set_exception() is called: if the calling code is written
- properly, it will get the exception and handle it properly. But
- we *do* want to log it if result() or exception() was never called
- -- otherwise developers waste a lot of time wondering why their
- buggy code fails silently.
-
- An earlier attempt added a __del__() method to the Future class
- itself, but this backfired because the presence of __del__()
- prevents garbage collection from breaking cycles. A way out of
- this catch-22 is to avoid having a __del__() method on the Future
- class itself, but instead to have a reference to a helper object
- with a __del__() method that logs the traceback, where we ensure
- that the helper object doesn't participate in cycles, and only the
- Future has a reference to it.
-
- The helper object is added when set_exception() is called. When
- the Future is collected, and the helper is present, the helper
- object is also collected, and its __del__() method will log the
- traceback. When the Future's result() or exception() method is
- called (and a helper object is present), it removes the helper
- object, after calling its clear() method to prevent it from
- logging.
-
- One downside is that we do a fair amount of work to extract the
- traceback from the exception, even when it is never logged. It
- would seem cheaper to just store the exception object, but that
- references the traceback, which references stack frames, which may
- reference the Future, which references the _TracebackLogger, and
- then the _TracebackLogger would be included in a cycle, which is
- what we're trying to avoid! As an optimization, we don't
- immediately format the exception; we only do the work when
- activate() is called, and that call is delayed until after all the
- Future's callbacks have run. Since a Future usually has at least
- one callback (typically set by 'yield From'), and that callback
- usually extracts the exception, the traceback rarely needs to be
- formatted at all.
-
- PS. I don't claim credit for this solution. I first heard of it
- in a discussion about closing files when they are collected.
- """
-
- __slots__ = ('exc_info', 'formatted_tb')
-
- def __init__(self, exc_info):
- self.exc_info = exc_info
- self.formatted_tb = None
-
- def activate(self):
- exc_info = self.exc_info
- if exc_info is not None:
- self.exc_info = None
- self.formatted_tb = traceback.format_exception(*exc_info)
-
- def clear(self):
- self.exc_info = None
- self.formatted_tb = None
-
- def __del__(self, is_finalizing=is_finalizing):
- if not is_finalizing() and self.formatted_tb:
- app_log.error('Future exception was never retrieved: %s',
- ''.join(self.formatted_tb).rstrip())
-
-
-class Future(object):
- """Placeholder for an asynchronous result.
-
- A ``Future`` encapsulates the result of an asynchronous
- operation. In synchronous applications ``Futures`` are used
- to wait for the result from a thread or process pool; in
- Tornado they are normally used with `.IOLoop.add_future` or by
- yielding them in a `.gen.coroutine`.
-
- `tornado.concurrent.Future` is similar to
- `concurrent.futures.Future`, but not thread-safe (and therefore
- faster for use with single-threaded event loops).
-
- In addition to ``exception`` and ``set_exception``, methods ``exc_info``
- and ``set_exc_info`` are supported to capture tracebacks in Python 2.
- The traceback is automatically available in Python 3, but in the
- Python 2 futures backport this information is discarded.
- This functionality was previously available in a separate class
- ``TracebackFuture``, which is now a deprecated alias for this class.
-
- .. versionchanged:: 4.0
- `tornado.concurrent.Future` is always a thread-unsafe ``Future``
- with support for the ``exc_info`` methods. Previously it would
- be an alias for the thread-safe `concurrent.futures.Future`
- if that package was available and fall back to the thread-unsafe
- implementation if it was not.
-
- .. versionchanged:: 4.1
- If a `.Future` contains an error but that error is never observed
- (by calling ``result()``, ``exception()``, or ``exc_info()``),
- a stack trace will be logged when the `.Future` is garbage collected.
- This normally indicates an error in the application, but in cases
- where it results in undesired logging it may be necessary to
- suppress the logging by ensuring that the exception is observed:
- ``f.add_done_callback(lambda f: f.exception())``.
- """
- def __init__(self):
- self._done = False
- self._result = None
- self._exc_info = None
-
- self._log_traceback = False # Used for Python >= 3.4
- self._tb_logger = None # Used for Python <= 3.3
-
- self._callbacks = []
-
- # Implement the Python 3.5 Awaitable protocol if possible
- # (we can't use return and yield together until py33).
- if sys.version_info >= (3, 3):
- exec(textwrap.dedent("""
- def __await__(self):
- return (yield self)
- """))
- else:
- # Py2-compatible version for use with cython.
- def __await__(self):
- result = yield self
- # StopIteration doesn't take args before py33,
- # but Cython recognizes the args tuple.
- e = StopIteration()
- e.args = (result,)
- raise e
-
- def cancel(self):
- """Cancel the operation, if possible.
-
- Tornado ``Futures`` do not support cancellation, so this method always
- returns False.
- """
- return False
-
- def cancelled(self):
- """Returns True if the operation has been cancelled.
-
- Tornado ``Futures`` do not support cancellation, so this method
- always returns False.
- """
- return False
-
- def running(self):
- """Returns True if this operation is currently running."""
- return not self._done
-
- def done(self):
- """Returns True if the future has finished running."""
- return self._done
-
- def _clear_tb_log(self):
- self._log_traceback = False
- if self._tb_logger is not None:
- self._tb_logger.clear()
- self._tb_logger = None
-
- def result(self, timeout=None):
- """If the operation succeeded, return its result. If it failed,
- re-raise its exception.
-
- This method takes a ``timeout`` argument for compatibility with
- `concurrent.futures.Future` but it is an error to call it
- before the `Future` is done, so the ``timeout`` is never used.
- """
- self._clear_tb_log()
- if self._result is not None:
- return self._result
- if self._exc_info is not None:
- try:
- raise_exc_info(self._exc_info)
- finally:
- self = None
- self._check_done()
- return self._result
-
- def exception(self, timeout=None):
- """If the operation raised an exception, return the `Exception`
- object. Otherwise returns None.
-
- This method takes a ``timeout`` argument for compatibility with
- `concurrent.futures.Future` but it is an error to call it
- before the `Future` is done, so the ``timeout`` is never used.
- """
- self._clear_tb_log()
- if self._exc_info is not None:
- return self._exc_info[1]
- else:
- self._check_done()
- return None
-
- def add_done_callback(self, fn):
- """Attaches the given callback to the `Future`.
-
- It will be invoked with the `Future` as its argument when the Future
- has finished running and its result is available. In Tornado
- consider using `.IOLoop.add_future` instead of calling
- `add_done_callback` directly.
- """
- if self._done:
- fn(self)
- else:
- self._callbacks.append(fn)
-
- def set_result(self, result):
- """Sets the result of a ``Future``.
-
- It is undefined to call any of the ``set`` methods more than once
- on the same object.
- """
- self._result = result
- self._set_done()
-
- def set_exception(self, exception):
- """Sets the exception of a ``Future.``"""
- self.set_exc_info(
- (exception.__class__,
- exception,
- getattr(exception, '__traceback__', None)))
-
- def exc_info(self):
- """Returns a tuple in the same format as `sys.exc_info` or None.
-
- .. versionadded:: 4.0
- """
- self._clear_tb_log()
- return self._exc_info
-
- def set_exc_info(self, exc_info):
- """Sets the exception information of a ``Future.``
-
- Preserves tracebacks on Python 2.
-
- .. versionadded:: 4.0
- """
- self._exc_info = exc_info
- self._log_traceback = True
- if not _GC_CYCLE_FINALIZERS:
- self._tb_logger = _TracebackLogger(exc_info)
-
- try:
- self._set_done()
- finally:
- # Activate the logger after all callbacks have had a
- # chance to call result() or exception().
- if self._log_traceback and self._tb_logger is not None:
- self._tb_logger.activate()
- self._exc_info = exc_info
-
- def _check_done(self):
- if not self._done:
- raise Exception("DummyFuture does not support blocking for results")
-
- def _set_done(self):
- self._done = True
- for cb in self._callbacks:
- try:
- cb(self)
- except Exception:
- app_log.exception('Exception in callback %r for %r',
- cb, self)
- self._callbacks = None
-
- # On Python 3.3 or older, objects with a destructor part of a reference
- # cycle are never destroyed. It's no longer the case on Python 3.4 thanks to
- # the PEP 442.
- if _GC_CYCLE_FINALIZERS:
- def __del__(self, is_finalizing=is_finalizing):
- if is_finalizing() or not self._log_traceback:
- # set_exception() was not called, or result() or exception()
- # has consumed the exception
- return
-
- tb = traceback.format_exception(*self._exc_info)
-
- app_log.error('Future %r exception was never retrieved: %s',
- self, ''.join(tb).rstrip())
-
-
-TracebackFuture = Future
-
-if futures is None:
- FUTURES = Future # type: typing.Union[type, typing.Tuple[type, ...]]
-else:
- FUTURES = (futures.Future, Future)
-
-
-def is_future(x):
- return isinstance(x, FUTURES)
-
-
-class DummyExecutor(object):
- def submit(self, fn, *args, **kwargs):
- future = TracebackFuture()
- try:
- future.set_result(fn(*args, **kwargs))
- except Exception:
- future.set_exc_info(sys.exc_info())
- return future
-
- def shutdown(self, wait=True):
- pass
-
-
-dummy_executor = DummyExecutor()
-
-
-def run_on_executor(*args, **kwargs):
- """Decorator to run a synchronous method asynchronously on an executor.
-
- The decorated method may be called with a ``callback`` keyword
- argument and returns a future.
-
- The `.IOLoop` and executor to be used are determined by the ``io_loop``
- and ``executor`` attributes of ``self``. To use different attributes,
- pass keyword arguments to the decorator::
-
- @run_on_executor(executor='_thread_pool')
- def foo(self):
- pass
-
- .. versionchanged:: 4.2
- Added keyword arguments to use alternative attributes.
- """
- def run_on_executor_decorator(fn):
- executor = kwargs.get("executor", "executor")
- io_loop = kwargs.get("io_loop", "io_loop")
-
- @functools.wraps(fn)
- def wrapper(self, *args, **kwargs):
- callback = kwargs.pop("callback", None)
- future = getattr(self, executor).submit(fn, self, *args, **kwargs)
- if callback:
- getattr(self, io_loop).add_future(
- future, lambda future: callback(future.result()))
- return future
- return wrapper
- if args and kwargs:
- raise ValueError("cannot combine positional and keyword args")
- if len(args) == 1:
- return run_on_executor_decorator(args[0])
- elif len(args) != 0:
- raise ValueError("expected 1 argument, got %d", len(args))
- return run_on_executor_decorator
-
-
-_NO_RESULT = object()
-
-
-def return_future(f):
- """Decorator to make a function that returns via callback return a
- `Future`.
-
- The wrapped function should take a ``callback`` keyword argument
- and invoke it with one argument when it has finished. To signal failure,
- the function can simply raise an exception (which will be
- captured by the `.StackContext` and passed along to the ``Future``).
-
- From the caller's perspective, the callback argument is optional.
- If one is given, it will be invoked when the function is complete
- with `Future.result()` as an argument. If the function fails, the
- callback will not be run and an exception will be raised into the
- surrounding `.StackContext`.
-
- If no callback is given, the caller should use the ``Future`` to
- wait for the function to complete (perhaps by yielding it in a
- `.gen.engine` function, or passing it to `.IOLoop.add_future`).
-
- Usage:
-
- .. testcode::
-
- @return_future
- def future_func(arg1, arg2, callback):
- # Do stuff (possibly asynchronous)
- callback(result)
-
- @gen.engine
- def caller(callback):
- yield future_func(arg1, arg2)
- callback()
-
- ..
-
- Note that ``@return_future`` and ``@gen.engine`` can be applied to the
- same function, provided ``@return_future`` appears first. However,
- consider using ``@gen.coroutine`` instead of this combination.
- """
- replacer = ArgReplacer(f, 'callback')
-
- @functools.wraps(f)
- def wrapper(*args, **kwargs):
- future = TracebackFuture()
- callback, args, kwargs = replacer.replace(
- lambda value=_NO_RESULT: future.set_result(value),
- args, kwargs)
-
- def handle_error(typ, value, tb):
- future.set_exc_info((typ, value, tb))
- return True
- exc_info = None
- with ExceptionStackContext(handle_error):
- try:
- result = f(*args, **kwargs)
- if result is not None:
- raise ReturnValueIgnoredError(
- "@return_future should not be used with functions "
- "that return values")
- except:
- exc_info = sys.exc_info()
- raise
- if exc_info is not None:
- # If the initial synchronous part of f() raised an exception,
- # go ahead and raise it to the caller directly without waiting
- # for them to inspect the Future.
- future.result()
-
- # If the caller passed in a callback, schedule it to be called
- # when the future resolves. It is important that this happens
- # just before we return the future, or else we risk confusing
- # stack contexts with multiple exceptions (one here with the
- # immediate exception, and again when the future resolves and
- # the callback triggers its exception by calling future.result()).
- if callback is not None:
- def run_callback(future):
- result = future.result()
- if result is _NO_RESULT:
- callback()
- else:
- callback(future.result())
- future.add_done_callback(wrap(run_callback))
- return future
- return wrapper
-
-
-def chain_future(a, b):
- """Chain two futures together so that when one completes, so does the other.
-
- The result (success or failure) of ``a`` will be copied to ``b``, unless
- ``b`` has already been completed or cancelled by the time ``a`` finishes.
- """
- def copy(future):
- assert future is a
- if b.done():
- return
- if (isinstance(a, TracebackFuture) and
- isinstance(b, TracebackFuture) and
- a.exc_info() is not None):
- b.set_exc_info(a.exc_info())
- elif a.exception() is not None:
- b.set_exception(a.exception())
- else:
- b.set_result(a.result())
- a.add_done_callback(copy)
+#!/usr/bin/env python
+#
+# Copyright 2012 Facebook
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""Utilities for working with threads and ``Futures``.
+
+``Futures`` are a pattern for concurrent programming introduced in
+Python 3.2 in the `concurrent.futures` package. This package defines
+a mostly-compatible `Future` class designed for use from coroutines,
+as well as some utility functions for interacting with the
+`concurrent.futures` package.
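+
+A minimal illustration of the interface (the values are arbitrary)::
+
+ from tornado.concurrent import Future
+
+ f = Future()
+ f.add_done_callback(lambda f: print(f.result()))
+ f.set_result(42)  # runs the callback with the finished future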
+"""
+from __future__ import absolute_import, division, print_function
+
+import functools
+import platform
+import textwrap
+import traceback
+import sys
+
+from tornado.log import app_log
+from tornado.stack_context import ExceptionStackContext, wrap
+from tornado.util import raise_exc_info, ArgReplacer, is_finalizing
+
+try:
+ from concurrent import futures
+except ImportError:
+ futures = None
+
+try:
+ import typing
+except ImportError:
+ typing = None
+
+
+# Can the garbage collector handle cycles that include __del__ methods?
+# This is true in cpython beginning with version 3.4 (PEP 442).
+_GC_CYCLE_FINALIZERS = (platform.python_implementation() == 'CPython' and
+ sys.version_info >= (3, 4))
+
+
+class ReturnValueIgnoredError(Exception):
+ pass
+
+# This class and the associated code in the Future class are derived
+# from the Trollius project, a backport of asyncio to Python 2.x - 3.x.
+
+
+class _TracebackLogger(object):
+ """Helper to log a traceback upon destruction if not cleared.
+
+ This solves a nasty problem with Futures and Tasks that have an
+ exception set: if nobody asks for the exception, the exception is
+ never logged. This violates the Zen of Python: 'Errors should
+ never pass silently. Unless explicitly silenced.'
+
+ However, we don't want to log the exception as soon as
+ set_exception() is called: if the calling code is written
+ properly, it will get the exception and handle it properly. But
+ we *do* want to log it if result() or exception() was never called
+ -- otherwise developers waste a lot of time wondering why their
+ buggy code fails silently.
+
+ An earlier attempt added a __del__() method to the Future class
+ itself, but this backfired because the presence of __del__()
+ prevents garbage collection from breaking cycles. A way out of
+ this catch-22 is to avoid having a __del__() method on the Future
+ class itself, but instead to have a reference to a helper object
+ with a __del__() method that logs the traceback, where we ensure
+ that the helper object doesn't participate in cycles, and only the
+ Future has a reference to it.
+
+ The helper object is added when set_exception() is called. When
+ the Future is collected, and the helper is present, the helper
+ object is also collected, and its __del__() method will log the
+ traceback. When the Future's result() or exception() method is
+ called (and a helper object is present), it removes the helper
+ object, after calling its clear() method to prevent it from
+ logging.
+
+ One downside is that we do a fair amount of work to extract the
+ traceback from the exception, even when it is never logged. It
+ would seem cheaper to just store the exception object, but that
+ references the traceback, which references stack frames, which may
+ reference the Future, which references the _TracebackLogger, and
+ then the _TracebackLogger would be included in a cycle, which is
+ what we're trying to avoid! As an optimization, we don't
+ immediately format the exception; we only do the work when
+ activate() is called, and that call is delayed until after all the
+ Future's callbacks have run. Since a Future usually has at least
+ one callback (typically set by 'yield From'), and that callback
+ usually extracts the exception, the traceback rarely needs to be
+ formatted at all.
+
+ PS. I don't claim credit for this solution. I first heard of it
+ in a discussion about closing files when they are collected.
+ """
+
+ __slots__ = ('exc_info', 'formatted_tb')
+
+ def __init__(self, exc_info):
+ self.exc_info = exc_info
+ self.formatted_tb = None
+
+ def activate(self):
+ exc_info = self.exc_info
+ if exc_info is not None:
+ self.exc_info = None
+ self.formatted_tb = traceback.format_exception(*exc_info)
+
+ def clear(self):
+ self.exc_info = None
+ self.formatted_tb = None
+
+ def __del__(self, is_finalizing=is_finalizing):
+ if not is_finalizing() and self.formatted_tb:
+ app_log.error('Future exception was never retrieved: %s',
+ ''.join(self.formatted_tb).rstrip())
+
+
+class Future(object):
+ """Placeholder for an asynchronous result.
+
+ A ``Future`` encapsulates the result of an asynchronous
+ operation. In synchronous applications ``Futures`` are used
+ to wait for the result from a thread or process pool; in
+ Tornado they are normally used with `.IOLoop.add_future` or by
+ yielding them in a `.gen.coroutine`.
+
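+ For example, in a coroutine (``fetch_data`` is a placeholder and
+ ``from tornado import gen`` is assumed)::
+
+ @gen.coroutine
+ def get_data():
+ result = yield fetch_data()
+ raise gen.Return(result)
+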
+ `tornado.concurrent.Future` is similar to
+ `concurrent.futures.Future`, but not thread-safe (and therefore
+ faster for use with single-threaded event loops).
+
+ In addition to ``exception`` and ``set_exception``, methods ``exc_info``
+ and ``set_exc_info`` are supported to capture tracebacks in Python 2.
+ The traceback is automatically available in Python 3, but in the
+ Python 2 futures backport this information is discarded.
+ This functionality was previously available in a separate class
+ ``TracebackFuture``, which is now a deprecated alias for this class.
+
+ .. versionchanged:: 4.0
+ `tornado.concurrent.Future` is always a thread-unsafe ``Future``
+ with support for the ``exc_info`` methods. Previously it would
+ be an alias for the thread-safe `concurrent.futures.Future`
+ if that package was available and fall back to the thread-unsafe
+ implementation if it was not.
+
+ .. versionchanged:: 4.1
+ If a `.Future` contains an error but that error is never observed
+ (by calling ``result()``, ``exception()``, or ``exc_info()``),
+ a stack trace will be logged when the `.Future` is garbage collected.
+ This normally indicates an error in the application, but in cases
+ where it results in undesired logging it may be necessary to
+ suppress the logging by ensuring that the exception is observed:
+ ``f.add_done_callback(lambda f: f.exception())``.
+ """
+ def __init__(self):
+ self._done = False
+ self._result = None
+ self._exc_info = None
+
+ self._log_traceback = False # Used for Python >= 3.4
+ self._tb_logger = None # Used for Python <= 3.3
+
+ self._callbacks = []
+
+ # Implement the Python 3.5 Awaitable protocol if possible
+ # (we can't use return and yield together until py33).
+ if sys.version_info >= (3, 3):
+ exec(textwrap.dedent("""
+ def __await__(self):
+ return (yield self)
+ """))
+ else:
+ # Py2-compatible version for use with cython.
+ def __await__(self):
+ result = yield self
+ # StopIteration doesn't take args before py33,
+ # but Cython recognizes the args tuple.
+ e = StopIteration()
+ e.args = (result,)
+ raise e
+
+ def cancel(self):
+ """Cancel the operation, if possible.
+
+ Tornado ``Futures`` do not support cancellation, so this method always
+ returns False.
+ """
+ return False
+
+ def cancelled(self):
+ """Returns True if the operation has been cancelled.
+
+ Tornado ``Futures`` do not support cancellation, so this method
+ always returns False.
+ """
+ return False
+
+ def running(self):
+ """Returns True if this operation is currently running."""
+ return not self._done
+
+ def done(self):
+ """Returns True if the future has finished running."""
+ return self._done
+
+ def _clear_tb_log(self):
+ self._log_traceback = False
+ if self._tb_logger is not None:
+ self._tb_logger.clear()
+ self._tb_logger = None
+
+ def result(self, timeout=None):
+ """If the operation succeeded, return its result. If it failed,
+ re-raise its exception.
+
+ This method takes a ``timeout`` argument for compatibility with
+ `concurrent.futures.Future` but it is an error to call it
+ before the `Future` is done, so the ``timeout`` is never used.
+ """
+ self._clear_tb_log()
+ if self._result is not None:
+ return self._result
+ if self._exc_info is not None:
+ try:
+ raise_exc_info(self._exc_info)
+ finally:
+ self = None
+ self._check_done()
+ return self._result
+
+ def exception(self, timeout=None):
+ """If the operation raised an exception, return the `Exception`
+ object. Otherwise returns None.
+
+ This method takes a ``timeout`` argument for compatibility with
+ `concurrent.futures.Future` but it is an error to call it
+ before the `Future` is done, so the ``timeout`` is never used.
+ """
+ self._clear_tb_log()
+ if self._exc_info is not None:
+ return self._exc_info[1]
+ else:
+ self._check_done()
+ return None
+
+ def add_done_callback(self, fn):
+ """Attaches the given callback to the `Future`.
+
+ It will be invoked with the `Future` as its argument when the Future
+ has finished running and its result is available. In Tornado
+ consider using `.IOLoop.add_future` instead of calling
+ `add_done_callback` directly.
+ """
+ if self._done:
+ fn(self)
+ else:
+ self._callbacks.append(fn)
+
+ def set_result(self, result):
+ """Sets the result of a ``Future``.
+
+ It is undefined to call any of the ``set`` methods more than once
+ on the same object.
+ """
+ self._result = result
+ self._set_done()
+
+ def set_exception(self, exception):
+ """Sets the exception of a ``Future.``"""
+ self.set_exc_info(
+ (exception.__class__,
+ exception,
+ getattr(exception, '__traceback__', None)))
+
+ def exc_info(self):
+ """Returns a tuple in the same format as `sys.exc_info` or None.
+
+ .. versionadded:: 4.0
+ """
+ self._clear_tb_log()
+ return self._exc_info
+
+ def set_exc_info(self, exc_info):
+ """Sets the exception information of a ``Future.``
+
+ Preserves tracebacks on Python 2.
+
+ .. versionadded:: 4.0
+ """
+ self._exc_info = exc_info
+ self._log_traceback = True
+ if not _GC_CYCLE_FINALIZERS:
+ self._tb_logger = _TracebackLogger(exc_info)
+
+ try:
+ self._set_done()
+ finally:
+ # Activate the logger after all callbacks have had a
+ # chance to call result() or exception().
+ if self._log_traceback and self._tb_logger is not None:
+ self._tb_logger.activate()
+ self._exc_info = exc_info
+
+ def _check_done(self):
+ if not self._done:
+ raise Exception("DummyFuture does not support blocking for results")
+
+ def _set_done(self):
+ self._done = True
+ for cb in self._callbacks:
+ try:
+ cb(self)
+ except Exception:
+ app_log.exception('Exception in callback %r for %r',
+ cb, self)
+ self._callbacks = None
+
+ # On Python 3.3 or older, objects with a destructor part of a reference
+ # cycle are never destroyed. It's no longer the case on Python 3.4 thanks to
+ # the PEP 442.
+ if _GC_CYCLE_FINALIZERS:
+ def __del__(self, is_finalizing=is_finalizing):
+ if is_finalizing() or not self._log_traceback:
+ # set_exception() was not called, or result() or exception()
+ # has consumed the exception
+ return
+
+ tb = traceback.format_exception(*self._exc_info)
+
+ app_log.error('Future %r exception was never retrieved: %s',
+ self, ''.join(tb).rstrip())
+
+
+TracebackFuture = Future
+
+if futures is None:
+ FUTURES = Future # type: typing.Union[type, typing.Tuple[type, ...]]
+else:
+ FUTURES = (futures.Future, Future)
+
+
+def is_future(x):
+ return isinstance(x, FUTURES)
+
+
+class DummyExecutor(object):
+ def submit(self, fn, *args, **kwargs):
+ future = TracebackFuture()
+ try:
+ future.set_result(fn(*args, **kwargs))
+ except Exception:
+ future.set_exc_info(sys.exc_info())
+ return future
+
+ def shutdown(self, wait=True):
+ pass
+
+
+dummy_executor = DummyExecutor()
+
+
+def run_on_executor(*args, **kwargs):
+ """Decorator to run a synchronous method asynchronously on an executor.
+
+ The decorated method may be called with a ``callback`` keyword
+ argument and returns a future.
+
+ The `.IOLoop` and executor to be used are determined by the ``io_loop``
+ and ``executor`` attributes of ``self``. To use different attributes,
+ pass keyword arguments to the decorator::
+
+ @run_on_executor(executor='_thread_pool')
+ def foo(self):
+ pass
+
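+ Without arguments, the decorator uses the default ``self.executor``
+ and ``self.io_loop`` attributes (a sketch; the class shown is
+ illustrative)::
+
+ class Worker(object):
+ def __init__(self, io_loop, executor):
+ self.io_loop = io_loop
+ self.executor = executor
+
+ @run_on_executor
+ def compute(self):
+ return 42
+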
+ .. versionchanged:: 4.2
+ Added keyword arguments to use alternative attributes.
+ """
+ def run_on_executor_decorator(fn):
+ executor = kwargs.get("executor", "executor")
+ io_loop = kwargs.get("io_loop", "io_loop")
+
+ @functools.wraps(fn)
+ def wrapper(self, *args, **kwargs):
+ callback = kwargs.pop("callback", None)
+ future = getattr(self, executor).submit(fn, self, *args, **kwargs)
+ if callback:
+ getattr(self, io_loop).add_future(
+ future, lambda future: callback(future.result()))
+ return future
+ return wrapper
+ if args and kwargs:
+ raise ValueError("cannot combine positional and keyword args")
+ if len(args) == 1:
+ return run_on_executor_decorator(args[0])
+ elif len(args) != 0:
+ raise ValueError("expected 1 argument, got %d", len(args))
+ return run_on_executor_decorator
+
+
+_NO_RESULT = object()
+
+
+def return_future(f):
+ """Decorator to make a function that returns via callback return a
+ `Future`.
+
+ The wrapped function should take a ``callback`` keyword argument
+ and invoke it with one argument when it has finished. To signal failure,
+ the function can simply raise an exception (which will be
+ captured by the `.StackContext` and passed along to the ``Future``).
+
+ From the caller's perspective, the callback argument is optional.
+ If one is given, it will be invoked when the function is complete
+ with `Future.result()` as an argument. If the function fails, the
+ callback will not be run and an exception will be raised into the
+ surrounding `.StackContext`.
+
+ If no callback is given, the caller should use the ``Future`` to
+ wait for the function to complete (perhaps by yielding it in a
+ `.gen.engine` function, or passing it to `.IOLoop.add_future`).
+
+ Usage:
+
+ .. testcode::
+
+ @return_future
+ def future_func(arg1, arg2, callback):
+ # Do stuff (possibly asynchronous)
+ callback(result)
+
+ @gen.engine
+ def caller(callback):
+ yield future_func(arg1, arg2)
+ callback()
+
+ ..
+
+ Note that ``@return_future`` and ``@gen.engine`` can be applied to the
+ same function, provided ``@return_future`` appears first. However,
+ consider using ``@gen.coroutine`` instead of this combination.
+ """
+ replacer = ArgReplacer(f, 'callback')
+
+ @functools.wraps(f)
+ def wrapper(*args, **kwargs):
+ future = TracebackFuture()
+ callback, args, kwargs = replacer.replace(
+ lambda value=_NO_RESULT: future.set_result(value),
+ args, kwargs)
+
+ def handle_error(typ, value, tb):
+ future.set_exc_info((typ, value, tb))
+ return True
+ exc_info = None
+ with ExceptionStackContext(handle_error):
+ try:
+ result = f(*args, **kwargs)
+ if result is not None:
+ raise ReturnValueIgnoredError(
+ "@return_future should not be used with functions "
+ "that return values")
+ except:
+ exc_info = sys.exc_info()
+ raise
+ if exc_info is not None:
+ # If the initial synchronous part of f() raised an exception,
+ # go ahead and raise it to the caller directly without waiting
+ # for them to inspect the Future.
+ future.result()
+
+ # If the caller passed in a callback, schedule it to be called
+ # when the future resolves. It is important that this happens
+ # just before we return the future, or else we risk confusing
+ # stack contexts with multiple exceptions (one here with the
+ # immediate exception, and again when the future resolves and
+ # the callback triggers its exception by calling future.result()).
+ if callback is not None:
+ def run_callback(future):
+ result = future.result()
+ if result is _NO_RESULT:
+ callback()
+ else:
+ callback(future.result())
+ future.add_done_callback(wrap(run_callback))
+ return future
+ return wrapper
+
+
+def chain_future(a, b):
+ """Chain two futures together so that when one completes, so does the other.
+
+ The result (success or failure) of ``a`` will be copied to ``b``, unless
+ ``b`` has already been completed or cancelled by the time ``a`` finishes.
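+
+ A minimal sketch::
+
+ a, b = Future(), Future()
+ chain_future(a, b)
+ a.set_result(1)
+ assert b.result() == 1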
+ """
+ def copy(future):
+ assert future is a
+ if b.done():
+ return
+ if (isinstance(a, TracebackFuture) and
+ isinstance(b, TracebackFuture) and
+ a.exc_info() is not None):
+ b.set_exc_info(a.exc_info())
+ elif a.exception() is not None:
+ b.set_exception(a.exception())
+ else:
+ b.set_result(a.result())
+ a.add_done_callback(copy)
diff --git a/contrib/python/tornado/tornado-4/tornado/curl_httpclient.py b/contrib/python/tornado/tornado-4/tornado/curl_httpclient.py
index 28492c16cd..8632c788c1 100644
--- a/contrib/python/tornado/tornado-4/tornado/curl_httpclient.py
+++ b/contrib/python/tornado/tornado-4/tornado/curl_httpclient.py
@@ -1,524 +1,524 @@
-#!/usr/bin/env python
-#
-# Copyright 2009 Facebook
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Non-blocking HTTP client implementation using pycurl."""
-
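-# To route all of tornado.httpclient through this implementation, use
-# the standard AsyncHTTPClient.configure hook (a sketch):
-#
-#     from tornado.httpclient import AsyncHTTPClient
-#     AsyncHTTPClient.configure("tornado.curl_httpclient.CurlAsyncHTTPClient")
-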
-from __future__ import absolute_import, division, print_function
-
-import collections
-import functools
-import logging
-import pycurl # type: ignore
-import threading
-import time
-from io import BytesIO
-
-from tornado import httputil
-from tornado import ioloop
-from tornado import stack_context
-
-from tornado.escape import utf8, native_str
-from tornado.httpclient import HTTPResponse, HTTPError, AsyncHTTPClient, main
-
-curl_log = logging.getLogger('tornado.curl_httpclient')
-
-
-class CurlAsyncHTTPClient(AsyncHTTPClient):
- def initialize(self, io_loop, max_clients=10, defaults=None):
- super(CurlAsyncHTTPClient, self).initialize(io_loop, defaults=defaults)
- self._multi = pycurl.CurlMulti()
- self._multi.setopt(pycurl.M_TIMERFUNCTION, self._set_timeout)
- self._multi.setopt(pycurl.M_SOCKETFUNCTION, self._handle_socket)
- self._curls = [self._curl_create() for i in range(max_clients)]
- self._free_list = self._curls[:]
- self._requests = collections.deque()
- self._fds = {}
- self._timeout = None
-
- # libcurl has bugs that sometimes cause it to not report all
- # relevant file descriptors and timeouts to TIMERFUNCTION/
- # SOCKETFUNCTION. Mitigate the effects of such bugs by
- # forcing a periodic scan of all active requests.
- self._force_timeout_callback = ioloop.PeriodicCallback(
- self._handle_force_timeout, 1000, io_loop=io_loop)
- self._force_timeout_callback.start()
-
- # Work around a bug in libcurl 7.29.0: Some fields in the curl
- # multi object are initialized lazily, and its destructor will
- # segfault if it is destroyed without having been used. Add
- # and remove a dummy handle to make sure everything is
- # initialized.
- dummy_curl_handle = pycurl.Curl()
- self._multi.add_handle(dummy_curl_handle)
- self._multi.remove_handle(dummy_curl_handle)
-
- def close(self):
- self._force_timeout_callback.stop()
- if self._timeout is not None:
- self.io_loop.remove_timeout(self._timeout)
- for curl in self._curls:
- curl.close()
- self._multi.close()
- super(CurlAsyncHTTPClient, self).close()
-
- def fetch_impl(self, request, callback):
- self._requests.append((request, callback))
- self._process_queue()
- self._set_timeout(0)
-
- def _handle_socket(self, event, fd, multi, data):
- """Called by libcurl when it wants to change the file descriptors
- it cares about.
- """
- event_map = {
- pycurl.POLL_NONE: ioloop.IOLoop.NONE,
- pycurl.POLL_IN: ioloop.IOLoop.READ,
- pycurl.POLL_OUT: ioloop.IOLoop.WRITE,
- pycurl.POLL_INOUT: ioloop.IOLoop.READ | ioloop.IOLoop.WRITE
- }
- if event == pycurl.POLL_REMOVE:
- if fd in self._fds:
- self.io_loop.remove_handler(fd)
- del self._fds[fd]
- else:
- ioloop_event = event_map[event]
- # libcurl sometimes closes a socket and then opens a new
- # one using the same FD without giving us a POLL_NONE in
- # between. This is a problem with the epoll IOLoop,
- # because the kernel can tell when a socket is closed and
- # removes it from the epoll automatically, causing future
- # update_handler calls to fail. Since we can't tell when
- # this has happened, always use remove and re-add
- # instead of update.
- if fd in self._fds:
- self.io_loop.remove_handler(fd)
- self.io_loop.add_handler(fd, self._handle_events,
- ioloop_event)
- self._fds[fd] = ioloop_event
-
- def _set_timeout(self, msecs):
- """Called by libcurl to schedule a timeout."""
- if self._timeout is not None:
- self.io_loop.remove_timeout(self._timeout)
- self._timeout = self.io_loop.add_timeout(
- self.io_loop.time() + msecs / 1000.0, self._handle_timeout)
-
- def _handle_events(self, fd, events):
- """Called by IOLoop when there is activity on one of our
- file descriptors.
- """
- action = 0
- if events & ioloop.IOLoop.READ:
- action |= pycurl.CSELECT_IN
- if events & ioloop.IOLoop.WRITE:
- action |= pycurl.CSELECT_OUT
- while True:
- try:
- ret, num_handles = self._multi.socket_action(fd, action)
- except pycurl.error as e:
- ret = e.args[0]
- if ret != pycurl.E_CALL_MULTI_PERFORM:
- break
- self._finish_pending_requests()
-
- def _handle_timeout(self):
- """Called by IOLoop when the requested timeout has passed."""
- with stack_context.NullContext():
- self._timeout = None
- while True:
- try:
- ret, num_handles = self._multi.socket_action(
- pycurl.SOCKET_TIMEOUT, 0)
- except pycurl.error as e:
- ret = e.args[0]
- if ret != pycurl.E_CALL_MULTI_PERFORM:
- break
- self._finish_pending_requests()
-
- # In theory, we shouldn't have to do this because curl will
- # call _set_timeout whenever the timeout changes. However,
- # sometimes after _handle_timeout we will need to reschedule
- # immediately even though nothing has changed from curl's
- # perspective. This is because when socket_action is
- # called with SOCKET_TIMEOUT, libcurl decides internally which
- # timeouts need to be processed by using a monotonic clock
- # (where available) while tornado uses python's time.time()
- # to decide when timeouts have occurred. When those clocks
- # disagree on elapsed time (as they will whenever there is an
- # NTP adjustment), tornado might call _handle_timeout before
- # libcurl is ready. After each timeout, resync the scheduled
- # timeout with libcurl's current state.
- new_timeout = self._multi.timeout()
- if new_timeout >= 0:
- self._set_timeout(new_timeout)
-
- def _handle_force_timeout(self):
- """Called by IOLoop periodically to ask libcurl to process any
- events it may have forgotten about.
- """
- with stack_context.NullContext():
- while True:
- try:
- ret, num_handles = self._multi.socket_all()
- except pycurl.error as e:
- ret = e.args[0]
- if ret != pycurl.E_CALL_MULTI_PERFORM:
- break
- self._finish_pending_requests()
-
- def _finish_pending_requests(self):
- """Process any requests that were completed by the last
- call to multi.socket_action.
- """
- while True:
- num_q, ok_list, err_list = self._multi.info_read()
- for curl in ok_list:
- self._finish(curl)
- for curl, errnum, errmsg in err_list:
- self._finish(curl, errnum, errmsg)
- if num_q == 0:
- break
- self._process_queue()
-
- def _process_queue(self):
- with stack_context.NullContext():
- while True:
- started = 0
- while self._free_list and self._requests:
- started += 1
- curl = self._free_list.pop()
- (request, callback) = self._requests.popleft()
- curl.info = {
- "headers": httputil.HTTPHeaders(),
- "buffer": BytesIO(),
- "request": request,
- "callback": callback,
- "curl_start_time": time.time(),
- }
- try:
- self._curl_setup_request(
- curl, request, curl.info["buffer"],
- curl.info["headers"])
- except Exception as e:
- # If there was an error in setup, pass it on
- # to the callback. Note that allowing the
- # error to escape here will appear to work
- # most of the time since we are still in the
- # caller's original stack frame, but when
- # _process_queue() is called from
- # _finish_pending_requests the exceptions have
- # nowhere to go.
- self._free_list.append(curl)
- callback(HTTPResponse(
- request=request,
- code=599,
- error=e))
- else:
- self._multi.add_handle(curl)
-
- if not started:
- break
-
- def _finish(self, curl, curl_error=None, curl_message=None):
- info = curl.info
- curl.info = None
- self._multi.remove_handle(curl)
- self._free_list.append(curl)
- buffer = info["buffer"]
- if curl_error:
- error = CurlError(curl_error, curl_message)
- code = error.code
- effective_url = None
- buffer.close()
- buffer = None
- else:
- error = None
- code = curl.getinfo(pycurl.HTTP_CODE)
- effective_url = curl.getinfo(pycurl.EFFECTIVE_URL)
- buffer.seek(0)
- # the various curl timings are documented at
- # http://curl.haxx.se/libcurl/c/curl_easy_getinfo.html
- time_info = dict(
- queue=info["curl_start_time"] - info["request"].start_time,
- namelookup=curl.getinfo(pycurl.NAMELOOKUP_TIME),
- connect=curl.getinfo(pycurl.CONNECT_TIME),
- pretransfer=curl.getinfo(pycurl.PRETRANSFER_TIME),
- starttransfer=curl.getinfo(pycurl.STARTTRANSFER_TIME),
- total=curl.getinfo(pycurl.TOTAL_TIME),
- redirect=curl.getinfo(pycurl.REDIRECT_TIME),
- )
- try:
- info["callback"](HTTPResponse(
- request=info["request"], code=code, headers=info["headers"],
- buffer=buffer, effective_url=effective_url, error=error,
- reason=info['headers'].get("X-Http-Reason", None),
- request_time=time.time() - info["curl_start_time"],
- time_info=time_info))
- except Exception:
- self.handle_callback_exception(info["callback"])
-
- def handle_callback_exception(self, callback):
- self.io_loop.handle_callback_exception(callback)
-
- def _curl_create(self):
- curl = pycurl.Curl()
- if curl_log.isEnabledFor(logging.DEBUG):
- curl.setopt(pycurl.VERBOSE, 1)
- curl.setopt(pycurl.DEBUGFUNCTION, self._curl_debug)
- if hasattr(pycurl, 'PROTOCOLS'): # PROTOCOLS first appeared in pycurl 7.19.5 (2014-07-12)
- curl.setopt(pycurl.PROTOCOLS, pycurl.PROTO_HTTP | pycurl.PROTO_HTTPS)
- curl.setopt(pycurl.REDIR_PROTOCOLS, pycurl.PROTO_HTTP | pycurl.PROTO_HTTPS)
- return curl
-
- def _curl_setup_request(self, curl, request, buffer, headers):
- curl.setopt(pycurl.URL, native_str(request.url))
-
- # libcurl's magic "Expect: 100-continue" behavior causes delays
- # with servers that don't support it (which include, among others,
- # Google's OpenID endpoint). Additionally, this behavior has
- # a bug in conjunction with the curl_multi_socket_action API
- # (https://sourceforge.net/tracker/?func=detail&atid=100976&aid=3039744&group_id=976),
- # which increases the delays. It's more trouble than it's worth,
- # so just turn off the feature (yes, setting Expect: to an empty
- # value is the official way to disable this)
- if "Expect" not in request.headers:
- request.headers["Expect"] = ""
-
- # libcurl adds Pragma: no-cache by default; disable that too
- if "Pragma" not in request.headers:
- request.headers["Pragma"] = ""
-
- curl.setopt(pycurl.HTTPHEADER,
- ["%s: %s" % (native_str(k), native_str(v))
- for k, v in request.headers.get_all()])
-
- curl.setopt(pycurl.HEADERFUNCTION,
- functools.partial(self._curl_header_callback,
- headers, request.header_callback))
- if request.streaming_callback:
- def write_function(chunk):
- self.io_loop.add_callback(request.streaming_callback, chunk)
- else:
- write_function = buffer.write
- if bytes is str: # py2
- curl.setopt(pycurl.WRITEFUNCTION, write_function)
- else: # py3
- # Upstream pycurl doesn't support py3, but ubuntu 12.10 includes
- # a fork/port. That version has a bug in which it passes unicode
- # strings instead of bytes to the WRITEFUNCTION. This means that
- # if you use a WRITEFUNCTION (which tornado always does), you cannot
- # download arbitrary binary data. This needs to be fixed in the
- # ported pycurl package, but in the meantime this lambda will
- # make it work for downloading (utf8) text.
- curl.setopt(pycurl.WRITEFUNCTION, lambda s: write_function(utf8(s)))
- curl.setopt(pycurl.FOLLOWLOCATION, request.follow_redirects)
- curl.setopt(pycurl.MAXREDIRS, request.max_redirects)
- curl.setopt(pycurl.CONNECTTIMEOUT_MS, int(1000 * request.connect_timeout))
- curl.setopt(pycurl.TIMEOUT_MS, int(1000 * request.request_timeout))
- if request.user_agent:
- curl.setopt(pycurl.USERAGENT, native_str(request.user_agent))
- else:
- curl.setopt(pycurl.USERAGENT, "Mozilla/5.0 (compatible; pycurl)")
- if request.network_interface:
- curl.setopt(pycurl.INTERFACE, request.network_interface)
- if request.decompress_response:
- curl.setopt(pycurl.ENCODING, "gzip,deflate")
- else:
- curl.setopt(pycurl.ENCODING, "none")
- if request.proxy_host and request.proxy_port:
- curl.setopt(pycurl.PROXY, request.proxy_host)
- curl.setopt(pycurl.PROXYPORT, request.proxy_port)
- if request.proxy_username:
- credentials = '%s:%s' % (request.proxy_username,
- request.proxy_password)
- curl.setopt(pycurl.PROXYUSERPWD, credentials)
-
- if (request.proxy_auth_mode is None or
- request.proxy_auth_mode == "basic"):
- curl.setopt(pycurl.PROXYAUTH, pycurl.HTTPAUTH_BASIC)
- elif request.proxy_auth_mode == "digest":
- curl.setopt(pycurl.PROXYAUTH, pycurl.HTTPAUTH_DIGEST)
- else:
- raise ValueError(
- "Unsupported proxy_auth_mode %s" % request.proxy_auth_mode)
- else:
- curl.setopt(pycurl.PROXY, '')
- curl.unsetopt(pycurl.PROXYUSERPWD)
- if request.validate_cert:
- curl.setopt(pycurl.SSL_VERIFYPEER, 1)
- curl.setopt(pycurl.SSL_VERIFYHOST, 2)
- else:
- curl.setopt(pycurl.SSL_VERIFYPEER, 0)
- curl.setopt(pycurl.SSL_VERIFYHOST, 0)
- if request.ca_certs is not None:
- cafile, capath, cadata = None, None, None
- if callable(request.ca_certs):
- cafile, capath, cadata = request.ca_certs()
- else:
- cafile = request.ca_certs
- if cafile is not None:
- curl.setopt(pycurl.CAINFO, cafile)
- if capath is not None:
- curl.setopt(pycurl.CAPATH, capath)
- if cadata is not None:
- curl.set_ca_certs(cadata)
- else:
- # There is no way to restore pycurl.CAINFO to its default value
- # (Using unsetopt makes it reject all certificates).
- # I don't see any way to read the default value from python so it
- # can be restored later. We'll have to just leave CAINFO untouched
- # if no ca_certs file was specified, and require that if any
- # request uses a custom ca_certs file, they all must.
- pass
-
- if request.allow_ipv6 is False:
- # Curl behaves reasonably when DNS resolution gives an ipv6 address
-            # that we can't reach, so allow ipv6 unless the user asks to disable it.
- curl.setopt(pycurl.IPRESOLVE, pycurl.IPRESOLVE_V4)
- else:
- curl.setopt(pycurl.IPRESOLVE, pycurl.IPRESOLVE_WHATEVER)
-
- # Set the request method through curl's irritating interface which makes
- # up names for almost every single method
- curl_options = {
- "GET": pycurl.HTTPGET,
- "POST": pycurl.POST,
- "PUT": pycurl.UPLOAD,
- "HEAD": pycurl.NOBODY,
- }
- custom_methods = set(["DELETE", "OPTIONS", "PATCH"])
- for o in curl_options.values():
- curl.setopt(o, False)
- if request.method in curl_options:
- curl.unsetopt(pycurl.CUSTOMREQUEST)
- curl.setopt(curl_options[request.method], True)
- elif request.allow_nonstandard_methods or request.method in custom_methods:
- curl.setopt(pycurl.CUSTOMREQUEST, request.method)
- else:
- raise KeyError('unknown method ' + request.method)
-
- body_expected = request.method in ("POST", "PATCH", "PUT")
- body_present = request.body is not None
- if not request.allow_nonstandard_methods:
- # Some HTTP methods nearly always have bodies while others
- # almost never do. Fail in this case unless the user has
- # opted out of sanity checks with allow_nonstandard_methods.
- if ((body_expected and not body_present) or
- (body_present and not body_expected)):
- raise ValueError(
- 'Body must %sbe None for method %s (unless '
- 'allow_nonstandard_methods is true)' %
- ('not ' if body_expected else '', request.method))
-
- if body_expected or body_present:
- if request.method == "GET":
- # Even with `allow_nonstandard_methods` we disallow
- # GET with a body (because libcurl doesn't allow it
- # unless we use CUSTOMREQUEST). While the spec doesn't
- # forbid clients from sending a body, it arguably
-                # disallows the server from doing anything with it.
- raise ValueError('Body must be None for GET request')
- request_buffer = BytesIO(utf8(request.body or ''))
-
- def ioctl(cmd):
- if cmd == curl.IOCMD_RESTARTREAD:
- request_buffer.seek(0)
- curl.setopt(pycurl.READFUNCTION, request_buffer.read)
- curl.setopt(pycurl.IOCTLFUNCTION, ioctl)
- if request.method == "POST":
- curl.setopt(pycurl.POSTFIELDSIZE, len(request.body or ''))
- else:
- curl.setopt(pycurl.UPLOAD, True)
- curl.setopt(pycurl.INFILESIZE, len(request.body or ''))
-
- if request.auth_username is not None:
- userpwd = "%s:%s" % (request.auth_username, request.auth_password or '')
-
- if request.auth_mode is None or request.auth_mode == "basic":
- curl.setopt(pycurl.HTTPAUTH, pycurl.HTTPAUTH_BASIC)
- elif request.auth_mode == "digest":
- curl.setopt(pycurl.HTTPAUTH, pycurl.HTTPAUTH_DIGEST)
- else:
- raise ValueError("Unsupported auth_mode %s" % request.auth_mode)
-
- curl.setopt(pycurl.USERPWD, native_str(userpwd))
- curl_log.debug("%s %s (username: %r)", request.method, request.url,
- request.auth_username)
- else:
- curl.unsetopt(pycurl.USERPWD)
- curl_log.debug("%s %s", request.method, request.url)
-
- if request.client_cert is not None:
- curl.setopt(pycurl.SSLCERT, request.client_cert)
-
- if request.client_key is not None:
- curl.setopt(pycurl.SSLKEY, request.client_key)
-
- if request.ssl_options is not None:
- raise ValueError("ssl_options not supported in curl_httpclient")
-
- if threading.activeCount() > 1:
- # libcurl/pycurl is not thread-safe by default. When multiple threads
- # are used, signals should be disabled. This has the side effect
- # of disabling DNS timeouts in some environments (when libcurl is
- # not linked against ares), so we don't do it when there is only one
- # thread. Applications that use many short-lived threads may need
- # to set NOSIGNAL manually in a prepare_curl_callback since
- # there may not be any other threads running at the time we call
- # threading.activeCount.
- curl.setopt(pycurl.NOSIGNAL, 1)
- if request.prepare_curl_callback is not None:
- request.prepare_curl_callback(curl)
-
- def _curl_header_callback(self, headers, header_callback, header_line):
- header_line = native_str(header_line.decode('latin1'))
- if header_callback is not None:
- self.io_loop.add_callback(header_callback, header_line)
- # header_line as returned by curl includes the end-of-line characters.
- # whitespace at the start should be preserved to allow multi-line headers
- header_line = header_line.rstrip()
- if header_line.startswith("HTTP/"):
- headers.clear()
- try:
- (__, __, reason) = httputil.parse_response_start_line(header_line)
- header_line = "X-Http-Reason: %s" % reason
- except httputil.HTTPInputError:
- return
- if not header_line:
- return
- headers.parse_line(header_line)
-
- def _curl_debug(self, debug_type, debug_msg):
- debug_types = ('I', '<', '>', '<', '>')
- debug_msg = native_str(debug_msg)
- if debug_type == 0:
- curl_log.debug('%s', debug_msg.strip())
- elif debug_type in (1, 2):
- for line in debug_msg.splitlines():
- curl_log.debug('%s %s', debug_types[debug_type], line)
- elif debug_type == 4:
- curl_log.debug('%s %r', debug_types[debug_type], debug_msg)
-
-
-class CurlError(HTTPError):
- def __init__(self, errno, message):
- HTTPError.__init__(self, 599, message)
- self.errno = errno
-
-
-if __name__ == "__main__":
- AsyncHTTPClient.configure(CurlAsyncHTTPClient)
- main()
+#!/usr/bin/env python
+#
+# Copyright 2009 Facebook
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Non-blocking HTTP client implementation using pycurl."""
+
+from __future__ import absolute_import, division, print_function
+
+import collections
+import functools
+import logging
+import pycurl # type: ignore
+import threading
+import time
+from io import BytesIO
+
+from tornado import httputil
+from tornado import ioloop
+from tornado import stack_context
+
+from tornado.escape import utf8, native_str
+from tornado.httpclient import HTTPResponse, HTTPError, AsyncHTTPClient, main
+
+curl_log = logging.getLogger('tornado.curl_httpclient')
+
+
+class CurlAsyncHTTPClient(AsyncHTTPClient):
+ def initialize(self, io_loop, max_clients=10, defaults=None):
+ super(CurlAsyncHTTPClient, self).initialize(io_loop, defaults=defaults)
+ self._multi = pycurl.CurlMulti()
+ self._multi.setopt(pycurl.M_TIMERFUNCTION, self._set_timeout)
+ self._multi.setopt(pycurl.M_SOCKETFUNCTION, self._handle_socket)
+ self._curls = [self._curl_create() for i in range(max_clients)]
+ self._free_list = self._curls[:]
+ self._requests = collections.deque()
+ self._fds = {}
+ self._timeout = None
+
+ # libcurl has bugs that sometimes cause it to not report all
+ # relevant file descriptors and timeouts to TIMERFUNCTION/
+ # SOCKETFUNCTION. Mitigate the effects of such bugs by
+ # forcing a periodic scan of all active requests.
+ self._force_timeout_callback = ioloop.PeriodicCallback(
+ self._handle_force_timeout, 1000, io_loop=io_loop)
+ self._force_timeout_callback.start()
+
+ # Work around a bug in libcurl 7.29.0: Some fields in the curl
+ # multi object are initialized lazily, and its destructor will
+ # segfault if it is destroyed without having been used. Add
+ # and remove a dummy handle to make sure everything is
+ # initialized.
+ dummy_curl_handle = pycurl.Curl()
+ self._multi.add_handle(dummy_curl_handle)
+ self._multi.remove_handle(dummy_curl_handle)
+
+ def close(self):
+ self._force_timeout_callback.stop()
+ if self._timeout is not None:
+ self.io_loop.remove_timeout(self._timeout)
+ for curl in self._curls:
+ curl.close()
+ self._multi.close()
+ super(CurlAsyncHTTPClient, self).close()
+
+ def fetch_impl(self, request, callback):
+ self._requests.append((request, callback))
+ self._process_queue()
+ self._set_timeout(0)
+
+ def _handle_socket(self, event, fd, multi, data):
+ """Called by libcurl when it wants to change the file descriptors
+ it cares about.
+ """
+ event_map = {
+ pycurl.POLL_NONE: ioloop.IOLoop.NONE,
+ pycurl.POLL_IN: ioloop.IOLoop.READ,
+ pycurl.POLL_OUT: ioloop.IOLoop.WRITE,
+ pycurl.POLL_INOUT: ioloop.IOLoop.READ | ioloop.IOLoop.WRITE
+ }
+ if event == pycurl.POLL_REMOVE:
+ if fd in self._fds:
+ self.io_loop.remove_handler(fd)
+ del self._fds[fd]
+ else:
+ ioloop_event = event_map[event]
+ # libcurl sometimes closes a socket and then opens a new
+ # one using the same FD without giving us a POLL_NONE in
+ # between. This is a problem with the epoll IOLoop,
+ # because the kernel can tell when a socket is closed and
+ # removes it from the epoll automatically, causing future
+ # update_handler calls to fail. Since we can't tell when
+ # this has happened, always use remove and re-add
+ # instead of update.
+ if fd in self._fds:
+ self.io_loop.remove_handler(fd)
+ self.io_loop.add_handler(fd, self._handle_events,
+ ioloop_event)
+ self._fds[fd] = ioloop_event
+
+ def _set_timeout(self, msecs):
+ """Called by libcurl to schedule a timeout."""
+ if self._timeout is not None:
+ self.io_loop.remove_timeout(self._timeout)
+ self._timeout = self.io_loop.add_timeout(
+ self.io_loop.time() + msecs / 1000.0, self._handle_timeout)
+
+ def _handle_events(self, fd, events):
+ """Called by IOLoop when there is activity on one of our
+ file descriptors.
+ """
+ action = 0
+ if events & ioloop.IOLoop.READ:
+ action |= pycurl.CSELECT_IN
+ if events & ioloop.IOLoop.WRITE:
+ action |= pycurl.CSELECT_OUT
+ while True:
+ try:
+ ret, num_handles = self._multi.socket_action(fd, action)
+ except pycurl.error as e:
+ ret = e.args[0]
+ if ret != pycurl.E_CALL_MULTI_PERFORM:
+ break
+ self._finish_pending_requests()
+
+ def _handle_timeout(self):
+ """Called by IOLoop when the requested timeout has passed."""
+ with stack_context.NullContext():
+ self._timeout = None
+ while True:
+ try:
+ ret, num_handles = self._multi.socket_action(
+ pycurl.SOCKET_TIMEOUT, 0)
+ except pycurl.error as e:
+ ret = e.args[0]
+ if ret != pycurl.E_CALL_MULTI_PERFORM:
+ break
+ self._finish_pending_requests()
+
+ # In theory, we shouldn't have to do this because curl will
+ # call _set_timeout whenever the timeout changes. However,
+ # sometimes after _handle_timeout we will need to reschedule
+ # immediately even though nothing has changed from curl's
+ # perspective. This is because when socket_action is
+ # called with SOCKET_TIMEOUT, libcurl decides internally which
+ # timeouts need to be processed by using a monotonic clock
+ # (where available) while tornado uses python's time.time()
+ # to decide when timeouts have occurred. When those clocks
+ # disagree on elapsed time (as they will whenever there is an
+ # NTP adjustment), tornado might call _handle_timeout before
+ # libcurl is ready. After each timeout, resync the scheduled
+ # timeout with libcurl's current state.
+ new_timeout = self._multi.timeout()
+ if new_timeout >= 0:
+ self._set_timeout(new_timeout)
+
+ def _handle_force_timeout(self):
+ """Called by IOLoop periodically to ask libcurl to process any
+ events it may have forgotten about.
+ """
+ with stack_context.NullContext():
+ while True:
+ try:
+ ret, num_handles = self._multi.socket_all()
+ except pycurl.error as e:
+ ret = e.args[0]
+ if ret != pycurl.E_CALL_MULTI_PERFORM:
+ break
+ self._finish_pending_requests()
+
+ def _finish_pending_requests(self):
+ """Process any requests that were completed by the last
+ call to multi.socket_action.
+ """
+ while True:
+ num_q, ok_list, err_list = self._multi.info_read()
+ for curl in ok_list:
+ self._finish(curl)
+ for curl, errnum, errmsg in err_list:
+ self._finish(curl, errnum, errmsg)
+ if num_q == 0:
+ break
+ self._process_queue()
+
+ def _process_queue(self):
+ with stack_context.NullContext():
+ while True:
+ started = 0
+ while self._free_list and self._requests:
+ started += 1
+ curl = self._free_list.pop()
+ (request, callback) = self._requests.popleft()
+ curl.info = {
+ "headers": httputil.HTTPHeaders(),
+ "buffer": BytesIO(),
+ "request": request,
+ "callback": callback,
+ "curl_start_time": time.time(),
+ }
+ try:
+ self._curl_setup_request(
+ curl, request, curl.info["buffer"],
+ curl.info["headers"])
+ except Exception as e:
+ # If there was an error in setup, pass it on
+ # to the callback. Note that allowing the
+ # error to escape here will appear to work
+ # most of the time since we are still in the
+ # caller's original stack frame, but when
+ # _process_queue() is called from
+ # _finish_pending_requests the exceptions have
+ # nowhere to go.
+ self._free_list.append(curl)
+ callback(HTTPResponse(
+ request=request,
+ code=599,
+ error=e))
+ else:
+ self._multi.add_handle(curl)
+
+ if not started:
+ break
+
+ def _finish(self, curl, curl_error=None, curl_message=None):
+ info = curl.info
+ curl.info = None
+ self._multi.remove_handle(curl)
+ self._free_list.append(curl)
+ buffer = info["buffer"]
+ if curl_error:
+ error = CurlError(curl_error, curl_message)
+ code = error.code
+ effective_url = None
+ buffer.close()
+ buffer = None
+ else:
+ error = None
+ code = curl.getinfo(pycurl.HTTP_CODE)
+ effective_url = curl.getinfo(pycurl.EFFECTIVE_URL)
+ buffer.seek(0)
+ # the various curl timings are documented at
+ # http://curl.haxx.se/libcurl/c/curl_easy_getinfo.html
+ time_info = dict(
+ queue=info["curl_start_time"] - info["request"].start_time,
+ namelookup=curl.getinfo(pycurl.NAMELOOKUP_TIME),
+ connect=curl.getinfo(pycurl.CONNECT_TIME),
+ pretransfer=curl.getinfo(pycurl.PRETRANSFER_TIME),
+ starttransfer=curl.getinfo(pycurl.STARTTRANSFER_TIME),
+ total=curl.getinfo(pycurl.TOTAL_TIME),
+ redirect=curl.getinfo(pycurl.REDIRECT_TIME),
+ )
+ try:
+ info["callback"](HTTPResponse(
+ request=info["request"], code=code, headers=info["headers"],
+ buffer=buffer, effective_url=effective_url, error=error,
+ reason=info['headers'].get("X-Http-Reason", None),
+ request_time=time.time() - info["curl_start_time"],
+ time_info=time_info))
+ except Exception:
+ self.handle_callback_exception(info["callback"])
+
+ def handle_callback_exception(self, callback):
+ self.io_loop.handle_callback_exception(callback)
+
+ def _curl_create(self):
+ curl = pycurl.Curl()
+ if curl_log.isEnabledFor(logging.DEBUG):
+ curl.setopt(pycurl.VERBOSE, 1)
+ curl.setopt(pycurl.DEBUGFUNCTION, self._curl_debug)
+ if hasattr(pycurl, 'PROTOCOLS'): # PROTOCOLS first appeared in pycurl 7.19.5 (2014-07-12)
+ curl.setopt(pycurl.PROTOCOLS, pycurl.PROTO_HTTP | pycurl.PROTO_HTTPS)
+ curl.setopt(pycurl.REDIR_PROTOCOLS, pycurl.PROTO_HTTP | pycurl.PROTO_HTTPS)
+ return curl
+
+ def _curl_setup_request(self, curl, request, buffer, headers):
+ curl.setopt(pycurl.URL, native_str(request.url))
+
+ # libcurl's magic "Expect: 100-continue" behavior causes delays
+ # with servers that don't support it (which include, among others,
+ # Google's OpenID endpoint). Additionally, this behavior has
+ # a bug in conjunction with the curl_multi_socket_action API
+ # (https://sourceforge.net/tracker/?func=detail&atid=100976&aid=3039744&group_id=976),
+ # which increases the delays. It's more trouble than it's worth,
+ # so just turn off the feature (yes, setting Expect: to an empty
+ # value is the official way to disable this)
+ if "Expect" not in request.headers:
+ request.headers["Expect"] = ""
+
+ # libcurl adds Pragma: no-cache by default; disable that too
+ if "Pragma" not in request.headers:
+ request.headers["Pragma"] = ""
+
+ curl.setopt(pycurl.HTTPHEADER,
+ ["%s: %s" % (native_str(k), native_str(v))
+ for k, v in request.headers.get_all()])
+
+ curl.setopt(pycurl.HEADERFUNCTION,
+ functools.partial(self._curl_header_callback,
+ headers, request.header_callback))
+ if request.streaming_callback:
+ def write_function(chunk):
+ self.io_loop.add_callback(request.streaming_callback, chunk)
+ else:
+ write_function = buffer.write
+ if bytes is str: # py2
+ curl.setopt(pycurl.WRITEFUNCTION, write_function)
+ else: # py3
+ # Upstream pycurl doesn't support py3, but ubuntu 12.10 includes
+ # a fork/port. That version has a bug in which it passes unicode
+ # strings instead of bytes to the WRITEFUNCTION. This means that
+ # if you use a WRITEFUNCTION (which tornado always does), you cannot
+ # download arbitrary binary data. This needs to be fixed in the
+ # ported pycurl package, but in the meantime this lambda will
+ # make it work for downloading (utf8) text.
+ curl.setopt(pycurl.WRITEFUNCTION, lambda s: write_function(utf8(s)))
+ curl.setopt(pycurl.FOLLOWLOCATION, request.follow_redirects)
+ curl.setopt(pycurl.MAXREDIRS, request.max_redirects)
+ curl.setopt(pycurl.CONNECTTIMEOUT_MS, int(1000 * request.connect_timeout))
+ curl.setopt(pycurl.TIMEOUT_MS, int(1000 * request.request_timeout))
+ if request.user_agent:
+ curl.setopt(pycurl.USERAGENT, native_str(request.user_agent))
+ else:
+ curl.setopt(pycurl.USERAGENT, "Mozilla/5.0 (compatible; pycurl)")
+ if request.network_interface:
+ curl.setopt(pycurl.INTERFACE, request.network_interface)
+ if request.decompress_response:
+ curl.setopt(pycurl.ENCODING, "gzip,deflate")
+ else:
+ curl.setopt(pycurl.ENCODING, "none")
+ if request.proxy_host and request.proxy_port:
+ curl.setopt(pycurl.PROXY, request.proxy_host)
+ curl.setopt(pycurl.PROXYPORT, request.proxy_port)
+ if request.proxy_username:
+ credentials = '%s:%s' % (request.proxy_username,
+ request.proxy_password)
+ curl.setopt(pycurl.PROXYUSERPWD, credentials)
+
+ if (request.proxy_auth_mode is None or
+ request.proxy_auth_mode == "basic"):
+ curl.setopt(pycurl.PROXYAUTH, pycurl.HTTPAUTH_BASIC)
+ elif request.proxy_auth_mode == "digest":
+ curl.setopt(pycurl.PROXYAUTH, pycurl.HTTPAUTH_DIGEST)
+ else:
+ raise ValueError(
+ "Unsupported proxy_auth_mode %s" % request.proxy_auth_mode)
+ else:
+ curl.setopt(pycurl.PROXY, '')
+ curl.unsetopt(pycurl.PROXYUSERPWD)
+ if request.validate_cert:
+ curl.setopt(pycurl.SSL_VERIFYPEER, 1)
+ curl.setopt(pycurl.SSL_VERIFYHOST, 2)
+ else:
+ curl.setopt(pycurl.SSL_VERIFYPEER, 0)
+ curl.setopt(pycurl.SSL_VERIFYHOST, 0)
+ if request.ca_certs is not None:
+ cafile, capath, cadata = None, None, None
+ if callable(request.ca_certs):
+ cafile, capath, cadata = request.ca_certs()
+ else:
+ cafile = request.ca_certs
+ if cafile is not None:
+ curl.setopt(pycurl.CAINFO, cafile)
+ if capath is not None:
+ curl.setopt(pycurl.CAPATH, capath)
+ if cadata is not None:
+ curl.set_ca_certs(cadata)
+ else:
+ # There is no way to restore pycurl.CAINFO to its default value
+ # (Using unsetopt makes it reject all certificates).
+ # I don't see any way to read the default value from python so it
+ # can be restored later. We'll have to just leave CAINFO untouched
+ # if no ca_certs file was specified, and require that if any
+ # request uses a custom ca_certs file, they all must.
+ pass
+
+ if request.allow_ipv6 is False:
+ # Curl behaves reasonably when DNS resolution gives an ipv6 address
+            # that we can't reach, so allow ipv6 unless the user asks to disable it.
+ curl.setopt(pycurl.IPRESOLVE, pycurl.IPRESOLVE_V4)
+ else:
+ curl.setopt(pycurl.IPRESOLVE, pycurl.IPRESOLVE_WHATEVER)
+
+ # Set the request method through curl's irritating interface which makes
+ # up names for almost every single method
+ curl_options = {
+ "GET": pycurl.HTTPGET,
+ "POST": pycurl.POST,
+ "PUT": pycurl.UPLOAD,
+ "HEAD": pycurl.NOBODY,
+ }
+ custom_methods = set(["DELETE", "OPTIONS", "PATCH"])
+ for o in curl_options.values():
+ curl.setopt(o, False)
+ if request.method in curl_options:
+ curl.unsetopt(pycurl.CUSTOMREQUEST)
+ curl.setopt(curl_options[request.method], True)
+ elif request.allow_nonstandard_methods or request.method in custom_methods:
+ curl.setopt(pycurl.CUSTOMREQUEST, request.method)
+ else:
+ raise KeyError('unknown method ' + request.method)
+
+ body_expected = request.method in ("POST", "PATCH", "PUT")
+ body_present = request.body is not None
+ if not request.allow_nonstandard_methods:
+ # Some HTTP methods nearly always have bodies while others
+ # almost never do. Fail in this case unless the user has
+ # opted out of sanity checks with allow_nonstandard_methods.
+ if ((body_expected and not body_present) or
+ (body_present and not body_expected)):
+ raise ValueError(
+ 'Body must %sbe None for method %s (unless '
+ 'allow_nonstandard_methods is true)' %
+ ('not ' if body_expected else '', request.method))
+
+ if body_expected or body_present:
+ if request.method == "GET":
+ # Even with `allow_nonstandard_methods` we disallow
+ # GET with a body (because libcurl doesn't allow it
+ # unless we use CUSTOMREQUEST). While the spec doesn't
+ # forbid clients from sending a body, it arguably
+                # disallows the server from doing anything with it.
+ raise ValueError('Body must be None for GET request')
+ request_buffer = BytesIO(utf8(request.body or ''))
+
+ def ioctl(cmd):
+ if cmd == curl.IOCMD_RESTARTREAD:
+ request_buffer.seek(0)
+ curl.setopt(pycurl.READFUNCTION, request_buffer.read)
+ curl.setopt(pycurl.IOCTLFUNCTION, ioctl)
+ if request.method == "POST":
+ curl.setopt(pycurl.POSTFIELDSIZE, len(request.body or ''))
+ else:
+ curl.setopt(pycurl.UPLOAD, True)
+ curl.setopt(pycurl.INFILESIZE, len(request.body or ''))
+
+ if request.auth_username is not None:
+ userpwd = "%s:%s" % (request.auth_username, request.auth_password or '')
+
+ if request.auth_mode is None or request.auth_mode == "basic":
+ curl.setopt(pycurl.HTTPAUTH, pycurl.HTTPAUTH_BASIC)
+ elif request.auth_mode == "digest":
+ curl.setopt(pycurl.HTTPAUTH, pycurl.HTTPAUTH_DIGEST)
+ else:
+ raise ValueError("Unsupported auth_mode %s" % request.auth_mode)
+
+ curl.setopt(pycurl.USERPWD, native_str(userpwd))
+ curl_log.debug("%s %s (username: %r)", request.method, request.url,
+ request.auth_username)
+ else:
+ curl.unsetopt(pycurl.USERPWD)
+ curl_log.debug("%s %s", request.method, request.url)
+
+ if request.client_cert is not None:
+ curl.setopt(pycurl.SSLCERT, request.client_cert)
+
+ if request.client_key is not None:
+ curl.setopt(pycurl.SSLKEY, request.client_key)
+
+ if request.ssl_options is not None:
+ raise ValueError("ssl_options not supported in curl_httpclient")
+
+ if threading.activeCount() > 1:
+ # libcurl/pycurl is not thread-safe by default. When multiple threads
+ # are used, signals should be disabled. This has the side effect
+ # of disabling DNS timeouts in some environments (when libcurl is
+ # not linked against ares), so we don't do it when there is only one
+ # thread. Applications that use many short-lived threads may need
+ # to set NOSIGNAL manually in a prepare_curl_callback since
+ # there may not be any other threads running at the time we call
+ # threading.activeCount.
+ curl.setopt(pycurl.NOSIGNAL, 1)
+ if request.prepare_curl_callback is not None:
+ request.prepare_curl_callback(curl)
+
+ def _curl_header_callback(self, headers, header_callback, header_line):
+ header_line = native_str(header_line.decode('latin1'))
+ if header_callback is not None:
+ self.io_loop.add_callback(header_callback, header_line)
+ # header_line as returned by curl includes the end-of-line characters.
+ # whitespace at the start should be preserved to allow multi-line headers
+ header_line = header_line.rstrip()
+ if header_line.startswith("HTTP/"):
+ headers.clear()
+ try:
+ (__, __, reason) = httputil.parse_response_start_line(header_line)
+ header_line = "X-Http-Reason: %s" % reason
+ except httputil.HTTPInputError:
+ return
+ if not header_line:
+ return
+ headers.parse_line(header_line)
+
+ def _curl_debug(self, debug_type, debug_msg):
+ debug_types = ('I', '<', '>', '<', '>')
+ debug_msg = native_str(debug_msg)
+ if debug_type == 0:
+ curl_log.debug('%s', debug_msg.strip())
+ elif debug_type in (1, 2):
+ for line in debug_msg.splitlines():
+ curl_log.debug('%s %s', debug_types[debug_type], line)
+ elif debug_type == 4:
+ curl_log.debug('%s %r', debug_types[debug_type], debug_msg)
+
+
+class CurlError(HTTPError):
+ def __init__(self, errno, message):
+ HTTPError.__init__(self, 599, message)
+ self.errno = errno
+
+
+if __name__ == "__main__":
+ AsyncHTTPClient.configure(CurlAsyncHTTPClient)
+ main()
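+
+# Usage sketch (illustrative, not part of the module above; the URL is a
+# placeholder). It selects this implementation globally, then fetches one
+# page synchronously for demonstration:
+#
+#     from tornado.ioloop import IOLoop
+#     from tornado.httpclient import AsyncHTTPClient
+#
+#     AsyncHTTPClient.configure("tornado.curl_httpclient.CurlAsyncHTTPClient")
+#     response = IOLoop.current().run_sync(
+#         lambda: AsyncHTTPClient().fetch("http://example.com"))
+#     print(response.code, response.request_time)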
diff --git a/contrib/python/tornado/tornado-4/tornado/escape.py b/contrib/python/tornado/tornado-4/tornado/escape.py
index 2ca3fe3fe8..0fc63f8d20 100644
--- a/contrib/python/tornado/tornado-4/tornado/escape.py
+++ b/contrib/python/tornado/tornado-4/tornado/escape.py
@@ -1,398 +1,398 @@
-#!/usr/bin/env python
-#
-# Copyright 2009 Facebook
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Escaping/unescaping methods for HTML, JSON, URLs, and others.
-
-Also includes a few other miscellaneous string manipulation functions that
-have crept in over time.
-"""
-
-from __future__ import absolute_import, division, print_function
-
-import json
-import re
-
-from tornado.util import PY3, unicode_type, basestring_type
-
-if PY3:
- from urllib.parse import parse_qs as _parse_qs
- import html.entities as htmlentitydefs
- import urllib.parse as urllib_parse
- unichr = chr
-else:
- from urlparse import parse_qs as _parse_qs
- import htmlentitydefs
- import urllib as urllib_parse
-
-try:
- import typing # noqa
-except ImportError:
- pass
-
-
-_XHTML_ESCAPE_RE = re.compile('[&<>"\']')
-_XHTML_ESCAPE_DICT = {'&': '&amp;', '<': '&lt;', '>': '&gt;', '"': '&quot;',
- '\'': '&#39;'}
-
-
-def xhtml_escape(value):
- """Escapes a string so it is valid within HTML or XML.
-
- Escapes the characters ``<``, ``>``, ``"``, ``'``, and ``&``.
- When used in attribute values the escaped strings must be enclosed
- in quotes.
-
- .. versionchanged:: 3.2
-
- Added the single quote to the list of escaped characters.
- """
- return _XHTML_ESCAPE_RE.sub(lambda match: _XHTML_ESCAPE_DICT[match.group(0)],
- to_basestring(value))
-
-
-def xhtml_unescape(value):
- """Un-escapes an XML-escaped string."""
- return re.sub(r"&(#?)(\w+?);", _convert_entity, _unicode(value))
-
-
-# The fact that json_encode wraps json.dumps is an implementation detail.
-# Please see https://github.com/tornadoweb/tornado/pull/706
-# before sending a pull request that adds **kwargs to this function.
-def json_encode(value):
- """JSON-encodes the given Python object."""
- # JSON permits but does not require forward slashes to be escaped.
- # This is useful when json data is emitted in a <script> tag
- # in HTML, as it prevents </script> tags from prematurely terminating
- # the javascript. Some json libraries do this escaping by default,
- # although python's standard library does not, so we do it here.
- # http://stackoverflow.com/questions/1580647/json-why-are-forward-slashes-escaped
- return json.dumps(value).replace("</", "<\\/")
-
-
-def json_decode(value):
- """Returns Python objects for the given JSON string."""
- return json.loads(to_basestring(value))
-
-
-def squeeze(value):
- """Replace all sequences of whitespace chars with a single space."""
- return re.sub(r"[\x00-\x20]+", " ", value).strip()
-
-
-def url_escape(value, plus=True):
- """Returns a URL-encoded version of the given value.
-
- If ``plus`` is true (the default), spaces will be represented
- as "+" instead of "%20". This is appropriate for query strings
- but not for the path component of a URL. Note that this default
- is the reverse of Python's urllib module.
-
- .. versionadded:: 3.1
- The ``plus`` argument
- """
- quote = urllib_parse.quote_plus if plus else urllib_parse.quote
- return quote(utf8(value))
-
-
-# python 3 changed things around enough that we need two separate
-# implementations of url_unescape. We also need our own implementation
-# of parse_qs since python 3's version insists on decoding everything.
-if not PY3:
- def url_unescape(value, encoding='utf-8', plus=True):
- """Decodes the given value from a URL.
-
- The argument may be either a byte or unicode string.
-
- If encoding is None, the result will be a byte string. Otherwise,
- the result is a unicode string in the specified encoding.
-
- If ``plus`` is true (the default), plus signs will be interpreted
- as spaces (literal plus signs must be represented as "%2B"). This
- is appropriate for query strings and form-encoded values but not
- for the path component of a URL. Note that this default is the
- reverse of Python's urllib module.
-
- .. versionadded:: 3.1
- The ``plus`` argument
- """
- unquote = (urllib_parse.unquote_plus if plus else urllib_parse.unquote)
- if encoding is None:
- return unquote(utf8(value))
- else:
- return unicode_type(unquote(utf8(value)), encoding)
-
- parse_qs_bytes = _parse_qs
-else:
- def url_unescape(value, encoding='utf-8', plus=True):
- """Decodes the given value from a URL.
-
- The argument may be either a byte or unicode string.
-
- If encoding is None, the result will be a byte string. Otherwise,
- the result is a unicode string in the specified encoding.
-
- If ``plus`` is true (the default), plus signs will be interpreted
- as spaces (literal plus signs must be represented as "%2B"). This
- is appropriate for query strings and form-encoded values but not
- for the path component of a URL. Note that this default is the
- reverse of Python's urllib module.
-
- .. versionadded:: 3.1
- The ``plus`` argument
- """
- if encoding is None:
- if plus:
- # unquote_to_bytes doesn't have a _plus variant
- value = to_basestring(value).replace('+', ' ')
- return urllib_parse.unquote_to_bytes(value)
- else:
- unquote = (urllib_parse.unquote_plus if plus
- else urllib_parse.unquote)
- return unquote(to_basestring(value), encoding=encoding)
-
- def parse_qs_bytes(qs, keep_blank_values=False, strict_parsing=False):
- """Parses a query string like urlparse.parse_qs, but returns the
- values as byte strings.
-
- Keys still become type str (interpreted as latin1 in python3!)
- because it's too painful to keep them as byte strings in
- python3 and in practice they're nearly always ascii anyway.
- """
- # This is gross, but python3 doesn't give us another way.
- # Latin1 is the universal donor of character encodings.
- result = _parse_qs(qs, keep_blank_values, strict_parsing,
- encoding='latin1', errors='strict')
- encoded = {}
- for k, v in result.items():
- encoded[k] = [i.encode('latin1') for i in v]
- return encoded
-
-
-_UTF8_TYPES = (bytes, type(None))
-
-
-def utf8(value):
- # type: (typing.Union[bytes,unicode_type,None])->typing.Union[bytes,None]
- """Converts a string argument to a byte string.
-
- If the argument is already a byte string or None, it is returned unchanged.
- Otherwise it must be a unicode string and is encoded as utf8.
- """
- if isinstance(value, _UTF8_TYPES):
- return value
- if not isinstance(value, unicode_type):
- raise TypeError(
- "Expected bytes, unicode, or None; got %r" % type(value)
- )
- return value.encode("utf-8")
-
-
-_TO_UNICODE_TYPES = (unicode_type, type(None))
-
-
-def to_unicode(value):
- """Converts a string argument to a unicode string.
-
- If the argument is already a unicode string or None, it is returned
- unchanged. Otherwise it must be a byte string and is decoded as utf8.
- """
- if isinstance(value, _TO_UNICODE_TYPES):
- return value
- if not isinstance(value, bytes):
- raise TypeError(
- "Expected bytes, unicode, or None; got %r" % type(value)
- )
- return value.decode("utf-8")
-
-
-# to_unicode was previously named _unicode not because it was private,
-# but to avoid conflicts with the built-in unicode() function/type
-_unicode = to_unicode
-
-# When dealing with the standard library across python 2 and 3 it is
-# sometimes useful to have a direct conversion to the native string type
-if str is unicode_type:
- native_str = to_unicode
-else:
- native_str = utf8
-
-_BASESTRING_TYPES = (basestring_type, type(None))
-
-
-def to_basestring(value):
- """Converts a string argument to a subclass of basestring.
-
- In python2, byte and unicode strings are mostly interchangeable,
- so functions that deal with a user-supplied argument in combination
- with ascii string constants can use either and should return the type
- the user supplied. In python3, the two types are not interchangeable,
- so this method is needed to convert byte strings to unicode.
- """
- if isinstance(value, _BASESTRING_TYPES):
- return value
- if not isinstance(value, bytes):
- raise TypeError(
- "Expected bytes, unicode, or None; got %r" % type(value)
- )
- return value.decode("utf-8")
-
-
-def recursive_unicode(obj):
- """Walks a simple data structure, converting byte strings to unicode.
-
- Supports lists, tuples, and dictionaries.
- """
- if isinstance(obj, dict):
- return dict((recursive_unicode(k), recursive_unicode(v)) for (k, v) in obj.items())
- elif isinstance(obj, list):
- return list(recursive_unicode(i) for i in obj)
- elif isinstance(obj, tuple):
- return tuple(recursive_unicode(i) for i in obj)
- elif isinstance(obj, bytes):
- return to_unicode(obj)
- else:
- return obj
-
-
-# I originally used the regex from
-# http://daringfireball.net/2010/07/improved_regex_for_matching_urls
-# but it gets all exponential on certain patterns (such as too many trailing
-# dots), causing the regex matcher to never return.
-# This regex should avoid those problems.
-# Use to_unicode instead of tornado.util.u - we don't want backslashes getting
-# processed as escapes.
-_URL_RE = re.compile(to_unicode(r"""\b((?:([\w-]+):(/{1,3})|www[.])(?:(?:(?:[^\s&()]|&amp;|&quot;)*(?:[^!"#$%&'()*+,.:;<=>?@\[\]^`{|}~\s]))|(?:\((?:[^\s&()]|&amp;|&quot;)*\)))+)"""))
-
-
-def linkify(text, shorten=False, extra_params="",
- require_protocol=False, permitted_protocols=["http", "https"]):
- """Converts plain text into HTML with links.
-
- For example: ``linkify("Hello http://tornadoweb.org!")`` would return
- ``Hello <a href="http://tornadoweb.org">http://tornadoweb.org</a>!``
-
- Parameters:
-
- * ``shorten``: Long urls will be shortened for display.
-
- * ``extra_params``: Extra text to include in the link tag, or a callable
- taking the link as an argument and returning the extra text
- e.g. ``linkify(text, extra_params='rel="nofollow" class="external"')``,
- or::
-
- def extra_params_cb(url):
- if url.startswith("http://example.com"):
- return 'class="internal"'
- else:
- return 'class="external" rel="nofollow"'
- linkify(text, extra_params=extra_params_cb)
-
- * ``require_protocol``: Only linkify urls which include a protocol. If
- this is False, urls such as www.facebook.com will also be linkified.
-
- * ``permitted_protocols``: List (or set) of protocols which should be
- linkified, e.g. ``linkify(text, permitted_protocols=["http", "ftp",
- "mailto"])``. It is very unsafe to include protocols such as
- ``javascript``.
- """
- if extra_params and not callable(extra_params):
- extra_params = " " + extra_params.strip()
-
- def make_link(m):
- url = m.group(1)
- proto = m.group(2)
- if require_protocol and not proto:
-            return url  # no protocol, no linkify
-
- if proto and proto not in permitted_protocols:
- return url # bad protocol, no linkify
-
- href = m.group(1)
- if not proto:
- href = "http://" + href # no proto specified, use http
-
- if callable(extra_params):
- params = " " + extra_params(href).strip()
- else:
- params = extra_params
-
- # clip long urls. max_len is just an approximation
- max_len = 30
- if shorten and len(url) > max_len:
- before_clip = url
- if proto:
- proto_len = len(proto) + 1 + len(m.group(3) or "") # +1 for :
- else:
- proto_len = 0
-
- parts = url[proto_len:].split("/")
- if len(parts) > 1:
- # Grab the whole host part plus the first bit of the path
- # The path is usually not that interesting once shortened
- # (no more slug, etc), so it really just provides a little
- # extra indication of shortening.
- url = url[:proto_len] + parts[0] + "/" + \
- parts[1][:8].split('?')[0].split('.')[0]
-
- if len(url) > max_len * 1.5: # still too long
- url = url[:max_len]
-
- if url != before_clip:
- amp = url.rfind('&')
- # avoid splitting html char entities
- if amp > max_len - 5:
- url = url[:amp]
- url += "..."
-
- if len(url) >= len(before_clip):
- url = before_clip
- else:
- # full url is visible on mouse-over (for those who don't
- # have a status bar, such as Safari by default)
- params += ' title="%s"' % href
-
- return u'<a href="%s"%s>%s</a>' % (href, params, url)
-
- # First HTML-escape so that our strings are all safe.
-    # The regex is modified to avoid character entities other than &amp; so
- # that we won't pick up &quot;, etc.
- text = _unicode(xhtml_escape(text))
- return _URL_RE.sub(make_link, text)
-
-
-def _convert_entity(m):
- if m.group(1) == "#":
- try:
- if m.group(2)[:1].lower() == 'x':
- return unichr(int(m.group(2)[1:], 16))
- else:
- return unichr(int(m.group(2)))
- except ValueError:
- return "&#%s;" % m.group(2)
- try:
- return _HTML_UNICODE_MAP[m.group(2)]
- except KeyError:
- return "&%s;" % m.group(2)
-
-
-def _build_unicode_map():
- unicode_map = {}
- for name, value in htmlentitydefs.name2codepoint.items():
- unicode_map[name] = unichr(value)
- return unicode_map
-
-
-_HTML_UNICODE_MAP = _build_unicode_map()
+#!/usr/bin/env python
+#
+# Copyright 2009 Facebook
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Escaping/unescaping methods for HTML, JSON, URLs, and others.
+
+Also includes a few other miscellaneous string manipulation functions that
+have crept in over time.
+"""
+
+from __future__ import absolute_import, division, print_function
+
+import json
+import re
+
+from tornado.util import PY3, unicode_type, basestring_type
+
+if PY3:
+ from urllib.parse import parse_qs as _parse_qs
+ import html.entities as htmlentitydefs
+ import urllib.parse as urllib_parse
+ unichr = chr
+else:
+ from urlparse import parse_qs as _parse_qs
+ import htmlentitydefs
+ import urllib as urllib_parse
+
+try:
+ import typing # noqa
+except ImportError:
+ pass
+
+
+_XHTML_ESCAPE_RE = re.compile('[&<>"\']')
+_XHTML_ESCAPE_DICT = {'&': '&amp;', '<': '&lt;', '>': '&gt;', '"': '&quot;',
+ '\'': '&#39;'}
+
+
+def xhtml_escape(value):
+ """Escapes a string so it is valid within HTML or XML.
+
+ Escapes the characters ``<``, ``>``, ``"``, ``'``, and ``&``.
+ When used in attribute values the escaped strings must be enclosed
+ in quotes.
+
+ .. versionchanged:: 3.2
+
+ Added the single quote to the list of escaped characters.
+ """
+ return _XHTML_ESCAPE_RE.sub(lambda match: _XHTML_ESCAPE_DICT[match.group(0)],
+ to_basestring(value))
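+
+# A quick illustration of the escaping rules above (doctest-style sketch):
+#
+#     >>> xhtml_escape('<a href="x">O\'Reilly & Sons</a>')
+#     '&lt;a href=&quot;x&quot;&gt;O&#39;Reilly &amp; Sons&lt;/a&gt;'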
+
+
+def xhtml_unescape(value):
+ """Un-escapes an XML-escaped string."""
+ return re.sub(r"&(#?)(\w+?);", _convert_entity, _unicode(value))
+
+
+# The fact that json_encode wraps json.dumps is an implementation detail.
+# Please see https://github.com/tornadoweb/tornado/pull/706
+# before sending a pull request that adds **kwargs to this function.
+def json_encode(value):
+ """JSON-encodes the given Python object."""
+ # JSON permits but does not require forward slashes to be escaped.
+ # This is useful when json data is emitted in a <script> tag
+ # in HTML, as it prevents </script> tags from prematurely terminating
+ # the javascript. Some json libraries do this escaping by default,
+ # although python's standard library does not, so we do it here.
+ # http://stackoverflow.com/questions/1580647/json-why-are-forward-slashes-escaped
+ return json.dumps(value).replace("</", "<\\/")
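+
+# A quick illustration of the forward-slash escaping described above
+# (doctest-style sketch):
+#
+#     >>> json_encode({"html": "</script>"})
+#     '{"html": "<\\/script>"}'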
+
+
+def json_decode(value):
+ """Returns Python objects for the given JSON string."""
+ return json.loads(to_basestring(value))
+
+
+def squeeze(value):
+ """Replace all sequences of whitespace chars with a single space."""
+ return re.sub(r"[\x00-\x20]+", " ", value).strip()
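+
+# Example (doctest-style sketch):
+#
+#     >>> squeeze("  hello \t world  ")
+#     'hello world'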
+
+
+def url_escape(value, plus=True):
+ """Returns a URL-encoded version of the given value.
+
+ If ``plus`` is true (the default), spaces will be represented
+ as "+" instead of "%20". This is appropriate for query strings
+ but not for the path component of a URL. Note that this default
+ is the reverse of Python's urllib module.
+
+ .. versionadded:: 3.1
+ The ``plus`` argument
+ """
+ quote = urllib_parse.quote_plus if plus else urllib_parse.quote
+ return quote(utf8(value))
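+
+# Example of the ``plus`` behavior (doctest-style sketch):
+#
+#     >>> url_escape("a b&c")
+#     'a+b%26c'
+#     >>> url_escape("a b&c", plus=False)
+#     'a%20b%26c'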
+
+
+# python 3 changed things around enough that we need two separate
+# implementations of url_unescape. We also need our own implementation
+# of parse_qs since python 3's version insists on decoding everything.
+if not PY3:
+ def url_unescape(value, encoding='utf-8', plus=True):
+ """Decodes the given value from a URL.
+
+ The argument may be either a byte or unicode string.
+
+ If encoding is None, the result will be a byte string. Otherwise,
+ the result is a unicode string in the specified encoding.
+
+ If ``plus`` is true (the default), plus signs will be interpreted
+ as spaces (literal plus signs must be represented as "%2B"). This
+ is appropriate for query strings and form-encoded values but not
+ for the path component of a URL. Note that this default is the
+ reverse of Python's urllib module.
+
+ .. versionadded:: 3.1
+ The ``plus`` argument
+ """
+ unquote = (urllib_parse.unquote_plus if plus else urllib_parse.unquote)
+ if encoding is None:
+ return unquote(utf8(value))
+ else:
+ return unicode_type(unquote(utf8(value)), encoding)
+
+ parse_qs_bytes = _parse_qs
+else:
+ def url_unescape(value, encoding='utf-8', plus=True):
+ """Decodes the given value from a URL.
+
+ The argument may be either a byte or unicode string.
+
+ If encoding is None, the result will be a byte string. Otherwise,
+ the result is a unicode string in the specified encoding.
+
+ If ``plus`` is true (the default), plus signs will be interpreted
+ as spaces (literal plus signs must be represented as "%2B"). This
+ is appropriate for query strings and form-encoded values but not
+ for the path component of a URL. Note that this default is the
+ reverse of Python's urllib module.
+
+ .. versionadded:: 3.1
+ The ``plus`` argument
+ """
+ if encoding is None:
+ if plus:
+ # unquote_to_bytes doesn't have a _plus variant
+ value = to_basestring(value).replace('+', ' ')
+ return urllib_parse.unquote_to_bytes(value)
+ else:
+ unquote = (urllib_parse.unquote_plus if plus
+ else urllib_parse.unquote)
+ return unquote(to_basestring(value), encoding=encoding)
+
+ def parse_qs_bytes(qs, keep_blank_values=False, strict_parsing=False):
+ """Parses a query string like urlparse.parse_qs, but returns the
+ values as byte strings.
+
+ Keys still become type str (interpreted as latin1 in python3!)
+ because it's too painful to keep them as byte strings in
+ python3 and in practice they're nearly always ascii anyway.
+ """
+ # This is gross, but python3 doesn't give us another way.
+ # Latin1 is the universal donor of character encodings.
+ result = _parse_qs(qs, keep_blank_values, strict_parsing,
+ encoding='latin1', errors='strict')
+ encoded = {}
+ for k, v in result.items():
+ encoded[k] = [i.encode('latin1') for i in v]
+ return encoded
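+
+    # Example (doctest-style sketch, Python 3):
+    #
+    #     >>> url_unescape("a+b%26c")
+    #     'a b&c'
+    #     >>> parse_qs_bytes("x=%E2%82%AC")
+    #     {'x': [b'\xe2\x82\xac']}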
+
+
+_UTF8_TYPES = (bytes, type(None))
+
+
+def utf8(value):
+ # type: (typing.Union[bytes,unicode_type,None])->typing.Union[bytes,None]
+ """Converts a string argument to a byte string.
+
+ If the argument is already a byte string or None, it is returned unchanged.
+ Otherwise it must be a unicode string and is encoded as utf8.
+ """
+ if isinstance(value, _UTF8_TYPES):
+ return value
+ if not isinstance(value, unicode_type):
+ raise TypeError(
+ "Expected bytes, unicode, or None; got %r" % type(value)
+ )
+ return value.encode("utf-8")
+
+
+_TO_UNICODE_TYPES = (unicode_type, type(None))
+
+
+def to_unicode(value):
+ """Converts a string argument to a unicode string.
+
+ If the argument is already a unicode string or None, it is returned
+ unchanged. Otherwise it must be a byte string and is decoded as utf8.
+ """
+ if isinstance(value, _TO_UNICODE_TYPES):
+ return value
+ if not isinstance(value, bytes):
+ raise TypeError(
+ "Expected bytes, unicode, or None; got %r" % type(value)
+ )
+ return value.decode("utf-8")
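+
+# Round-trip example for utf8/to_unicode (doctest-style sketch):
+#
+#     >>> utf8(u"caf\xe9") == b"caf\xc3\xa9"
+#     True
+#     >>> to_unicode(b"caf\xc3\xa9") == u"caf\xe9"
+#     True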
+
+
+# to_unicode was previously named _unicode not because it was private,
+# but to avoid conflicts with the built-in unicode() function/type
+_unicode = to_unicode
+
+# When dealing with the standard library across python 2 and 3 it is
+# sometimes useful to have a direct conversion to the native string type
+if str is unicode_type:
+ native_str = to_unicode
+else:
+ native_str = utf8
+
+_BASESTRING_TYPES = (basestring_type, type(None))
+
+
+def to_basestring(value):
+ """Converts a string argument to a subclass of basestring.
+
+ In python2, byte and unicode strings are mostly interchangeable,
+ so functions that deal with a user-supplied argument in combination
+ with ascii string constants can use either and should return the type
+ the user supplied. In python3, the two types are not interchangeable,
+ so this method is needed to convert byte strings to unicode.
+ """
+ if isinstance(value, _BASESTRING_TYPES):
+ return value
+ if not isinstance(value, bytes):
+ raise TypeError(
+ "Expected bytes, unicode, or None; got %r" % type(value)
+ )
+ return value.decode("utf-8")
+
+
+def recursive_unicode(obj):
+ """Walks a simple data structure, converting byte strings to unicode.
+
+ Supports lists, tuples, and dictionaries.
+ """
+ if isinstance(obj, dict):
+ return dict((recursive_unicode(k), recursive_unicode(v)) for (k, v) in obj.items())
+ elif isinstance(obj, list):
+ return list(recursive_unicode(i) for i in obj)
+ elif isinstance(obj, tuple):
+ return tuple(recursive_unicode(i) for i in obj)
+ elif isinstance(obj, bytes):
+ return to_unicode(obj)
+ else:
+ return obj
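+
+# Example (doctest-style sketch):
+#
+#     >>> recursive_unicode({b"k": [b"v", (b"w",)]}) == {u"k": [u"v", (u"w",)]}
+#     True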
+
+
+# I originally used the regex from
+# http://daringfireball.net/2010/07/improved_regex_for_matching_urls
+# but it gets all exponential on certain patterns (such as too many trailing
+# dots), causing the regex matcher to never return.
+# This regex should avoid those problems.
+# Use to_unicode instead of tornado.util.u - we don't want backslashes getting
+# processed as escapes.
+_URL_RE = re.compile(to_unicode(r"""\b((?:([\w-]+):(/{1,3})|www[.])(?:(?:(?:[^\s&()]|&amp;|&quot;)*(?:[^!"#$%&'()*+,.:;<=>?@\[\]^`{|}~\s]))|(?:\((?:[^\s&()]|&amp;|&quot;)*\)))+)"""))
+
+
+def linkify(text, shorten=False, extra_params="",
+ require_protocol=False, permitted_protocols=["http", "https"]):
+ """Converts plain text into HTML with links.
+
+ For example: ``linkify("Hello http://tornadoweb.org!")`` would return
+ ``Hello <a href="http://tornadoweb.org">http://tornadoweb.org</a>!``
+
+ Parameters:
+
+ * ``shorten``: Long urls will be shortened for display.
+
+ * ``extra_params``: Extra text to include in the link tag, or a callable
+ taking the link as an argument and returning the extra text
+ e.g. ``linkify(text, extra_params='rel="nofollow" class="external"')``,
+ or::
+
+ def extra_params_cb(url):
+ if url.startswith("http://example.com"):
+ return 'class="internal"'
+ else:
+ return 'class="external" rel="nofollow"'
+ linkify(text, extra_params=extra_params_cb)
+
+ * ``require_protocol``: Only linkify urls which include a protocol. If
+ this is False, urls such as www.facebook.com will also be linkified.
+
+ * ``permitted_protocols``: List (or set) of protocols which should be
+ linkified, e.g. ``linkify(text, permitted_protocols=["http", "ftp",
+ "mailto"])``. It is very unsafe to include protocols such as
+ ``javascript``.
+ """
+ if extra_params and not callable(extra_params):
+ extra_params = " " + extra_params.strip()
+
+ def make_link(m):
+ url = m.group(1)
+ proto = m.group(2)
+ if require_protocol and not proto:
+            return url  # no protocol, no linkify
+
+ if proto and proto not in permitted_protocols:
+ return url # bad protocol, no linkify
+
+ href = m.group(1)
+ if not proto:
+ href = "http://" + href # no proto specified, use http
+
+ if callable(extra_params):
+ params = " " + extra_params(href).strip()
+ else:
+ params = extra_params
+
+ # clip long urls. max_len is just an approximation
+ max_len = 30
+ if shorten and len(url) > max_len:
+ before_clip = url
+ if proto:
+ proto_len = len(proto) + 1 + len(m.group(3) or "") # +1 for :
+ else:
+ proto_len = 0
+
+ parts = url[proto_len:].split("/")
+ if len(parts) > 1:
+ # Grab the whole host part plus the first bit of the path
+ # The path is usually not that interesting once shortened
+ # (no more slug, etc), so it really just provides a little
+ # extra indication of shortening.
+ url = url[:proto_len] + parts[0] + "/" + \
+ parts[1][:8].split('?')[0].split('.')[0]
+
+ if len(url) > max_len * 1.5: # still too long
+ url = url[:max_len]
+
+ if url != before_clip:
+ amp = url.rfind('&')
+ # avoid splitting html char entities
+ if amp > max_len - 5:
+ url = url[:amp]
+ url += "..."
+
+ if len(url) >= len(before_clip):
+ url = before_clip
+ else:
+ # full url is visible on mouse-over (for those who don't
+ # have a status bar, such as Safari by default)
+ params += ' title="%s"' % href
+
+ return u'<a href="%s"%s>%s</a>' % (href, params, url)
+
+ # First HTML-escape so that our strings are all safe.
+    # The regex is modified to avoid character entities other than &amp; so
+ # that we won't pick up &quot;, etc.
+ text = _unicode(xhtml_escape(text))
+ return _URL_RE.sub(make_link, text)
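+
+# Example (doctest-style sketch; expected output wrapped for readability):
+#
+#     >>> linkify(u"see www.tornadoweb.org") == (
+#     ...     u'see <a href="http://www.tornadoweb.org">'
+#     ...     u'www.tornadoweb.org</a>')
+#     True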
+
+
+def _convert_entity(m):
+ if m.group(1) == "#":
+ try:
+ if m.group(2)[:1].lower() == 'x':
+ return unichr(int(m.group(2)[1:], 16))
+ else:
+ return unichr(int(m.group(2)))
+ except ValueError:
+ return "&#%s;" % m.group(2)
+ try:
+ return _HTML_UNICODE_MAP[m.group(2)]
+ except KeyError:
+ return "&%s;" % m.group(2)
+
+
+def _build_unicode_map():
+ unicode_map = {}
+ for name, value in htmlentitydefs.name2codepoint.items():
+ unicode_map[name] = unichr(value)
+ return unicode_map
+
+
+_HTML_UNICODE_MAP = _build_unicode_map()
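+
+# Entity handling example for xhtml_unescape (doctest-style sketch):
+#
+#     >>> xhtml_unescape(u"&lt;b&gt; &#65; &#x42; &amp;") == u"<b> A B &"
+#     True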
diff --git a/contrib/python/tornado/tornado-4/tornado/gen.py b/contrib/python/tornado/tornado-4/tornado/gen.py
index 89a4dd7c7a..cef0935a92 100644
--- a/contrib/python/tornado/tornado-4/tornado/gen.py
+++ b/contrib/python/tornado/tornado-4/tornado/gen.py
@@ -1,1304 +1,1304 @@
-"""``tornado.gen`` is a generator-based interface to make it easier to
-work in an asynchronous environment. Code using the ``gen`` module
-is technically asynchronous, but it is written as a single generator
-instead of a collection of separate functions.
-
-For example, the following asynchronous handler:
-
-.. testcode::
-
- class AsyncHandler(RequestHandler):
- @asynchronous
- def get(self):
- http_client = AsyncHTTPClient()
- http_client.fetch("http://example.com",
- callback=self.on_fetch)
-
- def on_fetch(self, response):
- do_something_with_response(response)
- self.render("template.html")
-
-.. testoutput::
- :hide:
-
-could be written with ``gen`` as:
-
-.. testcode::
-
- class GenAsyncHandler(RequestHandler):
- @gen.coroutine
- def get(self):
- http_client = AsyncHTTPClient()
- response = yield http_client.fetch("http://example.com")
- do_something_with_response(response)
- self.render("template.html")
-
-.. testoutput::
- :hide:
-
-Most asynchronous functions in Tornado return a `.Future`;
-yielding this object returns its `~.Future.result`.
-
-You can also yield a list or dict of ``Futures``, which will be
-started at the same time and run in parallel; a list or dict of results will
-be returned when they are all finished:
-
-.. testcode::
-
- @gen.coroutine
- def get(self):
- http_client = AsyncHTTPClient()
- response1, response2 = yield [http_client.fetch(url1),
- http_client.fetch(url2)]
- response_dict = yield dict(response3=http_client.fetch(url3),
- response4=http_client.fetch(url4))
- response3 = response_dict['response3']
- response4 = response_dict['response4']
-
-.. testoutput::
- :hide:
-
-If the `~functools.singledispatch` library is available (standard in
-Python 3.4, available via the `singledispatch
-<https://pypi.python.org/pypi/singledispatch>`_ package on older
-versions), additional types of objects may be yielded. Tornado includes
-support for ``asyncio.Future`` and Twisted's ``Deferred`` class when
-``tornado.platform.asyncio`` and ``tornado.platform.twisted`` are imported.
-See the `convert_yielded` function to extend this mechanism.
-
-.. versionchanged:: 3.2
- Dict support added.
-
-.. versionchanged:: 4.1
- Support added for yielding ``asyncio`` Futures and Twisted Deferreds
- via ``singledispatch``.
-
-"""
-from __future__ import absolute_import, division, print_function
-
-import collections
-import functools
-import itertools
-import os
-import sys
-import textwrap
-import types
-import weakref
-
-from tornado.concurrent import Future, TracebackFuture, is_future, chain_future
-from tornado.ioloop import IOLoop
-from tornado.log import app_log
-from tornado import stack_context
-from tornado.util import PY3, raise_exc_info
-
-try:
- try:
- # py34+
- from functools import singledispatch # type: ignore
- except ImportError:
- from singledispatch import singledispatch # backport
-except ImportError:
- # In most cases, singledispatch is required (to avoid
- # difficult-to-diagnose problems in which the functionality
-    # available differs depending on which invisible packages are
- # installed). However, in Google App Engine third-party
- # dependencies are more trouble so we allow this module to be
- # imported without it.
- if 'APPENGINE_RUNTIME' not in os.environ:
- raise
- singledispatch = None
-
-try:
- try:
- # py35+
- from collections.abc import Generator as GeneratorType # type: ignore
- except ImportError:
- from backports_abc import Generator as GeneratorType # type: ignore
-
- try:
- # py35+
- from inspect import isawaitable # type: ignore
- except ImportError:
- from backports_abc import isawaitable
-except ImportError:
- if 'APPENGINE_RUNTIME' not in os.environ:
- raise
- from types import GeneratorType
-
- def isawaitable(x): # type: ignore
- return False
-
-if PY3:
- import builtins
-else:
- import __builtin__ as builtins
-
-
-class KeyReuseError(Exception):
- pass
-
-
-class UnknownKeyError(Exception):
- pass
-
-
-class LeakedCallbackError(Exception):
- pass
-
-
-class BadYieldError(Exception):
- pass
-
-
-class ReturnValueIgnoredError(Exception):
- pass
-
-
-class TimeoutError(Exception):
- """Exception raised by ``with_timeout``."""
-
-
-def _value_from_stopiteration(e):
- try:
- # StopIteration has a value attribute beginning in py33.
- # So does our Return class.
- return e.value
- except AttributeError:
- pass
- try:
- # Cython backports coroutine functionality by putting the value in
- # e.args[0].
- return e.args[0]
- except (AttributeError, IndexError):
- return None
-
-
-def engine(func):
- """Callback-oriented decorator for asynchronous generators.
-
- This is an older interface; for new code that does not need to be
- compatible with versions of Tornado older than 3.0 the
- `coroutine` decorator is recommended instead.
-
- This decorator is similar to `coroutine`, except it does not
- return a `.Future` and the ``callback`` argument is not treated
- specially.
-
- In most cases, functions decorated with `engine` should take
- a ``callback`` argument and invoke it with their result when
- they are finished. One notable exception is the
- `~tornado.web.RequestHandler` :ref:`HTTP verb methods <verbs>`,
- which use ``self.finish()`` in place of a callback argument.
- """
- func = _make_coroutine_wrapper(func, replace_callback=False)
-
- @functools.wraps(func)
- def wrapper(*args, **kwargs):
- future = func(*args, **kwargs)
-
- def final_callback(future):
- if future.result() is not None:
- raise ReturnValueIgnoredError(
- "@gen.engine functions cannot return values: %r" %
- (future.result(),))
- # The engine interface doesn't give us any way to return
- # errors but to raise them into the stack context.
- # Save the stack context here to use when the Future has resolved.
- future.add_done_callback(stack_context.wrap(final_callback))
- return wrapper
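-
-# A minimal sketch of the callback style this decorator supports
-# (illustrative only; ``divide`` is a hypothetical name):
-#
-#     @engine
-#     def divide(a, b, callback):
-#         yield moment          # stand-in for real asynchronous work
-#         callback(a / b)       # deliver the result instead of returning it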
-
-
-def coroutine(func, replace_callback=True):
- """Decorator for asynchronous generators.
-
- Any generator that yields objects from this module must be wrapped
- in either this decorator or `engine`.
-
- Coroutines may "return" by raising the special exception
- `Return(value) <Return>`. In Python 3.3+, it is also possible for
- the function to simply use the ``return value`` statement (prior to
- Python 3.3 generators were not allowed to also return values).
- In all versions of Python a coroutine that simply wishes to exit
- early may use the ``return`` statement without a value.
-
- Functions with this decorator return a `.Future`. Additionally,
- they may be called with a ``callback`` keyword argument, which
- will be invoked with the future's result when it resolves. If the
- coroutine fails, the callback will not be run and an exception
- will be raised into the surrounding `.StackContext`. The
- ``callback`` argument is not visible inside the decorated
- function; it is handled by the decorator itself.
-
- From the caller's perspective, ``@gen.coroutine`` is similar to
- the combination of ``@return_future`` and ``@gen.engine``.
-
- .. warning::
-
- When exceptions occur inside a coroutine, the exception
- information will be stored in the `.Future` object. You must
- examine the result of the `.Future` object, or the exception
- may go unnoticed by your code. This means yielding the function
- if called from another coroutine, using something like
- `.IOLoop.run_sync` for top-level calls, or passing the `.Future`
- to `.IOLoop.add_future`.
-
- """
- return _make_coroutine_wrapper(func, replace_callback=True)
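-
-# A minimal coroutine sketch (illustrative only; ``add_async`` is a
-# hypothetical name, and the caller must wait on the returned Future,
-# e.g. by yielding it or via IOLoop.run_sync, per the warning above):
-#
-#     @coroutine
-#     def add_async(a, b):
-#         yield moment             # stand-in for real asynchronous work
-#         raise Return(a + b)      # or plain ``return a + b`` on Python 3.3+
-#
-#     result = IOLoop.current().run_sync(lambda: add_async(1, 2))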
-
-
-# Ties lifetime of runners to their result futures. GitHub issue #1769.
-# Generators, like any object in Python, must be strongly referenced
-# in order to not be cleaned up by the garbage collector. When using
-# coroutines, the Runner object is what strong-refs the inner
-# generator. However, the only item that strong-reffed the Runner
-# was the last Future that the inner generator yielded (via the
-# Future's internal done_callback list). Usually this is enough, but
-# it is also possible for this Future to not have any strong references
-# other than other objects referenced by the Runner object (usually
-# when using other callback patterns and/or weakrefs). In this
-# situation, if a garbage collection ran, a cycle would be detected and
-# Runner objects could be destroyed along with their inner generators
-# and everything in their local scope.
-# This map provides strong references to Runner objects as long as
-# their result future objects also have strong references (typically
-# from the parent coroutine's Runner). This keeps the coroutine's
-# Runner alive.
-_futures_to_runners = weakref.WeakKeyDictionary()
-
-
-def _make_coroutine_wrapper(func, replace_callback):
- """The inner workings of ``@gen.coroutine`` and ``@gen.engine``.
-
- The two decorators differ in their treatment of the ``callback``
- argument, so we cannot simply implement ``@engine`` in terms of
- ``@coroutine``.
- """
- # On Python 3.5, set the coroutine flag on our generator, to allow it
- # to be used with 'await'.
- wrapped = func
- if hasattr(types, 'coroutine'):
- func = types.coroutine(func)
-
- @functools.wraps(wrapped)
- def wrapper(*args, **kwargs):
- future = TracebackFuture()
-
- if replace_callback and 'callback' in kwargs:
- callback = kwargs.pop('callback')
- IOLoop.current().add_future(
- future, lambda future: callback(future.result()))
-
- try:
- result = func(*args, **kwargs)
- except (Return, StopIteration) as e:
- result = _value_from_stopiteration(e)
- except Exception:
- future.set_exc_info(sys.exc_info())
- return future
- else:
- if isinstance(result, GeneratorType):
- # Inline the first iteration of Runner.run. This lets us
- # avoid the cost of creating a Runner when the coroutine
- # never actually yields, which in turn allows us to
- # use "optional" coroutines in critical path code without
- # performance penalty for the synchronous case.
- try:
- orig_stack_contexts = stack_context._state.contexts
- yielded = next(result)
- if stack_context._state.contexts is not orig_stack_contexts:
- yielded = TracebackFuture()
- yielded.set_exception(
- stack_context.StackContextInconsistentError(
- 'stack_context inconsistency (probably caused '
- 'by yield within a "with StackContext" block)'))
- except (StopIteration, Return) as e:
- future.set_result(_value_from_stopiteration(e))
- except Exception:
- future.set_exc_info(sys.exc_info())
- else:
- _futures_to_runners[future] = Runner(result, future, yielded)
- yielded = None
- try:
- return future
- finally:
- # Subtle memory optimization: if next() raised an exception,
- # the future's exc_info contains a traceback which
- # includes this stack frame. This creates a cycle,
- # which will be collected at the next full GC but has
- # been shown to greatly increase memory usage of
- # benchmarks (relative to the refcount-based scheme
- # used in the absence of cycles). We can avoid the
- # cycle by clearing the local variable after we return it.
- future = None
- future.set_result(result)
- return future
-
- wrapper.__wrapped__ = wrapped
- wrapper.__tornado_coroutine__ = True
- return wrapper
-
-
-def is_coroutine_function(func):
- """Return whether *func* is a coroutine function, i.e. a function
- wrapped with `~.gen.coroutine`.
-
- .. versionadded:: 4.5
- """
- return getattr(func, '__tornado_coroutine__', False)
-
-
-class Return(Exception):
- """Special exception to return a value from a `coroutine`.
-
- If this exception is raised, its value argument is used as the
- result of the coroutine::
-
- @gen.coroutine
- def fetch_json(url):
- response = yield AsyncHTTPClient().fetch(url)
- raise gen.Return(json_decode(response.body))
-
- In Python 3.3, this exception is no longer necessary: the ``return``
- statement can be used directly to return a value (previously
- ``yield`` and ``return`` with a value could not be combined in the
- same function).
-
- By analogy with the return statement, the value argument is optional,
- but it is never necessary to ``raise gen.Return()``. The ``return``
- statement can be used with no arguments instead.
- """
- def __init__(self, value=None):
- super(Return, self).__init__()
- self.value = value
- # Cython recognizes subclasses of StopIteration with a .args tuple.
- self.args = (value,)
-
-
-class WaitIterator(object):
- """Provides an iterator to yield the results of futures as they finish.
-
- Yielding a set of futures like this:
-
- ``results = yield [future1, future2]``
-
- pauses the coroutine until both ``future1`` and ``future2``
- return, and then restarts the coroutine with the results of both
- futures. If either future is an exception, the expression will
- raise that exception and all the results will be lost.
-
- If you need to get the result of each future as soon as possible,
- or if you need the result of some futures even if others produce
- errors, you can use ``WaitIterator``::
-
- wait_iterator = gen.WaitIterator(future1, future2)
- while not wait_iterator.done():
- try:
- result = yield wait_iterator.next()
- except Exception as e:
- print("Error {} from {}".format(e, wait_iterator.current_future))
- else:
- print("Result {} received from {} at {}".format(
- result, wait_iterator.current_future,
- wait_iterator.current_index))
-
- Because results are returned as soon as they are available, the
- output from the iterator *will not be in the same order as the
- input arguments*. If you need to know which future produced the
- current result, you can use the attributes
- ``WaitIterator.current_future`` or ``WaitIterator.current_index``
- to get the index of the future from the input list. (If keyword
- arguments were used in the construction of the `WaitIterator`,
- ``current_index`` will use the corresponding keyword.)
-
- On Python 3.5, `WaitIterator` implements the async iterator
- protocol, so it can be used with the ``async for`` statement (note
- that in this version the entire iteration is aborted if any value
- raises an exception, while the previous example can continue past
- individual errors)::
-
- async for result in gen.WaitIterator(future1, future2):
- print("Result {} received from {} at {}".format(
- result, wait_iterator.current_future,
- wait_iterator.current_index))
-
- .. versionadded:: 4.1
-
- .. versionchanged:: 4.3
- Added ``async for`` support in Python 3.5.
-
- """
- def __init__(self, *args, **kwargs):
- if args and kwargs:
- raise ValueError(
- "You must provide args or kwargs, not both")
-
- if kwargs:
- self._unfinished = dict((f, k) for (k, f) in kwargs.items())
- futures = list(kwargs.values())
- else:
- self._unfinished = dict((f, i) for (i, f) in enumerate(args))
- futures = args
-
- self._finished = collections.deque()
- self.current_index = self.current_future = None
- self._running_future = None
-
- for future in futures:
- future.add_done_callback(self._done_callback)
-
- def done(self):
- """Returns True if this iterator has no more results."""
- if self._finished or self._unfinished:
- return False
- # Clear the 'current' values when iteration is done.
- self.current_index = self.current_future = None
- return True
-
- def next(self):
- """Returns a `.Future` that will yield the next available result.
-
- Note that this `.Future` will not be the same object as any of
- the inputs.
- """
- self._running_future = TracebackFuture()
-
- if self._finished:
- self._return_result(self._finished.popleft())
-
- return self._running_future
-
- def _done_callback(self, done):
- if self._running_future and not self._running_future.done():
- self._return_result(done)
- else:
- self._finished.append(done)
-
- def _return_result(self, done):
- """Called set the returned future's state that of the future
- we yielded, and set the current future for the iterator.
- """
- chain_future(done, self._running_future)
-
- self.current_future = done
- self.current_index = self._unfinished.pop(done)
-
- @coroutine
- def __aiter__(self):
- raise Return(self)
-
- def __anext__(self):
- if self.done():
- # Lookup by name to silence pyflakes on older versions.
- raise getattr(builtins, 'StopAsyncIteration')()
- return self.next()
-
-
-class YieldPoint(object):
- """Base class for objects that may be yielded from the generator.
-
- .. deprecated:: 4.0
- Use `Futures <.Future>` instead.
- """
- def start(self, runner):
- """Called by the runner after the generator has yielded.
-
- No other methods will be called on this object before ``start``.
- """
- raise NotImplementedError()
-
- def is_ready(self):
- """Called by the runner to determine whether to resume the generator.
-
- Returns a boolean; may be called more than once.
- """
- raise NotImplementedError()
-
- def get_result(self):
- """Returns the value to use as the result of the yield expression.
-
- This method will only be called once, and only after `is_ready`
- has returned true.
- """
- raise NotImplementedError()
-
-
-class Callback(YieldPoint):
- """Returns a callable object that will allow a matching `Wait` to proceed.
-
- The key may be any value suitable for use as a dictionary key, and is
- used to match ``Callbacks`` to their corresponding ``Waits``. The key
- must be unique among outstanding callbacks within a single run of the
- generator function, but may be reused across different runs of the same
- function (so constants generally work fine).
-
- The callback may be called with zero or one arguments; if an argument
- is given it will be returned by `Wait`.
-
- .. deprecated:: 4.0
- Use `Futures <.Future>` instead.
- """
- def __init__(self, key):
- self.key = key
-
- def start(self, runner):
- self.runner = runner
- runner.register_callback(self.key)
-
- def is_ready(self):
- return True
-
- def get_result(self):
- return self.runner.result_callback(self.key)
-
-
-class Wait(YieldPoint):
- """Returns the argument passed to the result of a previous `Callback`.
-
- .. deprecated:: 4.0
- Use `Futures <.Future>` instead.
- """
- def __init__(self, key):
- self.key = key
-
- def start(self, runner):
- self.runner = runner
-
- def is_ready(self):
- return self.runner.is_ready(self.key)
-
- def get_result(self):
- return self.runner.pop_result(self.key)
-
-
-class WaitAll(YieldPoint):
- """Returns the results of multiple previous `Callbacks <Callback>`.
-
- The argument is a sequence of `Callback` keys, and the result is
- a list of results in the same order.
-
- `WaitAll` is equivalent to yielding a list of `Wait` objects.
-
- .. deprecated:: 4.0
- Use `Futures <.Future>` instead.
- """
- def __init__(self, keys):
- self.keys = keys
-
- def start(self, runner):
- self.runner = runner
-
- def is_ready(self):
- return all(self.runner.is_ready(key) for key in self.keys)
-
- def get_result(self):
- return [self.runner.pop_result(key) for key in self.keys]
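-
-# A sketch of the deprecated key-based pattern implemented by `Callback`,
-# `Wait`, and `WaitAll` (illustrative only; ``http_client`` and the URLs
-# are hypothetical, and new code should yield Futures instead):
-#
-#     @engine
-#     def get(self):
-#         http_client.fetch(url1, callback=(yield Callback("one")))
-#         http_client.fetch(url2, callback=(yield Callback("two")))
-#         response1, response2 = yield WaitAll(["one", "two"])
-#         self.finish()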
-
-
-def Task(func, *args, **kwargs):
- """Adapts a callback-based asynchronous function for use in coroutines.
-
- Takes a function (and optional additional arguments) and runs it with
- those arguments plus a ``callback`` keyword argument. The argument passed
- to the callback is returned as the result of the yield expression.
-
- .. versionchanged:: 4.0
- ``gen.Task`` is now a function that returns a `.Future`, instead of
- a subclass of `YieldPoint`. It still behaves the same way when
- yielded.
- """
- future = Future()
-
- def handle_exception(typ, value, tb):
- if future.done():
- return False
- future.set_exc_info((typ, value, tb))
- return True
-
- def set_result(result):
- if future.done():
- return
- future.set_result(result)
- with stack_context.ExceptionStackContext(handle_exception):
- func(*args, callback=_argument_adapter(set_result), **kwargs)
- return future
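-
-# Usage sketch: adapting a callback-style API so a coroutine can yield it
-# (illustrative only; ``fetch_with_callback`` is a hypothetical function
-# accepting a ``callback`` keyword argument):
-#
-#     @coroutine
-#     def fetch(url):
-#         response = yield Task(fetch_with_callback, url)
-#         raise Return(response)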
-
-
-class YieldFuture(YieldPoint):
- def __init__(self, future, io_loop=None):
- """Adapts a `.Future` to the `YieldPoint` interface.
-
- .. versionchanged:: 4.1
- The ``io_loop`` argument is deprecated.
- """
- self.future = future
- self.io_loop = io_loop or IOLoop.current()
-
- def start(self, runner):
- if not self.future.done():
- self.runner = runner
- self.key = object()
- runner.register_callback(self.key)
- self.io_loop.add_future(self.future, runner.result_callback(self.key))
- else:
- self.runner = None
- self.result_fn = self.future.result
-
- def is_ready(self):
- if self.runner is not None:
- return self.runner.is_ready(self.key)
- else:
- return True
-
- def get_result(self):
- if self.runner is not None:
- return self.runner.pop_result(self.key).result()
- else:
- return self.result_fn()
-
-
-def _contains_yieldpoint(children):
- """Returns True if ``children`` contains any YieldPoints.
-
- ``children`` may be a dict or a list, as used by `MultiYieldPoint`
- and `multi_future`.
- """
- if isinstance(children, dict):
- return any(isinstance(i, YieldPoint) for i in children.values())
- if isinstance(children, list):
- return any(isinstance(i, YieldPoint) for i in children)
- return False
-
-
-def multi(children, quiet_exceptions=()):
- """Runs multiple asynchronous operations in parallel.
-
- ``children`` may either be a list or a dict whose values are
- yieldable objects. ``multi()`` returns a new yieldable
- object that resolves to a parallel structure containing their
- results. If ``children`` is a list, the result is a list of
- results in the same order; if it is a dict, the result is a dict
- with the same keys.
-
- That is, ``results = yield multi(list_of_futures)`` is equivalent
- to::
-
- results = []
- for future in list_of_futures:
- results.append(yield future)
-
- If any children raise exceptions, ``multi()`` will raise the first
- one. All others will be logged, unless they are of types
- contained in the ``quiet_exceptions`` argument.
-
- If any of the inputs are `YieldPoints <YieldPoint>`, the returned
- yieldable object is a `YieldPoint`. Otherwise, returns a `.Future`.
- This means that the result of `multi` can be used in a native
- coroutine if and only if all of its children can be.
-
- In a ``yield``-based coroutine, it is not normally necessary to
- call this function directly, since the coroutine runner will
- do it automatically when a list or dict is yielded. However,
- it is necessary in ``await``-based coroutines, or to pass
- the ``quiet_exceptions`` argument.
-
- This function is available under the names ``multi()`` and ``Multi()``
- for historical reasons.
-
- .. versionchanged:: 4.2
- If multiple yieldables fail, any exceptions after the first
- (which is raised) will be logged. Added the ``quiet_exceptions``
- argument to suppress this logging for selected exception types.
-
- .. versionchanged:: 4.3
- Replaced the class ``Multi`` and the function ``multi_future``
- with a unified function ``multi``. Added support for yieldables
- other than `YieldPoint` and `.Future`.
-
- """
- if _contains_yieldpoint(children):
- return MultiYieldPoint(children, quiet_exceptions=quiet_exceptions)
- else:
- return multi_future(children, quiet_exceptions=quiet_exceptions)
-
-
-Multi = multi
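-
-# Usage sketch in a native (``await``-based) coroutine, where yielded
-# lists and dicts are not converted automatically (illustrative only;
-# ``client`` and the URLs are hypothetical):
-#
-#     async def fetch_both(client, url1, url2):
-#         r1, r2 = await multi([client.fetch(url1), client.fetch(url2)])
-#         return r1, r2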
-
-
-class MultiYieldPoint(YieldPoint):
- """Runs multiple asynchronous operations in parallel.
-
- This class is similar to `multi`, but it always creates a stack
- context even when no children require it. It is not compatible with
- native coroutines.
-
- .. versionchanged:: 4.2
- If multiple ``YieldPoints`` fail, any exceptions after the first
- (which is raised) will be logged. Added the ``quiet_exceptions``
- argument to suppress this logging for selected exception types.
-
- .. versionchanged:: 4.3
- Renamed from ``Multi`` to ``MultiYieldPoint``. The name ``Multi``
- remains as an alias for the equivalent `multi` function.
-
- .. deprecated:: 4.3
- Use `multi` instead.
- """
- def __init__(self, children, quiet_exceptions=()):
- self.keys = None
- if isinstance(children, dict):
- self.keys = list(children.keys())
- children = children.values()
- self.children = []
- for i in children:
- if not isinstance(i, YieldPoint):
- i = convert_yielded(i)
- if is_future(i):
- i = YieldFuture(i)
- self.children.append(i)
- assert all(isinstance(i, YieldPoint) for i in self.children)
- self.unfinished_children = set(self.children)
- self.quiet_exceptions = quiet_exceptions
-
- def start(self, runner):
- for i in self.children:
- i.start(runner)
-
- def is_ready(self):
- finished = list(itertools.takewhile(
- lambda i: i.is_ready(), self.unfinished_children))
- self.unfinished_children.difference_update(finished)
- return not self.unfinished_children
-
- def get_result(self):
- result_list = []
- exc_info = None
- for f in self.children:
- try:
- result_list.append(f.get_result())
- except Exception as e:
- if exc_info is None:
- exc_info = sys.exc_info()
- else:
- if not isinstance(e, self.quiet_exceptions):
- app_log.error("Multiple exceptions in yield list",
- exc_info=True)
- if exc_info is not None:
- raise_exc_info(exc_info)
- if self.keys is not None:
- return dict(zip(self.keys, result_list))
- else:
- return list(result_list)
-
-
-def multi_future(children, quiet_exceptions=()):
- """Wait for multiple asynchronous futures in parallel.
-
- This function is similar to `multi`, but does not support
- `YieldPoints <YieldPoint>`.
-
- .. versionadded:: 4.0
-
- .. versionchanged:: 4.2
- If multiple ``Futures`` fail, any exceptions after the first (which is
- raised) will be logged. Added the ``quiet_exceptions``
- argument to suppress this logging for selected exception types.
-
- .. deprecated:: 4.3
- Use `multi` instead.
- """
- if isinstance(children, dict):
- keys = list(children.keys())
- children = children.values()
- else:
- keys = None
- children = list(map(convert_yielded, children))
- assert all(is_future(i) for i in children)
- unfinished_children = set(children)
-
- future = Future()
- if not children:
- future.set_result({} if keys is not None else [])
-
- def callback(f):
- unfinished_children.remove(f)
- if not unfinished_children:
- result_list = []
- for f in children:
- try:
- result_list.append(f.result())
- except Exception as e:
- if future.done():
- if not isinstance(e, quiet_exceptions):
- app_log.error("Multiple exceptions in yield list",
- exc_info=True)
- else:
- future.set_exc_info(sys.exc_info())
- if not future.done():
- if keys is not None:
- future.set_result(dict(zip(keys, result_list)))
- else:
- future.set_result(result_list)
-
- listening = set()
- for f in children:
- if f not in listening:
- listening.add(f)
- f.add_done_callback(callback)
- return future
-
-
-def maybe_future(x):
- """Converts ``x`` into a `.Future`.
-
- If ``x`` is already a `.Future`, it is simply returned; otherwise
- it is wrapped in a new `.Future`. This is suitable for use as
- ``result = yield gen.maybe_future(f())`` when you don't know whether
- ``f()`` returns a `.Future` or not.
-
- .. deprecated:: 4.3
- This function only handles ``Futures``, not other yieldable objects.
- Instead of `maybe_future`, check for the non-future result types
- you expect (often just ``None``), and ``yield`` anything unknown.
- """
- if is_future(x):
- return x
- else:
- fut = Future()
- fut.set_result(x)
- return fut
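-
-# Usage sketch (illustrative only; ``f`` is a hypothetical function that
-# may return either a plain value or a `.Future`; see the deprecation
-# note above for the preferred alternative):
-#
-#     @coroutine
-#     def caller():
-#         result = yield maybe_future(f())
-#         raise Return(result)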
-
-
-def with_timeout(timeout, future, io_loop=None, quiet_exceptions=()):
- """Wraps a `.Future` (or other yieldable object) in a timeout.
-
- Raises `TimeoutError` if the input future does not complete before
- ``timeout``, which may be specified in any form allowed by
- `.IOLoop.add_timeout` (i.e. a `datetime.timedelta` or an absolute time
- relative to `.IOLoop.time`)
-
- If the wrapped `.Future` fails after it has timed out, the exception
- will be logged unless it is of a type contained in ``quiet_exceptions``
- (which may be an exception type or a sequence of types).
-
- Does not support `YieldPoint` subclasses.
-
- .. versionadded:: 4.0
-
- .. versionchanged:: 4.1
- Added the ``quiet_exceptions`` argument and the logging of unhandled
- exceptions.
-
- .. versionchanged:: 4.4
- Added support for yieldable objects other than `.Future`.
- """
- # TODO: allow YieldPoints in addition to other yieldables?
- # Tricky to do with stack_context semantics.
- #
- # It's tempting to optimize this by cancelling the input future on timeout
- # instead of creating a new one, but A) we can't know if we are the only
- # one waiting on the input future, so cancelling it might disrupt other
- # callers and B) concurrent futures can only be cancelled while they are
- # in the queue, so cancellation cannot reliably bound our waiting time.
- future = convert_yielded(future)
- result = Future()
- chain_future(future, result)
- if io_loop is None:
- io_loop = IOLoop.current()
-
- def error_callback(future):
- try:
- future.result()
- except Exception as e:
- if not isinstance(e, quiet_exceptions):
- app_log.error("Exception in Future %r after timeout",
- future, exc_info=True)
-
- def timeout_callback():
- if not result.done():
- result.set_exception(TimeoutError("Timeout"))
- # In case the wrapped future goes on to fail, log it.
- future.add_done_callback(error_callback)
- timeout_handle = io_loop.add_timeout(
- timeout, timeout_callback)
- if isinstance(future, Future):
- # We know this future will resolve on the IOLoop, so we don't
- # need the extra thread-safety of IOLoop.add_future (and we also
- # don't care about StackContext here).
- future.add_done_callback(
- lambda future: io_loop.remove_timeout(timeout_handle))
- else:
- # concurrent.futures.Futures may resolve on any thread, so we
- # need to route them back to the IOLoop.
- io_loop.add_future(
- future, lambda future: io_loop.remove_timeout(timeout_handle))
- return result
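-
-# Usage sketch (illustrative only; ``client`` and ``url`` are hypothetical,
-# and the deadline is a `datetime.timedelta` as accepted by
-# `.IOLoop.add_timeout`):
-#
-#     import datetime
-#
-#     @coroutine
-#     def fetch_with_deadline(client, url):
-#         try:
-#             response = yield with_timeout(datetime.timedelta(seconds=5),
-#                                           client.fetch(url))
-#         except TimeoutError:
-#             response = None
-#         raise Return(response)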
-
-
-def sleep(duration):
- """Return a `.Future` that resolves after the given number of seconds.
-
- When used with ``yield`` in a coroutine, this is a non-blocking
- analogue to `time.sleep` (which should not be used in coroutines
- because it is blocking)::
-
- yield gen.sleep(0.5)
-
- Note that calling this function on its own does nothing; you must
- wait on the `.Future` it returns (usually by yielding it).
-
- .. versionadded:: 4.1
- """
- f = Future()
- IOLoop.current().call_later(duration, lambda: f.set_result(None))
- return f
-
-
-_null_future = Future()
-_null_future.set_result(None)
-
-moment = Future()
-moment.__doc__ = \
- """A special object which may be yielded to allow the IOLoop to run for
-one iteration.
-
-This is not needed in normal use but it can be helpful in long-running
-coroutines that are likely to yield Futures that are ready instantly.
-
-Usage: ``yield gen.moment``
-
-.. versionadded:: 4.0
-
-.. deprecated:: 4.5
- ``yield None`` is now equivalent to ``yield gen.moment``.
-"""
-moment.set_result(None)
-
-
-class Runner(object):
- """Internal implementation of `tornado.gen.engine`.
-
- Maintains information about pending callbacks and their results.
-
- The results of the generator are stored in ``result_future`` (a
- `.TracebackFuture`).
- """
- def __init__(self, gen, result_future, first_yielded):
- self.gen = gen
- self.result_future = result_future
- self.future = _null_future
- self.yield_point = None
- self.pending_callbacks = None
- self.results = None
- self.running = False
- self.finished = False
- self.had_exception = False
- self.io_loop = IOLoop.current()
- # For efficiency, we do not create a stack context until we
- # reach a YieldPoint (stack contexts are required for the historical
- # semantics of YieldPoints, but not for Futures). When we have
- # done so, this field will be set and must be called at the end
- # of the coroutine.
- self.stack_context_deactivate = None
- if self.handle_yield(first_yielded):
- gen = result_future = first_yielded = None
- self.run()
-
- def register_callback(self, key):
- """Adds ``key`` to the list of callbacks."""
- if self.pending_callbacks is None:
- # Lazily initialize the old-style YieldPoint data structures.
- self.pending_callbacks = set()
- self.results = {}
- if key in self.pending_callbacks:
- raise KeyReuseError("key %r is already pending" % (key,))
- self.pending_callbacks.add(key)
-
- def is_ready(self, key):
- """Returns true if a result is available for ``key``."""
- if self.pending_callbacks is None or key not in self.pending_callbacks:
- raise UnknownKeyError("key %r is not pending" % (key,))
- return key in self.results
-
- def set_result(self, key, result):
- """Sets the result for ``key`` and attempts to resume the generator."""
- self.results[key] = result
- if self.yield_point is not None and self.yield_point.is_ready():
- try:
- self.future.set_result(self.yield_point.get_result())
- except:
- self.future.set_exc_info(sys.exc_info())
- self.yield_point = None
- self.run()
-
- def pop_result(self, key):
- """Returns the result for ``key`` and unregisters it."""
- self.pending_callbacks.remove(key)
- return self.results.pop(key)
-
- def run(self):
- """Starts or resumes the generator, running until it reaches a
- yield point that is not ready.
- """
- if self.running or self.finished:
- return
- try:
- self.running = True
- while True:
- future = self.future
- if not future.done():
- return
- self.future = None
- try:
- orig_stack_contexts = stack_context._state.contexts
- exc_info = None
-
- try:
- value = future.result()
- except Exception:
- self.had_exception = True
- exc_info = sys.exc_info()
- future = None
-
- if exc_info is not None:
- try:
- yielded = self.gen.throw(*exc_info)
- finally:
- # Break up a reference to itself
- # for faster GC on CPython.
- exc_info = None
- else:
- yielded = self.gen.send(value)
-
- if stack_context._state.contexts is not orig_stack_contexts:
- self.gen.throw(
- stack_context.StackContextInconsistentError(
- 'stack_context inconsistency (probably caused '
- 'by yield within a "with StackContext" block)'))
- except (StopIteration, Return) as e:
- self.finished = True
- self.future = _null_future
- if self.pending_callbacks and not self.had_exception:
- # If we ran cleanly without waiting on all callbacks,
- # raise an error (really more of a warning). If we
- # had an exception then some callbacks may have been
- # orphaned, so skip the check in that case.
- raise LeakedCallbackError(
- "finished without waiting for callbacks %r" %
- self.pending_callbacks)
- self.result_future.set_result(_value_from_stopiteration(e))
- self.result_future = None
- self._deactivate_stack_context()
- return
- except Exception:
- self.finished = True
- self.future = _null_future
- self.result_future.set_exc_info(sys.exc_info())
- self.result_future = None
- self._deactivate_stack_context()
- return
- if not self.handle_yield(yielded):
- return
- yielded = None
- finally:
- self.running = False
-
- def handle_yield(self, yielded):
- # Lists containing YieldPoints require stack contexts;
- # other lists are handled in convert_yielded.
- if _contains_yieldpoint(yielded):
- yielded = multi(yielded)
-
- if isinstance(yielded, YieldPoint):
- # YieldPoints are too closely coupled to the Runner to go
- # through the generic convert_yielded mechanism.
- self.future = TracebackFuture()
-
- def start_yield_point():
- try:
- yielded.start(self)
- if yielded.is_ready():
- self.future.set_result(
- yielded.get_result())
- else:
- self.yield_point = yielded
- except Exception:
- self.future = TracebackFuture()
- self.future.set_exc_info(sys.exc_info())
-
- if self.stack_context_deactivate is None:
- # Start a stack context if this is the first
- # YieldPoint we've seen.
- with stack_context.ExceptionStackContext(
- self.handle_exception) as deactivate:
- self.stack_context_deactivate = deactivate
-
- def cb():
- start_yield_point()
- self.run()
- self.io_loop.add_callback(cb)
- return False
- else:
- start_yield_point()
- else:
- try:
- self.future = convert_yielded(yielded)
- except BadYieldError:
- self.future = TracebackFuture()
- self.future.set_exc_info(sys.exc_info())
-
- if not self.future.done() or self.future is moment:
- def inner(f):
- # Break a reference cycle to speed GC.
- f = None # noqa
- self.run()
- self.io_loop.add_future(
- self.future, inner)
- return False
- return True
-
- def result_callback(self, key):
- return stack_context.wrap(_argument_adapter(
- functools.partial(self.set_result, key)))
-
- def handle_exception(self, typ, value, tb):
- if not self.running and not self.finished:
- self.future = TracebackFuture()
- self.future.set_exc_info((typ, value, tb))
- self.run()
- return True
- else:
- return False
-
- def _deactivate_stack_context(self):
- if self.stack_context_deactivate is not None:
- self.stack_context_deactivate()
- self.stack_context_deactivate = None
-
-
-Arguments = collections.namedtuple('Arguments', ['args', 'kwargs'])
-
-
-def _argument_adapter(callback):
- """Returns a function that when invoked runs ``callback`` with one arg.
-
- If the function returned by this function is called with exactly
- one argument, that argument is passed to ``callback``. Otherwise
- the args tuple and kwargs dict are wrapped in an `Arguments` object.
- """
- def wrapper(*args, **kwargs):
- if kwargs or len(args) > 1:
- callback(Arguments(args, kwargs))
- elif args:
- callback(args[0])
- else:
- callback(None)
- return wrapper
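-
-# Behavior sketch (illustrative only; ``callback`` is any one-argument
-# callable): a single positional argument passes through unchanged, while
-# multiple positional or any keyword arguments are wrapped in an
-# ``Arguments`` namedtuple:
-#
-#     adapted = _argument_adapter(callback)
-#     adapted(1)        # callback receives 1
-#     adapted(1, 2)     # callback receives Arguments(args=(1, 2), kwargs={})
-#     adapted()         # callback receives None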
-
-
-# Convert Awaitables into Futures. It is unfortunately possible
-# to have infinite recursion here if those Awaitables assume that
-# we're using a different coroutine runner and yield objects
-# we don't understand. If that happens, the solution is to
-# register that runner's yieldable objects with convert_yielded.
-if sys.version_info >= (3, 3):
- exec(textwrap.dedent("""
- @coroutine
- def _wrap_awaitable(x):
- if hasattr(x, '__await__'):
- x = x.__await__()
- return (yield from x)
- """))
-else:
- # Py2-compatible version for use with Cython.
- # Copied from PEP 380.
- @coroutine
- def _wrap_awaitable(x):
- if hasattr(x, '__await__'):
- _i = x.__await__()
- else:
- _i = iter(x)
- try:
- _y = next(_i)
- except StopIteration as _e:
- _r = _value_from_stopiteration(_e)
- else:
- while 1:
- try:
- _s = yield _y
- except GeneratorExit as _e:
- try:
- _m = _i.close
- except AttributeError:
- pass
- else:
- _m()
- raise _e
- except BaseException as _e:
- _x = sys.exc_info()
- try:
- _m = _i.throw
- except AttributeError:
- raise _e
- else:
- try:
- _y = _m(*_x)
- except StopIteration as _e:
- _r = _value_from_stopiteration(_e)
- break
- else:
- try:
- if _s is None:
- _y = next(_i)
- else:
- _y = _i.send(_s)
- except StopIteration as _e:
- _r = _value_from_stopiteration(_e)
- break
- raise Return(_r)
-
-
-def convert_yielded(yielded):
- """Convert a yielded object into a `.Future`.
-
- The default implementation accepts lists, dictionaries, and Futures.
-
- If the `~functools.singledispatch` library is available, this function
- may be extended to support additional types. For example::
-
- @convert_yielded.register(asyncio.Future)
- def _(asyncio_future):
- return tornado.platform.asyncio.to_tornado_future(asyncio_future)
-
- .. versionadded:: 4.1
- """
- # Lists and dicts containing YieldPoints were handled earlier.
- if yielded is None:
- return moment
- elif isinstance(yielded, (list, dict)):
- return multi(yielded)
- elif is_future(yielded):
- return yielded
- elif isawaitable(yielded):
- return _wrap_awaitable(yielded)
- else:
- raise BadYieldError("yielded unknown object %r" % (yielded,))
-
-
-if singledispatch is not None:
- convert_yielded = singledispatch(convert_yielded)
-
- try:
- # If we can import t.p.asyncio, do it for its side effect
- # (registering asyncio.Future with convert_yielded).
- # It's ugly to do this here, but it prevents a cryptic
- # infinite recursion in _wrap_awaitable.
- # Note that even with this, asyncio integration is unlikely
- # to work unless the application also configures AsyncIOLoop,
- # but at least the error messages in that case are more
- # comprehensible than a stack overflow.
- import tornado.platform.asyncio
- except ImportError:
- pass
- else:
- # Reference the imported module to make pyflakes happy.
- tornado
+"""``tornado.gen`` is a generator-based interface to make it easier to
+work in an asynchronous environment. Code using the ``gen`` module
+is technically asynchronous, but it is written as a single generator
+instead of a collection of separate functions.
+
+For example, the following asynchronous handler:
+
+.. testcode::
+
+ class AsyncHandler(RequestHandler):
+ @asynchronous
+ def get(self):
+ http_client = AsyncHTTPClient()
+ http_client.fetch("http://example.com",
+ callback=self.on_fetch)
+
+ def on_fetch(self, response):
+ do_something_with_response(response)
+ self.render("template.html")
+
+.. testoutput::
+ :hide:
+
+could be written with ``gen`` as:
+
+.. testcode::
+
+ class GenAsyncHandler(RequestHandler):
+ @gen.coroutine
+ def get(self):
+ http_client = AsyncHTTPClient()
+ response = yield http_client.fetch("http://example.com")
+ do_something_with_response(response)
+ self.render("template.html")
+
+.. testoutput::
+ :hide:
+
+Most asynchronous functions in Tornado return a `.Future`;
+yielding this object returns its `~.Future.result`.
+
+You can also yield a list or dict of ``Futures``, which will be
+started at the same time and run in parallel; a list or dict of results will
+be returned when they are all finished:
+
+.. testcode::
+
+ @gen.coroutine
+ def get(self):
+ http_client = AsyncHTTPClient()
+ response1, response2 = yield [http_client.fetch(url1),
+ http_client.fetch(url2)]
+ response_dict = yield dict(response3=http_client.fetch(url3),
+ response4=http_client.fetch(url4))
+ response3 = response_dict['response3']
+ response4 = response_dict['response4']
+
+.. testoutput::
+ :hide:
+
+If the `~functools.singledispatch` library is available (standard in
+Python 3.4, available via the `singledispatch
+<https://pypi.python.org/pypi/singledispatch>`_ package on older
+versions), additional types of objects may be yielded. Tornado includes
+support for ``asyncio.Future`` and Twisted's ``Deferred`` class when
+``tornado.platform.asyncio`` and ``tornado.platform.twisted`` are imported.
+See the `convert_yielded` function to extend this mechanism.
+
+.. versionchanged:: 3.2
+ Dict support added.
+
+.. versionchanged:: 4.1
+ Support added for yielding ``asyncio`` Futures and Twisted Deferreds
+ via ``singledispatch``.
+
+"""
+from __future__ import absolute_import, division, print_function
+
+import collections
+import functools
+import itertools
+import os
+import sys
+import textwrap
+import types
+import weakref
+
+from tornado.concurrent import Future, TracebackFuture, is_future, chain_future
+from tornado.ioloop import IOLoop
+from tornado.log import app_log
+from tornado import stack_context
+from tornado.util import PY3, raise_exc_info
+
+try:
+ try:
+ # py34+
+ from functools import singledispatch # type: ignore
+ except ImportError:
+ from singledispatch import singledispatch # backport
+except ImportError:
+ # In most cases, singledispatch is required (to avoid
+ # difficult-to-diagnose problems in which the functionality
+ # available differs depending on which invisible packages are
+ # installed). However, in Google App Engine third-party
+ # dependencies are more trouble, so we allow this module to be
+ # imported without it.
+ if 'APPENGINE_RUNTIME' not in os.environ:
+ raise
+ singledispatch = None
+
+try:
+ try:
+ # py35+
+ from collections.abc import Generator as GeneratorType # type: ignore
+ except ImportError:
+ from backports_abc import Generator as GeneratorType # type: ignore
+
+ try:
+ # py35+
+ from inspect import isawaitable # type: ignore
+ except ImportError:
+ from backports_abc import isawaitable
+except ImportError:
+ if 'APPENGINE_RUNTIME' not in os.environ:
+ raise
+ from types import GeneratorType
+
+ def isawaitable(x): # type: ignore
+ return False
+
+if PY3:
+ import builtins
+else:
+ import __builtin__ as builtins
+
+
+class KeyReuseError(Exception):
+ pass
+
+
+class UnknownKeyError(Exception):
+ pass
+
+
+class LeakedCallbackError(Exception):
+ pass
+
+
+class BadYieldError(Exception):
+ pass
+
+
+class ReturnValueIgnoredError(Exception):
+ pass
+
+
+class TimeoutError(Exception):
+ """Exception raised by ``with_timeout``."""
+
+
+def _value_from_stopiteration(e):
+ try:
+ # StopIteration has a value attribute beginning in py33.
+ # So does our Return class.
+ return e.value
+ except AttributeError:
+ pass
+ try:
+ # Cython backports coroutine functionality by putting the value in
+ # e.args[0].
+ return e.args[0]
+ except (AttributeError, IndexError):
+ return None
+
+
+def engine(func):
+ """Callback-oriented decorator for asynchronous generators.
+
+ This is an older interface; for new code that does not need to be
+ compatible with versions of Tornado older than 3.0 the
+ `coroutine` decorator is recommended instead.
+
+ This decorator is similar to `coroutine`, except it does not
+ return a `.Future` and the ``callback`` argument is not treated
+ specially.
+
+ In most cases, functions decorated with `engine` should take
+ a ``callback`` argument and invoke it with their result when
+ they are finished. One notable exception is the
+ `~tornado.web.RequestHandler` :ref:`HTTP verb methods <verbs>`,
+ which use ``self.finish()`` in place of a callback argument.
+ """
+ func = _make_coroutine_wrapper(func, replace_callback=False)
+
+ @functools.wraps(func)
+ def wrapper(*args, **kwargs):
+ future = func(*args, **kwargs)
+
+ def final_callback(future):
+ if future.result() is not None:
+ raise ReturnValueIgnoredError(
+ "@gen.engine functions cannot return values: %r" %
+ (future.result(),))
+ # The engine interface doesn't give us any way to return
+ # errors but to raise them into the stack context.
+ # Save the stack context here to use when the Future has resolved.
+ future.add_done_callback(stack_context.wrap(final_callback))
+ return wrapper
+
+
+def coroutine(func, replace_callback=True):
+ """Decorator for asynchronous generators.
+
+ Any generator that yields objects from this module must be wrapped
+ in either this decorator or `engine`.
+
+ Coroutines may "return" by raising the special exception
+ `Return(value) <Return>`. In Python 3.3+, it is also possible for
+ the function to simply use the ``return value`` statement (prior to
+ Python 3.3 generators were not allowed to also return values).
+ In all versions of Python a coroutine that simply wishes to exit
+ early may use the ``return`` statement without a value.
+
+ Functions with this decorator return a `.Future`. Additionally,
+ they may be called with a ``callback`` keyword argument, which
+ will be invoked with the future's result when it resolves. If the
+ coroutine fails, the callback will not be run and an exception
+ will be raised into the surrounding `.StackContext`. The
+ ``callback`` argument is not visible inside the decorated
+ function; it is handled by the decorator itself.
+
+ From the caller's perspective, ``@gen.coroutine`` is similar to
+ the combination of ``@return_future`` and ``@gen.engine``.
+
+ .. warning::
+
+ When exceptions occur inside a coroutine, the exception
+ information will be stored in the `.Future` object. You must
+ examine the result of the `.Future` object, or the exception
+ may go unnoticed by your code. This means yielding the function
+ if called from another coroutine, using something like
+ `.IOLoop.run_sync` for top-level calls, or passing the `.Future`
+ to `.IOLoop.add_future`.
+
+ """
+ return _make_coroutine_wrapper(func, replace_callback=True)
+
+
+# Ties lifetime of runners to their result futures. GitHub issue #1769.
+# Generators, like any object in Python, must be strongly referenced
+# in order to not be cleaned up by the garbage collector. When using
+# coroutines, the Runner object is what strong-refs the inner
+# generator. However, the only item that strong-reffed the Runner
+# was the last Future that the inner generator yielded (via the
+# Future's internal done_callback list). Usually this is enough, but
+# it is also possible for this Future to not have any strong references
+# other than other objects referenced by the Runner object (usually
+# when using other callback patterns and/or weakrefs). In this
+# situation, if a garbage collection ran, a cycle would be detected and
+# Runner objects could be destroyed along with their inner generators
+# and everything in their local scope.
+# This map provides strong references to Runner objects as long as
+# their result future objects also have strong references (typically
+# from the parent coroutine's Runner). This keeps the coroutine's
+# Runner alive.
+_futures_to_runners = weakref.WeakKeyDictionary()
+
+
+def _make_coroutine_wrapper(func, replace_callback):
+ """The inner workings of ``@gen.coroutine`` and ``@gen.engine``.
+
+ The two decorators differ in their treatment of the ``callback``
+ argument, so we cannot simply implement ``@engine`` in terms of
+ ``@coroutine``.
+ """
+ # On Python 3.5, set the coroutine flag on our generator, to allow it
+ # to be used with 'await'.
+ wrapped = func
+ if hasattr(types, 'coroutine'):
+ func = types.coroutine(func)
+
+ @functools.wraps(wrapped)
+ def wrapper(*args, **kwargs):
+ future = TracebackFuture()
+
+ if replace_callback and 'callback' in kwargs:
+ callback = kwargs.pop('callback')
+ IOLoop.current().add_future(
+ future, lambda future: callback(future.result()))
+
+ try:
+ result = func(*args, **kwargs)
+ except (Return, StopIteration) as e:
+ result = _value_from_stopiteration(e)
+ except Exception:
+ future.set_exc_info(sys.exc_info())
+ return future
+ else:
+ if isinstance(result, GeneratorType):
+ # Inline the first iteration of Runner.run. This lets us
+ # avoid the cost of creating a Runner when the coroutine
+ # never actually yields, which in turn allows us to
+ # use "optional" coroutines in critical path code without
+ # performance penalty for the synchronous case.
+ try:
+ orig_stack_contexts = stack_context._state.contexts
+ yielded = next(result)
+ if stack_context._state.contexts is not orig_stack_contexts:
+ yielded = TracebackFuture()
+ yielded.set_exception(
+ stack_context.StackContextInconsistentError(
+ 'stack_context inconsistency (probably caused '
+ 'by yield within a "with StackContext" block)'))
+ except (StopIteration, Return) as e:
+ future.set_result(_value_from_stopiteration(e))
+ except Exception:
+ future.set_exc_info(sys.exc_info())
+ else:
+ _futures_to_runners[future] = Runner(result, future, yielded)
+ yielded = None
+ try:
+ return future
+ finally:
+ # Subtle memory optimization: if next() raised an exception,
+ # the future's exc_info contains a traceback which
+ # includes this stack frame. This creates a cycle,
+ # which will be collected at the next full GC but has
+ # been shown to greatly increase memory usage of
+ # benchmarks (relative to the refcount-based scheme
+ # used in the absence of cycles). We can avoid the
+ # cycle by clearing the local variable after we return it.
+ future = None
+ future.set_result(result)
+ return future
+
+ wrapper.__wrapped__ = wrapped
+ wrapper.__tornado_coroutine__ = True
+ return wrapper
+
+
+def is_coroutine_function(func):
+ """Return whether *func* is a coroutine function, i.e. a function
+ wrapped with `~.gen.coroutine`.
+
+ .. versionadded:: 4.5
+ """
+ return getattr(func, '__tornado_coroutine__', False)
+
+
+class Return(Exception):
+ """Special exception to return a value from a `coroutine`.
+
+ If this exception is raised, its value argument is used as the
+ result of the coroutine::
+
+ @gen.coroutine
+ def fetch_json(url):
+ response = yield AsyncHTTPClient().fetch(url)
+ raise gen.Return(json_decode(response.body))
+
+ In Python 3.3, this exception is no longer necessary: the ``return``
+ statement can be used directly to return a value (previously
+ ``yield`` and ``return`` with a value could not be combined in the
+ same function).
+
+ By analogy with the return statement, the value argument is optional,
+ but it is never necessary to ``raise gen.Return()``. The ``return``
+ statement can be used with no arguments instead.
+ """
+ def __init__(self, value=None):
+ super(Return, self).__init__()
+ self.value = value
+ # Cython recognizes subclasses of StopIteration with a .args tuple.
+ self.args = (value,)
+
+
+class WaitIterator(object):
+ """Provides an iterator to yield the results of futures as they finish.
+
+ Yielding a set of futures like this:
+
+ ``results = yield [future1, future2]``
+
+ pauses the coroutine until both ``future1`` and ``future2``
+ return, and then restarts the coroutine with the results of both
+ futures. If either future is an exception, the expression will
+ raise that exception and all the results will be lost.
+
+ If you need to get the result of each future as soon as possible,
+ or if you need the result of some futures even if others produce
+ errors, you can use ``WaitIterator``::
+
+ wait_iterator = gen.WaitIterator(future1, future2)
+ while not wait_iterator.done():
+ try:
+ result = yield wait_iterator.next()
+ except Exception as e:
+ print("Error {} from {}".format(e, wait_iterator.current_future))
+ else:
+ print("Result {} received from {} at {}".format(
+ result, wait_iterator.current_future,
+ wait_iterator.current_index))
+
+ Because results are returned as soon as they are available, the
+ output from the iterator *will not be in the same order as the
+ input arguments*. If you need to know which future produced the
+ current result, you can use the attributes
+ ``WaitIterator.current_future`` or ``WaitIterator.current_index``
+ to get the index of the future from the input list. (If keyword
+ arguments were used in the construction of the `WaitIterator`,
+ ``current_index`` will use the corresponding keyword.)
+
+ On Python 3.5, `WaitIterator` implements the async iterator
+ protocol, so it can be used with the ``async for`` statement (note
+ that in this version the entire iteration is aborted if any value
+ raises an exception, while the previous example can continue past
+ individual errors)::
+
+ async for result in gen.WaitIterator(future1, future2):
+ print("Result {} received from {} at {}".format(
+ result, wait_iterator.current_future,
+ wait_iterator.current_index))
+
+ .. versionadded:: 4.1
+
+ .. versionchanged:: 4.3
+ Added ``async for`` support in Python 3.5.
+
+ """
+ def __init__(self, *args, **kwargs):
+ if args and kwargs:
+ raise ValueError(
+ "You must provide args or kwargs, not both")
+
+ if kwargs:
+ self._unfinished = dict((f, k) for (k, f) in kwargs.items())
+ futures = list(kwargs.values())
+ else:
+ self._unfinished = dict((f, i) for (i, f) in enumerate(args))
+ futures = args
+
+ self._finished = collections.deque()
+ self.current_index = self.current_future = None
+ self._running_future = None
+
+ for future in futures:
+ future.add_done_callback(self._done_callback)
+
+ def done(self):
+ """Returns True if this iterator has no more results."""
+ if self._finished or self._unfinished:
+ return False
+ # Clear the 'current' values when iteration is done.
+ self.current_index = self.current_future = None
+ return True
+
+ def next(self):
+ """Returns a `.Future` that will yield the next available result.
+
+ Note that this `.Future` will not be the same object as any of
+ the inputs.
+ """
+ self._running_future = TracebackFuture()
+
+ if self._finished:
+ self._return_result(self._finished.popleft())
+
+ return self._running_future
+
+ def _done_callback(self, done):
+ if self._running_future and not self._running_future.done():
+ self._return_result(done)
+ else:
+ self._finished.append(done)
+
+ def _return_result(self, done):
+ """Called set the returned future's state that of the future
+ we yielded, and set the current future for the iterator.
+ """
+ chain_future(done, self._running_future)
+
+ self.current_future = done
+ self.current_index = self._unfinished.pop(done)
+
+ @coroutine
+ def __aiter__(self):
+ raise Return(self)
+
+ def __anext__(self):
+ if self.done():
+ # Lookup by name to silence pyflakes on older versions.
+ raise getattr(builtins, 'StopAsyncIteration')()
+ return self.next()
+
+
+class YieldPoint(object):
+ """Base class for objects that may be yielded from the generator.
+
+ .. deprecated:: 4.0
+ Use `Futures <.Future>` instead.
+ """
+ def start(self, runner):
+ """Called by the runner after the generator has yielded.
+
+ No other methods will be called on this object before ``start``.
+ """
+ raise NotImplementedError()
+
+ def is_ready(self):
+ """Called by the runner to determine whether to resume the generator.
+
+ Returns a boolean; may be called more than once.
+ """
+ raise NotImplementedError()
+
+ def get_result(self):
+ """Returns the value to use as the result of the yield expression.
+
+ This method will only be called once, and only after `is_ready`
+ has returned true.
+ """
+ raise NotImplementedError()
+
+
+class Callback(YieldPoint):
+ """Returns a callable object that will allow a matching `Wait` to proceed.
+
+ The key may be any value suitable for use as a dictionary key, and is
+ used to match ``Callbacks`` to their corresponding ``Waits``. The key
+ must be unique among outstanding callbacks within a single run of the
+ generator function, but may be reused across different runs of the same
+ function (so constants generally work fine).
+
+ The callback may be called with zero or one arguments; if an argument
+ is given it will be returned by `Wait`.
+
+ .. deprecated:: 4.0
+ Use `Futures <.Future>` instead.
+ """
+ def __init__(self, key):
+ self.key = key
+
+ def start(self, runner):
+ self.runner = runner
+ runner.register_callback(self.key)
+
+ def is_ready(self):
+ return True
+
+ def get_result(self):
+ return self.runner.result_callback(self.key)
+
+
+class Wait(YieldPoint):
+ """Returns the argument passed to the result of a previous `Callback`.
+
+ .. deprecated:: 4.0
+ Use `Futures <.Future>` instead.
+ """
+ def __init__(self, key):
+ self.key = key
+
+ def start(self, runner):
+ self.runner = runner
+
+ def is_ready(self):
+ return self.runner.is_ready(self.key)
+
+ def get_result(self):
+ return self.runner.pop_result(self.key)
+
+
+class WaitAll(YieldPoint):
+ """Returns the results of multiple previous `Callbacks <Callback>`.
+
+ The argument is a sequence of `Callback` keys, and the result is
+ a list of results in the same order.
+
+ `WaitAll` is equivalent to yielding a list of `Wait` objects.
+
+ .. deprecated:: 4.0
+ Use `Futures <.Future>` instead.
+ """
+ def __init__(self, keys):
+ self.keys = keys
+
+ def start(self, runner):
+ self.runner = runner
+
+ def is_ready(self):
+ return all(self.runner.is_ready(key) for key in self.keys)
+
+ def get_result(self):
+ return [self.runner.pop_result(key) for key in self.keys]
+
+
+def Task(func, *args, **kwargs):
+ """Adapts a callback-based asynchronous function for use in coroutines.
+
+ Takes a function (and optional additional arguments) and runs it with
+ those arguments plus a ``callback`` keyword argument. The argument passed
+ to the callback is returned as the result of the yield expression.
+
+ .. versionchanged:: 4.0
+ ``gen.Task`` is now a function that returns a `.Future`, instead of
+ a subclass of `YieldPoint`. It still behaves the same way when
+ yielded.
+ """
+ future = Future()
+
+ def handle_exception(typ, value, tb):
+ if future.done():
+ return False
+ future.set_exc_info((typ, value, tb))
+ return True
+
+ def set_result(result):
+ if future.done():
+ return
+ future.set_result(result)
+ with stack_context.ExceptionStackContext(handle_exception):
+ func(*args, callback=_argument_adapter(set_result), **kwargs)
+ return future
+
+
+class YieldFuture(YieldPoint):
+ def __init__(self, future, io_loop=None):
+ """Adapts a `.Future` to the `YieldPoint` interface.
+
+ .. versionchanged:: 4.1
+ The ``io_loop`` argument is deprecated.
+ """
+ self.future = future
+ self.io_loop = io_loop or IOLoop.current()
+
+ def start(self, runner):
+ if not self.future.done():
+ self.runner = runner
+ self.key = object()
+ runner.register_callback(self.key)
+ self.io_loop.add_future(self.future, runner.result_callback(self.key))
+ else:
+ self.runner = None
+ self.result_fn = self.future.result
+
+ def is_ready(self):
+ if self.runner is not None:
+ return self.runner.is_ready(self.key)
+ else:
+ return True
+
+ def get_result(self):
+ if self.runner is not None:
+ return self.runner.pop_result(self.key).result()
+ else:
+ return self.result_fn()
+
+
+def _contains_yieldpoint(children):
+ """Returns True if ``children`` contains any YieldPoints.
+
+ ``children`` may be a dict or a list, as used by `MultiYieldPoint`
+ and `multi_future`.
+ """
+ if isinstance(children, dict):
+ return any(isinstance(i, YieldPoint) for i in children.values())
+ if isinstance(children, list):
+ return any(isinstance(i, YieldPoint) for i in children)
+ return False
+
+
+def multi(children, quiet_exceptions=()):
+ """Runs multiple asynchronous operations in parallel.
+
+ ``children`` may either be a list or a dict whose values are
+ yieldable objects. ``multi()`` returns a new yieldable
+ object that resolves to a parallel structure containing their
+ results. If ``children`` is a list, the result is a list of
+ results in the same order; if it is a dict, the result is a dict
+ with the same keys.
+
+ That is, ``results = yield multi(list_of_futures)`` is equivalent
+ to::
+
+ results = []
+ for future in list_of_futures:
+ results.append(yield future)
+
+ If any children raise exceptions, ``multi()`` will raise the first
+ one. All others will be logged, unless they are of types
+ contained in the ``quiet_exceptions`` argument.
+
+ If any of the inputs are `YieldPoints <YieldPoint>`, the returned
+ yieldable object is a `YieldPoint`. Otherwise, returns a `.Future`.
+ This means that the result of `multi` can be used in a native
+ coroutine if and only if all of its children can be.
+
+ In a ``yield``-based coroutine, it is not normally necessary to
+ call this function directly, since the coroutine runner will
+ do it automatically when a list or dict is yielded. However,
+ it is necessary in ``await``-based coroutines, or to pass
+ the ``quiet_exceptions`` argument.
+
+ This function is available under the names ``multi()`` and ``Multi()``
+ for historical reasons.
+
+ .. versionchanged:: 4.2
+ If multiple yieldables fail, any exceptions after the first
+ (which is raised) will be logged. Added the ``quiet_exceptions``
+ argument to suppress this logging for selected exception types.
+
+ .. versionchanged:: 4.3
+ Replaced the class ``Multi`` and the function ``multi_future``
+ with a unified function ``multi``. Added support for yieldables
+ other than `YieldPoint` and `.Future`.
+
+ """
+ if _contains_yieldpoint(children):
+ return MultiYieldPoint(children, quiet_exceptions=quiet_exceptions)
+ else:
+ return multi_future(children, quiet_exceptions=quiet_exceptions)
+
+
+Multi = multi
+
+
+class MultiYieldPoint(YieldPoint):
+ """Runs multiple asynchronous operations in parallel.
+
+ This class is similar to `multi`, but it always creates a stack
+ context even when no children require it. It is not compatible with
+ native coroutines.
+
+ .. versionchanged:: 4.2
+ If multiple ``YieldPoints`` fail, any exceptions after the first
+ (which is raised) will be logged. Added the ``quiet_exceptions``
+ argument to suppress this logging for selected exception types.
+
+ .. versionchanged:: 4.3
+ Renamed from ``Multi`` to ``MultiYieldPoint``. The name ``Multi``
+ remains as an alias for the equivalent `multi` function.
+
+ .. deprecated:: 4.3
+ Use `multi` instead.
+ """
+ def __init__(self, children, quiet_exceptions=()):
+ self.keys = None
+ if isinstance(children, dict):
+ self.keys = list(children.keys())
+ children = children.values()
+ self.children = []
+ for i in children:
+ if not isinstance(i, YieldPoint):
+ i = convert_yielded(i)
+ if is_future(i):
+ i = YieldFuture(i)
+ self.children.append(i)
+ assert all(isinstance(i, YieldPoint) for i in self.children)
+ self.unfinished_children = set(self.children)
+ self.quiet_exceptions = quiet_exceptions
+
+ def start(self, runner):
+ for i in self.children:
+ i.start(runner)
+
+ def is_ready(self):
+ finished = list(itertools.takewhile(
+ lambda i: i.is_ready(), self.unfinished_children))
+ self.unfinished_children.difference_update(finished)
+ return not self.unfinished_children
+
+ def get_result(self):
+ result_list = []
+ exc_info = None
+ for f in self.children:
+ try:
+ result_list.append(f.get_result())
+ except Exception as e:
+ if exc_info is None:
+ exc_info = sys.exc_info()
+ else:
+ if not isinstance(e, self.quiet_exceptions):
+ app_log.error("Multiple exceptions in yield list",
+ exc_info=True)
+ if exc_info is not None:
+ raise_exc_info(exc_info)
+ if self.keys is not None:
+ return dict(zip(self.keys, result_list))
+ else:
+ return list(result_list)
+
+
+def multi_future(children, quiet_exceptions=()):
+ """Wait for multiple asynchronous futures in parallel.
+
+ This function is similar to `multi`, but does not support
+ `YieldPoints <YieldPoint>`.
+
+ .. versionadded:: 4.0
+
+ .. versionchanged:: 4.2
+ If multiple ``Futures`` fail, any exceptions after the first (which is
+ raised) will be logged. Added the ``quiet_exceptions``
+ argument to suppress this logging for selected exception types.
+
+ .. deprecated:: 4.3
+ Use `multi` instead.
+ """
+ if isinstance(children, dict):
+ keys = list(children.keys())
+ children = children.values()
+ else:
+ keys = None
+ children = list(map(convert_yielded, children))
+ assert all(is_future(i) for i in children)
+ unfinished_children = set(children)
+
+ future = Future()
+ if not children:
+ future.set_result({} if keys is not None else [])
+
+ def callback(f):
+ unfinished_children.remove(f)
+ if not unfinished_children:
+ result_list = []
+ for f in children:
+ try:
+ result_list.append(f.result())
+ except Exception as e:
+ if future.done():
+ if not isinstance(e, quiet_exceptions):
+ app_log.error("Multiple exceptions in yield list",
+ exc_info=True)
+ else:
+ future.set_exc_info(sys.exc_info())
+ if not future.done():
+ if keys is not None:
+ future.set_result(dict(zip(keys, result_list)))
+ else:
+ future.set_result(result_list)
+
+ listening = set()
+ for f in children:
+ if f not in listening:
+ listening.add(f)
+ f.add_done_callback(callback)
+ return future
+
+
+def maybe_future(x):
+ """Converts ``x`` into a `.Future`.
+
+ If ``x`` is already a `.Future`, it is simply returned; otherwise
+ it is wrapped in a new `.Future`. This is suitable for use as
+ ``result = yield gen.maybe_future(f())`` when you don't know whether
+ ``f()`` returns a `.Future` or not.
+
+ .. deprecated:: 4.3
+ This function only handles ``Futures``, not other yieldable objects.
+ Instead of `maybe_future`, check for the non-future result types
+ you expect (often just ``None``), and ``yield`` anything unknown.
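+
+ For the common case where ``None`` is the only non-future result,
+ the replacement pattern sketched by this note is::
+
+ result = f()
+ if result is not None:
+ result = yield result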
+ """
+ if is_future(x):
+ return x
+ else:
+ fut = Future()
+ fut.set_result(x)
+ return fut
+
+
+def with_timeout(timeout, future, io_loop=None, quiet_exceptions=()):
+ """Wraps a `.Future` (or other yieldable object) in a timeout.
+
+ Raises `TimeoutError` if the input future does not complete before
+ ``timeout``, which may be specified in any form allowed by
+ `.IOLoop.add_timeout` (i.e. a `datetime.timedelta` or an absolute time
+ relative to `.IOLoop.time`).
+
+ If the wrapped `.Future` fails after it has timed out, the exception
+ will be logged unless it is of a type contained in ``quiet_exceptions``
+ (which may be an exception type or a sequence of types).
+
+ Does not support `YieldPoint` subclasses.
+
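+ For example (a sketch; ``some_future`` is an assumed name)::
+
+ import datetime
+ result = yield gen.with_timeout(
+ datetime.timedelta(seconds=1), some_future)
+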
+ .. versionadded:: 4.0
+
+ .. versionchanged:: 4.1
+ Added the ``quiet_exceptions`` argument and the logging of unhandled
+ exceptions.
+
+ .. versionchanged:: 4.4
+ Added support for yieldable objects other than `.Future`.
+ """
+ # TODO: allow YieldPoints in addition to other yieldables?
+ # Tricky to do with stack_context semantics.
+ #
+ # It's tempting to optimize this by cancelling the input future on timeout
+ # instead of creating a new one, but A) we can't know if we are the only
+ # one waiting on the input future, so cancelling it might disrupt other
+ # callers and B) concurrent futures can only be cancelled while they are
+ # in the queue, so cancellation cannot reliably bound our waiting time.
+ future = convert_yielded(future)
+ result = Future()
+ chain_future(future, result)
+ if io_loop is None:
+ io_loop = IOLoop.current()
+
+ def error_callback(future):
+ try:
+ future.result()
+ except Exception as e:
+ if not isinstance(e, quiet_exceptions):
+ app_log.error("Exception in Future %r after timeout",
+ future, exc_info=True)
+
+ def timeout_callback():
+ if not result.done():
+ result.set_exception(TimeoutError("Timeout"))
+ # In case the wrapped future goes on to fail, log it.
+ future.add_done_callback(error_callback)
+ timeout_handle = io_loop.add_timeout(
+ timeout, timeout_callback)
+ if isinstance(future, Future):
+ # We know this future will resolve on the IOLoop, so we don't
+ # need the extra thread-safety of IOLoop.add_future (and we also
+ # don't care about StackContext here).
+ future.add_done_callback(
+ lambda future: io_loop.remove_timeout(timeout_handle))
+ else:
+ # concurrent.futures.Futures may resolve on any thread, so we
+ # need to route them back to the IOLoop.
+ io_loop.add_future(
+ future, lambda future: io_loop.remove_timeout(timeout_handle))
+ return result
+
+
+def sleep(duration):
+ """Return a `.Future` that resolves after the given number of seconds.
+
+ When used with ``yield`` in a coroutine, this is a non-blocking
+ analogue to `time.sleep` (which should not be used in coroutines
+ because it is blocking)::
+
+ yield gen.sleep(0.5)
+
+ Note that calling this function on its own does nothing; you must
+ wait on the `.Future` it returns (usually by yielding it).
+
+ .. versionadded:: 4.1
+ """
+ f = Future()
+ IOLoop.current().call_later(duration, lambda: f.set_result(None))
+ return f
+
+
+_null_future = Future()
+_null_future.set_result(None)
+
+moment = Future()
+moment.__doc__ = \
+ """A special object which may be yielded to allow the IOLoop to run for
+one iteration.
+
+This is not needed in normal use but it can be helpful in long-running
+coroutines that are likely to yield Futures that are ready instantly.
+
+Usage: ``yield gen.moment``
+
+.. versionadded:: 4.0
+
+.. deprecated:: 4.5
+ ``yield None`` is now equivalent to ``yield gen.moment``.
+"""
+moment.set_result(None)
+
+
+class Runner(object):
+ """Internal implementation of `tornado.gen.engine`.
+
+ Maintains information about pending callbacks and their results.
+
+ The results of the generator are stored in ``result_future`` (a
+ `.TracebackFuture`).
+ """
+ def __init__(self, gen, result_future, first_yielded):
+ self.gen = gen
+ self.result_future = result_future
+ self.future = _null_future
+ self.yield_point = None
+ self.pending_callbacks = None
+ self.results = None
+ self.running = False
+ self.finished = False
+ self.had_exception = False
+ self.io_loop = IOLoop.current()
+ # For efficiency, we do not create a stack context until we
+ # reach a YieldPoint (stack contexts are required for the historical
+ # semantics of YieldPoints, but not for Futures). When we have
+ # done so, this field will be set and must be called at the end
+ # of the coroutine.
+ self.stack_context_deactivate = None
+ if self.handle_yield(first_yielded):
+ gen = result_future = first_yielded = None
+ self.run()
+
+ def register_callback(self, key):
+ """Adds ``key`` to the list of callbacks."""
+ if self.pending_callbacks is None:
+ # Lazily initialize the old-style YieldPoint data structures.
+ self.pending_callbacks = set()
+ self.results = {}
+ if key in self.pending_callbacks:
+ raise KeyReuseError("key %r is already pending" % (key,))
+ self.pending_callbacks.add(key)
+
+ def is_ready(self, key):
+ """Returns true if a result is available for ``key``."""
+ if self.pending_callbacks is None or key not in self.pending_callbacks:
+ raise UnknownKeyError("key %r is not pending" % (key,))
+ return key in self.results
+
+ def set_result(self, key, result):
+ """Sets the result for ``key`` and attempts to resume the generator."""
+ self.results[key] = result
+ if self.yield_point is not None and self.yield_point.is_ready():
+ try:
+ self.future.set_result(self.yield_point.get_result())
+ except:
+ self.future.set_exc_info(sys.exc_info())
+ self.yield_point = None
+ self.run()
+
+ def pop_result(self, key):
+ """Returns the result for ``key`` and unregisters it."""
+ self.pending_callbacks.remove(key)
+ return self.results.pop(key)
+
+ def run(self):
+ """Starts or resumes the generator, running until it reaches a
+ yield point that is not ready.
+ """
+ if self.running or self.finished:
+ return
+ try:
+ self.running = True
+ while True:
+ future = self.future
+ if not future.done():
+ return
+ self.future = None
+ try:
+ orig_stack_contexts = stack_context._state.contexts
+ exc_info = None
+
+ try:
+ value = future.result()
+ except Exception:
+ self.had_exception = True
+ exc_info = sys.exc_info()
+ future = None
+
+ if exc_info is not None:
+ try:
+ yielded = self.gen.throw(*exc_info)
+ finally:
+ # Break up a reference to itself
+ # for faster GC on CPython.
+ exc_info = None
+ else:
+ yielded = self.gen.send(value)
+
+ if stack_context._state.contexts is not orig_stack_contexts:
+ self.gen.throw(
+ stack_context.StackContextInconsistentError(
+ 'stack_context inconsistency (probably caused '
+ 'by yield within a "with StackContext" block)'))
+ except (StopIteration, Return) as e:
+ self.finished = True
+ self.future = _null_future
+ if self.pending_callbacks and not self.had_exception:
+ # If we ran cleanly without waiting on all callbacks
+ # raise an error (really more of a warning). If we
+ # had an exception then some callbacks may have been
+ # orphaned, so skip the check in that case.
+ raise LeakedCallbackError(
+ "finished without waiting for callbacks %r" %
+ self.pending_callbacks)
+ self.result_future.set_result(_value_from_stopiteration(e))
+ self.result_future = None
+ self._deactivate_stack_context()
+ return
+ except Exception:
+ self.finished = True
+ self.future = _null_future
+ self.result_future.set_exc_info(sys.exc_info())
+ self.result_future = None
+ self._deactivate_stack_context()
+ return
+ if not self.handle_yield(yielded):
+ return
+ yielded = None
+ finally:
+ self.running = False
+
+ def handle_yield(self, yielded):
+ # Lists containing YieldPoints require stack contexts;
+ # other lists are handled in convert_yielded.
+ if _contains_yieldpoint(yielded):
+ yielded = multi(yielded)
+
+ if isinstance(yielded, YieldPoint):
+ # YieldPoints are too closely coupled to the Runner to go
+ # through the generic convert_yielded mechanism.
+ self.future = TracebackFuture()
+
+ def start_yield_point():
+ try:
+ yielded.start(self)
+ if yielded.is_ready():
+ self.future.set_result(
+ yielded.get_result())
+ else:
+ self.yield_point = yielded
+ except Exception:
+ self.future = TracebackFuture()
+ self.future.set_exc_info(sys.exc_info())
+
+ if self.stack_context_deactivate is None:
+ # Start a stack context if this is the first
+ # YieldPoint we've seen.
+ with stack_context.ExceptionStackContext(
+ self.handle_exception) as deactivate:
+ self.stack_context_deactivate = deactivate
+
+ def cb():
+ start_yield_point()
+ self.run()
+ self.io_loop.add_callback(cb)
+ return False
+ else:
+ start_yield_point()
+ else:
+ try:
+ self.future = convert_yielded(yielded)
+ except BadYieldError:
+ self.future = TracebackFuture()
+ self.future.set_exc_info(sys.exc_info())
+
+ if not self.future.done() or self.future is moment:
+ def inner(f):
+ # Break a reference cycle to speed GC.
+ f = None # noqa
+ self.run()
+ self.io_loop.add_future(
+ self.future, inner)
+ return False
+ return True
+
+ def result_callback(self, key):
+ return stack_context.wrap(_argument_adapter(
+ functools.partial(self.set_result, key)))
+
+ def handle_exception(self, typ, value, tb):
+ if not self.running and not self.finished:
+ self.future = TracebackFuture()
+ self.future.set_exc_info((typ, value, tb))
+ self.run()
+ return True
+ else:
+ return False
+
+ def _deactivate_stack_context(self):
+ if self.stack_context_deactivate is not None:
+ self.stack_context_deactivate()
+ self.stack_context_deactivate = None
+
+
+Arguments = collections.namedtuple('Arguments', ['args', 'kwargs'])
+
+
+def _argument_adapter(callback):
+ """Returns a function that when invoked runs ``callback`` with one arg.
+
+ If the function returned by this function is called with exactly
+ one argument, that argument is passed to ``callback``. Otherwise
+ the args tuple and kwargs dict are wrapped in an `Arguments` object.
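+
+ For example (illustrative)::
+
+ adapted = _argument_adapter(callback)
+ adapted(1) # calls callback(1)
+ adapted(1, 2, x=3) # calls callback(Arguments((1, 2), {"x": 3}))
+ adapted() # calls callback(None)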
+ """
+ def wrapper(*args, **kwargs):
+ if kwargs or len(args) > 1:
+ callback(Arguments(args, kwargs))
+ elif args:
+ callback(args[0])
+ else:
+ callback(None)
+ return wrapper
+
+
+# Convert Awaitables into Futures. It is unfortunately possible
+# to have infinite recursion here if those Awaitables assume that
+# we're using a different coroutine runner and yield objects
+# we don't understand. If that happens, the solution is to
+# register that runner's yieldable objects with convert_yielded.
+if sys.version_info >= (3, 3):
+ exec(textwrap.dedent("""
+ @coroutine
+ def _wrap_awaitable(x):
+ if hasattr(x, '__await__'):
+ x = x.__await__()
+ return (yield from x)
+ """))
+else:
+ # Py2-compatible version for use with Cython.
+ # Copied from PEP 380.
+ @coroutine
+ def _wrap_awaitable(x):
+ if hasattr(x, '__await__'):
+ _i = x.__await__()
+ else:
+ _i = iter(x)
+ try:
+ _y = next(_i)
+ except StopIteration as _e:
+ _r = _value_from_stopiteration(_e)
+ else:
+ while 1:
+ try:
+ _s = yield _y
+ except GeneratorExit as _e:
+ try:
+ _m = _i.close
+ except AttributeError:
+ pass
+ else:
+ _m()
+ raise _e
+ except BaseException as _e:
+ _x = sys.exc_info()
+ try:
+ _m = _i.throw
+ except AttributeError:
+ raise _e
+ else:
+ try:
+ _y = _m(*_x)
+ except StopIteration as _e:
+ _r = _value_from_stopiteration(_e)
+ break
+ else:
+ try:
+ if _s is None:
+ _y = next(_i)
+ else:
+ _y = _i.send(_s)
+ except StopIteration as _e:
+ _r = _value_from_stopiteration(_e)
+ break
+ raise Return(_r)
+
+
+def convert_yielded(yielded):
+ """Convert a yielded object into a `.Future`.
+
+ The default implementation accepts lists, dictionaries, and Futures.
+
+ If the `~functools.singledispatch` library is available, this function
+ may be extended to support additional types. For example::
+
+ @convert_yielded.register(asyncio.Future)
+ def _(asyncio_future):
+ return tornado.platform.asyncio.to_tornado_future(asyncio_future)
+
+ .. versionadded:: 4.1
+ """
+ # Lists and dicts containing YieldPoints were handled earlier.
+ if yielded is None:
+ return moment
+ elif isinstance(yielded, (list, dict)):
+ return multi(yielded)
+ elif is_future(yielded):
+ return yielded
+ elif isawaitable(yielded):
+ return _wrap_awaitable(yielded)
+ else:
+ raise BadYieldError("yielded unknown object %r" % (yielded,))
+
+
+if singledispatch is not None:
+ convert_yielded = singledispatch(convert_yielded)
+
+ try:
+ # If we can import t.p.asyncio, do it for its side effect
+ # (registering asyncio.Future with convert_yielded).
+ # It's ugly to do this here, but it prevents a cryptic
+ # infinite recursion in _wrap_awaitable.
+ # Note that even with this, asyncio integration is unlikely
+ # to work unless the application also configures AsyncIOLoop,
+ # but at least the error messages in that case are more
+ # comprehensible than a stack overflow.
+ import tornado.platform.asyncio
+ except ImportError:
+ pass
+ else:
+ # Reference the imported module to make pyflakes happy.
+ tornado
diff --git a/contrib/python/tornado/tornado-4/tornado/http1connection.py b/contrib/python/tornado/tornado-4/tornado/http1connection.py
index 32bed6c961..72e6c3b1f4 100644
--- a/contrib/python/tornado/tornado-4/tornado/http1connection.py
+++ b/contrib/python/tornado/tornado-4/tornado/http1connection.py
@@ -1,742 +1,742 @@
-#!/usr/bin/env python
-#
-# Copyright 2014 Facebook
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Client and server implementations of HTTP/1.x.
-
-.. versionadded:: 4.0
-"""
-
-from __future__ import absolute_import, division, print_function
-
-import re
-
-from tornado.concurrent import Future
-from tornado.escape import native_str, utf8
-from tornado import gen
-from tornado import httputil
-from tornado import iostream
-from tornado.log import gen_log, app_log
-from tornado import stack_context
-from tornado.util import GzipDecompressor, PY3
-
-
-class _QuietException(Exception):
- def __init__(self):
- pass
-
-
-class _ExceptionLoggingContext(object):
- """Used with the ``with`` statement when calling delegate methods to
- log any exceptions with the given logger. Any exceptions caught are
- converted to _QuietException.
- """
- def __init__(self, logger):
- self.logger = logger
-
- def __enter__(self):
- pass
-
- def __exit__(self, typ, value, tb):
- if value is not None:
- self.logger.error("Uncaught exception", exc_info=(typ, value, tb))
- raise _QuietException
-
-
-class HTTP1ConnectionParameters(object):
- """Parameters for `.HTTP1Connection` and `.HTTP1ServerConnection`.
- """
- def __init__(self, no_keep_alive=False, chunk_size=None,
- max_header_size=None, header_timeout=None, max_body_size=None,
- body_timeout=None, decompress=False):
- """
- :arg bool no_keep_alive: If true, always close the connection after
- one request.
- :arg int chunk_size: how much data to read into memory at once
- :arg int max_header_size: maximum amount of data for HTTP headers
- :arg float header_timeout: how long to wait for all headers (seconds)
- :arg int max_body_size: maximum amount of data for body
- :arg float body_timeout: how long to wait while reading body (seconds)
- :arg bool decompress: if true, decode incoming
- ``Content-Encoding: gzip``
- """
- self.no_keep_alive = no_keep_alive
- self.chunk_size = chunk_size or 65536
- self.max_header_size = max_header_size or 65536
- self.header_timeout = header_timeout
- self.max_body_size = max_body_size
- self.body_timeout = body_timeout
- self.decompress = decompress
-
-
-class HTTP1Connection(httputil.HTTPConnection):
- """Implements the HTTP/1.x protocol.
-
- This class can be used on its own for clients, or via `HTTP1ServerConnection`
- for servers.
- """
- def __init__(self, stream, is_client, params=None, context=None):
- """
- :arg stream: an `.IOStream`
- :arg bool is_client: client or server
- :arg params: a `.HTTP1ConnectionParameters` instance or ``None``
- :arg context: an opaque application-defined object that can be accessed
- as ``connection.context``.
- """
- self.is_client = is_client
- self.stream = stream
- if params is None:
- params = HTTP1ConnectionParameters()
- self.params = params
- self.context = context
- self.no_keep_alive = params.no_keep_alive
- # The body limits can be altered by the delegate, so save them
- # here instead of just referencing self.params later.
- self._max_body_size = (self.params.max_body_size or
- self.stream.max_buffer_size)
- self._body_timeout = self.params.body_timeout
- # _write_finished is set to True when finish() has been called,
- # i.e. there will be no more data sent. Data may still be in the
- # stream's write buffer.
- self._write_finished = False
- # True when we have read the entire incoming body.
- self._read_finished = False
- # _finish_future resolves when all data has been written and flushed
- # to the IOStream.
- self._finish_future = Future()
- # If true, the connection should be closed after this request
- # (after the response has been written in the server side,
- # and after it has been read in the client)
- self._disconnect_on_finish = False
- self._clear_callbacks()
- # Save the start lines after we read or write them; they
- # affect later processing (e.g. 304 responses and HEAD methods
- # have content-length but no bodies)
- self._request_start_line = None
- self._response_start_line = None
- self._request_headers = None
- # True if we are writing output with chunked encoding.
- self._chunking_output = None
- # While reading a body with a content-length, this is the
- # amount left to read.
- self._expected_content_remaining = None
- # A Future for our outgoing writes, returned by IOStream.write.
- self._pending_write = None
-
- def read_response(self, delegate):
- """Read a single HTTP response.
-
- Typical client-mode usage is to write a request using `write_headers`,
- `write`, and `finish`, and then call ``read_response``.
-
- :arg delegate: a `.HTTPMessageDelegate`
-
- Returns a `.Future` that resolves to None after the full response has
- been read.
- """
- if self.params.decompress:
- delegate = _GzipMessageDelegate(delegate, self.params.chunk_size)
- return self._read_message(delegate)
-
- @gen.coroutine
- def _read_message(self, delegate):
- need_delegate_close = False
- try:
- header_future = self.stream.read_until_regex(
- b"\r?\n\r?\n",
- max_bytes=self.params.max_header_size)
- if self.params.header_timeout is None:
- header_data = yield header_future
- else:
- try:
- header_data = yield gen.with_timeout(
- self.stream.io_loop.time() + self.params.header_timeout,
- header_future,
- io_loop=self.stream.io_loop,
- quiet_exceptions=iostream.StreamClosedError)
- except gen.TimeoutError:
- self.close()
- raise gen.Return(False)
- start_line, headers = self._parse_headers(header_data)
- if self.is_client:
- start_line = httputil.parse_response_start_line(start_line)
- self._response_start_line = start_line
- else:
- start_line = httputil.parse_request_start_line(start_line)
- self._request_start_line = start_line
- self._request_headers = headers
-
- self._disconnect_on_finish = not self._can_keep_alive(
- start_line, headers)
- need_delegate_close = True
- with _ExceptionLoggingContext(app_log):
- header_future = delegate.headers_received(start_line, headers)
- if header_future is not None:
- yield header_future
- if self.stream is None:
- # We've been detached.
- need_delegate_close = False
- raise gen.Return(False)
- skip_body = False
- if self.is_client:
- if (self._request_start_line is not None and
- self._request_start_line.method == 'HEAD'):
- skip_body = True
- code = start_line.code
- if code == 304:
- # 304 responses may include the content-length header
- # but do not actually have a body.
- # http://tools.ietf.org/html/rfc7230#section-3.3
- skip_body = True
- if code >= 100 and code < 200:
- # 1xx responses should never indicate the presence of
- # a body.
- if ('Content-Length' in headers or
- 'Transfer-Encoding' in headers):
- raise httputil.HTTPInputError(
- "Response code %d cannot have body" % code)
- # TODO: client delegates will get headers_received twice
- # in the case of a 100-continue. Document or change?
- yield self._read_message(delegate)
- else:
- if (headers.get("Expect") == "100-continue" and
- not self._write_finished):
- self.stream.write(b"HTTP/1.1 100 (Continue)\r\n\r\n")
- if not skip_body:
- body_future = self._read_body(
- start_line.code if self.is_client else 0, headers, delegate)
- if body_future is not None:
- if self._body_timeout is None:
- yield body_future
- else:
- try:
- yield gen.with_timeout(
- self.stream.io_loop.time() + self._body_timeout,
- body_future, self.stream.io_loop,
- quiet_exceptions=iostream.StreamClosedError)
- except gen.TimeoutError:
- gen_log.info("Timeout reading body from %s",
- self.context)
- self.stream.close()
- raise gen.Return(False)
- self._read_finished = True
- if not self._write_finished or self.is_client:
- need_delegate_close = False
- with _ExceptionLoggingContext(app_log):
- delegate.finish()
- # If we're waiting for the application to produce an asynchronous
- # response, and we're not detached, register a close callback
- # on the stream (we didn't need one while we were reading)
- if (not self._finish_future.done() and
- self.stream is not None and
- not self.stream.closed()):
- self.stream.set_close_callback(self._on_connection_close)
- yield self._finish_future
- if self.is_client and self._disconnect_on_finish:
- self.close()
- if self.stream is None:
- raise gen.Return(False)
- except httputil.HTTPInputError as e:
- gen_log.info("Malformed HTTP message from %s: %s",
- self.context, e)
- self.close()
- raise gen.Return(False)
- finally:
- if need_delegate_close:
- with _ExceptionLoggingContext(app_log):
- delegate.on_connection_close()
- header_future = None
- self._clear_callbacks()
- raise gen.Return(True)
-
- def _clear_callbacks(self):
- """Clears the callback attributes.
-
- This allows the request handler to be garbage collected more
- quickly in CPython by breaking up reference cycles.
- """
- self._write_callback = None
- self._write_future = None
- self._close_callback = None
- if self.stream is not None:
- self.stream.set_close_callback(None)
-
- def set_close_callback(self, callback):
- """Sets a callback that will be run when the connection is closed.
-
- .. deprecated:: 4.0
- Use `.HTTPMessageDelegate.on_connection_close` instead.
- """
- self._close_callback = stack_context.wrap(callback)
-
- def _on_connection_close(self):
- # Note that this callback is only registered on the IOStream
- # when we have finished reading the request and are waiting for
- # the application to produce its response.
- if self._close_callback is not None:
- callback = self._close_callback
- self._close_callback = None
- callback()
- if not self._finish_future.done():
- self._finish_future.set_result(None)
- self._clear_callbacks()
-
- def close(self):
- if self.stream is not None:
- self.stream.close()
- self._clear_callbacks()
- if not self._finish_future.done():
- self._finish_future.set_result(None)
-
- def detach(self):
- """Take control of the underlying stream.
-
- Returns the underlying `.IOStream` object and stops all further
- HTTP processing. May only be called during
- `.HTTPMessageDelegate.headers_received`. Intended for implementing
- protocols like websockets that tunnel over an HTTP handshake.
- """
- self._clear_callbacks()
- stream = self.stream
- self.stream = None
- if not self._finish_future.done():
- self._finish_future.set_result(None)
- return stream
-
- def set_body_timeout(self, timeout):
- """Sets the body timeout for a single request.
-
- Overrides the value from `.HTTP1ConnectionParameters`.
- """
- self._body_timeout = timeout
-
- def set_max_body_size(self, max_body_size):
- """Sets the body size limit for a single request.
-
- Overrides the value from `.HTTP1ConnectionParameters`.
- """
- self._max_body_size = max_body_size
-
- def write_headers(self, start_line, headers, chunk=None, callback=None):
- """Implements `.HTTPConnection.write_headers`."""
- lines = []
- if self.is_client:
- self._request_start_line = start_line
- lines.append(utf8('%s %s HTTP/1.1' % (start_line[0], start_line[1])))
- # Client requests with a non-empty body must have either a
- # Content-Length or a Transfer-Encoding.
- self._chunking_output = (
- start_line.method in ('POST', 'PUT', 'PATCH') and
- 'Content-Length' not in headers and
- 'Transfer-Encoding' not in headers)
- else:
- self._response_start_line = start_line
- lines.append(utf8('HTTP/1.1 %d %s' % (start_line[1], start_line[2])))
- self._chunking_output = (
- # TODO: should this use
- # self._request_start_line.version or
- # start_line.version?
- self._request_start_line.version == 'HTTP/1.1' and
- # 1xx, 204 and 304 responses have no body (not even a zero-length
- # body), and so should not have either Content-Length or
- # Transfer-Encoding headers.
- start_line.code not in (204, 304) and
- (start_line.code < 100 or start_line.code >= 200) and
- # No need to chunk the output if a Content-Length is specified.
- 'Content-Length' not in headers and
- # Applications are discouraged from touching Transfer-Encoding,
- # but if they do, leave it alone.
- 'Transfer-Encoding' not in headers)
- # If a 1.0 client asked for keep-alive, add the header.
- if (self._request_start_line.version == 'HTTP/1.0' and
- (self._request_headers.get('Connection', '').lower() ==
- 'keep-alive')):
- headers['Connection'] = 'Keep-Alive'
- if self._chunking_output:
- headers['Transfer-Encoding'] = 'chunked'
- if (not self.is_client and
- (self._request_start_line.method == 'HEAD' or
- start_line.code == 304)):
- self._expected_content_remaining = 0
- elif 'Content-Length' in headers:
- self._expected_content_remaining = int(headers['Content-Length'])
- else:
- self._expected_content_remaining = None
- # TODO: headers are supposed to be of type str, but we still have some
- # cases that let bytes slip through. Remove these native_str calls when those
- # are fixed.
- header_lines = (native_str(n) + ": " + native_str(v) for n, v in headers.get_all())
- if PY3:
- lines.extend(l.encode('latin1') for l in header_lines)
- else:
- lines.extend(header_lines)
- for line in lines:
- if b'\n' in line:
- raise ValueError('Newline in header: ' + repr(line))
- future = None
- if self.stream.closed():
- future = self._write_future = Future()
- future.set_exception(iostream.StreamClosedError())
- future.exception()
- else:
- if callback is not None:
- self._write_callback = stack_context.wrap(callback)
- else:
- future = self._write_future = Future()
- data = b"\r\n".join(lines) + b"\r\n\r\n"
- if chunk:
- data += self._format_chunk(chunk)
- self._pending_write = self.stream.write(data)
- self._pending_write.add_done_callback(self._on_write_complete)
- return future
-
- def _format_chunk(self, chunk):
- if self._expected_content_remaining is not None:
- self._expected_content_remaining -= len(chunk)
- if self._expected_content_remaining < 0:
- # Close the stream now to stop further framing errors.
- self.stream.close()
- raise httputil.HTTPOutputError(
- "Tried to write more data than Content-Length")
- if self._chunking_output and chunk:
- # Don't write out empty chunks because that means END-OF-STREAM
- # with chunked encoding
- return utf8("%x" % len(chunk)) + b"\r\n" + chunk + b"\r\n"
- else:
- return chunk
-
- def write(self, chunk, callback=None):
- """Implements `.HTTPConnection.write`.
-
- For backwards compatibility it is allowed but deprecated to
- skip `write_headers` and instead call `write()` with a
- pre-encoded header block.
- """
- future = None
- if self.stream.closed():
- future = self._write_future = Future()
- self._write_future.set_exception(iostream.StreamClosedError())
- self._write_future.exception()
- else:
- if callback is not None:
- self._write_callback = stack_context.wrap(callback)
- else:
- future = self._write_future = Future()
- self._pending_write = self.stream.write(self._format_chunk(chunk))
- self._pending_write.add_done_callback(self._on_write_complete)
- return future
-
- def finish(self):
- """Implements `.HTTPConnection.finish`."""
- if (self._expected_content_remaining is not None and
- self._expected_content_remaining != 0 and
- not self.stream.closed()):
- self.stream.close()
- raise httputil.HTTPOutputError(
- "Tried to write %d bytes less than Content-Length" %
- self._expected_content_remaining)
- if self._chunking_output:
- if not self.stream.closed():
- self._pending_write = self.stream.write(b"0\r\n\r\n")
- self._pending_write.add_done_callback(self._on_write_complete)
- self._write_finished = True
- # If the app finished the request while we're still reading,
- # divert any remaining data away from the delegate and
- # close the connection when we're done sending our response.
- # Closing the connection is the only way to avoid reading the
- # whole input body.
- if not self._read_finished:
- self._disconnect_on_finish = True
- # No more data is coming, so instruct TCP to send any remaining
- # data immediately instead of waiting for a full packet or ack.
- self.stream.set_nodelay(True)
- if self._pending_write is None:
- self._finish_request(None)
- else:
- self._pending_write.add_done_callback(self._finish_request)
-
- def _on_write_complete(self, future):
- exc = future.exception()
- if exc is not None and not isinstance(exc, iostream.StreamClosedError):
- future.result()
- if self._write_callback is not None:
- callback = self._write_callback
- self._write_callback = None
- self.stream.io_loop.add_callback(callback)
- if self._write_future is not None:
- future = self._write_future
- self._write_future = None
- future.set_result(None)
-
- def _can_keep_alive(self, start_line, headers):
- if self.params.no_keep_alive:
- return False
- connection_header = headers.get("Connection")
- if connection_header is not None:
- connection_header = connection_header.lower()
- if start_line.version == "HTTP/1.1":
- return connection_header != "close"
- elif ("Content-Length" in headers or
- headers.get("Transfer-Encoding", "").lower() == "chunked" or
- getattr(start_line, 'method', None) in ("HEAD", "GET")):
- # start_line may be a request or response start line; only
- # the former has a method attribute.
- return connection_header == "keep-alive"
- return False
-
- def _finish_request(self, future):
- self._clear_callbacks()
- if not self.is_client and self._disconnect_on_finish:
- self.close()
- return
- # Turn Nagle's algorithm back on, leaving the stream in its
- # default state for the next request.
- self.stream.set_nodelay(False)
- if not self._finish_future.done():
- self._finish_future.set_result(None)
-
- def _parse_headers(self, data):
- # The lstrip removes newlines that some implementations sometimes
- # insert between messages of a reused connection. Per RFC 7230,
- # we SHOULD ignore at least one empty line before the request.
- # http://tools.ietf.org/html/rfc7230#section-3.5
- data = native_str(data.decode('latin1')).lstrip("\r\n")
- # RFC 7230 allows for both CRLF and bare LF.
- eol = data.find("\n")
- start_line = data[:eol].rstrip("\r")
- try:
- headers = httputil.HTTPHeaders.parse(data[eol:])
- except ValueError:
- # probably from split() if there was no ':' in the line
- raise httputil.HTTPInputError("Malformed HTTP headers: %r" %
- data[eol:100])
- return start_line, headers
-
- def _read_body(self, code, headers, delegate):
- if "Content-Length" in headers:
- if "Transfer-Encoding" in headers:
- # Response cannot contain both Content-Length and
- # Transfer-Encoding headers.
- # http://tools.ietf.org/html/rfc7230#section-3.3.3
- raise httputil.HTTPInputError(
- "Response with both Transfer-Encoding and Content-Length")
- if "," in headers["Content-Length"]:
- # Proxies sometimes cause Content-Length headers to get
- # duplicated. If all the values are identical then we can
- # use them but if they differ it's an error.
- pieces = re.split(r',\s*', headers["Content-Length"])
- if any(i != pieces[0] for i in pieces):
- raise httputil.HTTPInputError(
- "Multiple unequal Content-Lengths: %r" %
- headers["Content-Length"])
- headers["Content-Length"] = pieces[0]
-
- try:
- content_length = int(headers["Content-Length"])
- except ValueError:
- # Handles non-integer Content-Length value.
- raise httputil.HTTPInputError(
- "Only integer Content-Length is allowed: %s" % headers["Content-Length"])
-
- if content_length > self._max_body_size:
- raise httputil.HTTPInputError("Content-Length too long")
- else:
- content_length = None
-
- if code == 204:
- # This response code is not allowed to have a non-empty body,
- # and has an implicit length of zero instead of read-until-close.
- # http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.3
- if ("Transfer-Encoding" in headers or
- content_length not in (None, 0)):
- raise httputil.HTTPInputError(
- "Response with code %d should not have body" % code)
- content_length = 0
-
- if content_length is not None:
- return self._read_fixed_body(content_length, delegate)
- if headers.get("Transfer-Encoding", "").lower() == "chunked":
- return self._read_chunked_body(delegate)
- if self.is_client:
- return self._read_body_until_close(delegate)
- return None
-
- @gen.coroutine
- def _read_fixed_body(self, content_length, delegate):
- while content_length > 0:
- body = yield self.stream.read_bytes(
- min(self.params.chunk_size, content_length), partial=True)
- content_length -= len(body)
- if not self._write_finished or self.is_client:
- with _ExceptionLoggingContext(app_log):
- ret = delegate.data_received(body)
- if ret is not None:
- yield ret
-
- @gen.coroutine
- def _read_chunked_body(self, delegate):
- # TODO: "chunk extensions" http://tools.ietf.org/html/rfc2616#section-3.6.1
- total_size = 0
- while True:
- chunk_len = yield self.stream.read_until(b"\r\n", max_bytes=64)
- chunk_len = int(chunk_len.strip(), 16)
- if chunk_len == 0:
- crlf = yield self.stream.read_bytes(2)
- if crlf != b'\r\n':
- raise httputil.HTTPInputError("improperly terminated chunked request")
- return
- total_size += chunk_len
- if total_size > self._max_body_size:
- raise httputil.HTTPInputError("chunked body too large")
- bytes_to_read = chunk_len
- while bytes_to_read:
- chunk = yield self.stream.read_bytes(
- min(bytes_to_read, self.params.chunk_size), partial=True)
- bytes_to_read -= len(chunk)
- if not self._write_finished or self.is_client:
- with _ExceptionLoggingContext(app_log):
- ret = delegate.data_received(chunk)
- if ret is not None:
- yield ret
- # chunk ends with \r\n
- crlf = yield self.stream.read_bytes(2)
- assert crlf == b"\r\n"
-
- @gen.coroutine
- def _read_body_until_close(self, delegate):
- body = yield self.stream.read_until_close()
- if not self._write_finished or self.is_client:
- with _ExceptionLoggingContext(app_log):
- delegate.data_received(body)
-
-
-class _GzipMessageDelegate(httputil.HTTPMessageDelegate):
- """Wraps an `HTTPMessageDelegate` to decode ``Content-Encoding: gzip``.
- """
- def __init__(self, delegate, chunk_size):
- self._delegate = delegate
- self._chunk_size = chunk_size
- self._decompressor = None
-
- def headers_received(self, start_line, headers):
- if headers.get("Content-Encoding") == "gzip":
- self._decompressor = GzipDecompressor()
- # Downstream delegates will only see uncompressed data,
- # so rename the content-encoding header.
- # (but note that curl_httpclient doesn't do this).
- headers.add("X-Consumed-Content-Encoding",
- headers["Content-Encoding"])
- del headers["Content-Encoding"]
- return self._delegate.headers_received(start_line, headers)
-
- @gen.coroutine
- def data_received(self, chunk):
- if self._decompressor:
- compressed_data = chunk
- while compressed_data:
- decompressed = self._decompressor.decompress(
- compressed_data, self._chunk_size)
- if decompressed:
- ret = self._delegate.data_received(decompressed)
- if ret is not None:
- yield ret
- compressed_data = self._decompressor.unconsumed_tail
- else:
- ret = self._delegate.data_received(chunk)
- if ret is not None:
- yield ret
-
- def finish(self):
- if self._decompressor is not None:
- tail = self._decompressor.flush()
- if tail:
- # I believe the tail will always be empty (i.e.
- # decompress will return all it can). The purpose
- # of the flush call is to detect errors such
- # as truncated input. But in case it ever returns
- # anything, treat it as an extra chunk
- self._delegate.data_received(tail)
- return self._delegate.finish()
-
- def on_connection_close(self):
- return self._delegate.on_connection_close()
-
-
-class HTTP1ServerConnection(object):
- """An HTTP/1.x server."""
- def __init__(self, stream, params=None, context=None):
- """
- :arg stream: an `.IOStream`
- :arg params: a `.HTTP1ConnectionParameters` or None
- :arg context: an opaque application-defined object that is accessible
- as ``connection.context``
- """
- self.stream = stream
- if params is None:
- params = HTTP1ConnectionParameters()
- self.params = params
- self.context = context
- self._serving_future = None
-
- @gen.coroutine
- def close(self):
- """Closes the connection.
-
- Returns a `.Future` that resolves after the serving loop has exited.
- """
- self.stream.close()
- # Block until the serving loop is done, but ignore any exceptions
- # (start_serving is already responsible for logging them).
- try:
- yield self._serving_future
- except Exception:
- pass
-
- def start_serving(self, delegate):
- """Starts serving requests on this connection.
-
- :arg delegate: a `.HTTPServerConnectionDelegate`
- """
- assert isinstance(delegate, httputil.HTTPServerConnectionDelegate)
- self._serving_future = self._server_request_loop(delegate)
- # Register the future on the IOLoop so its errors get logged.
- self.stream.io_loop.add_future(self._serving_future,
- lambda f: f.result())
-
- @gen.coroutine
- def _server_request_loop(self, delegate):
- try:
- while True:
- conn = HTTP1Connection(self.stream, False,
- self.params, self.context)
- request_delegate = delegate.start_request(self, conn)
- try:
- ret = yield conn.read_response(request_delegate)
- except (iostream.StreamClosedError,
- iostream.UnsatisfiableReadError):
- return
- except _QuietException:
- # This exception was already logged.
- conn.close()
- return
- except Exception:
- gen_log.error("Uncaught exception", exc_info=True)
- conn.close()
- return
- if not ret:
- return
- yield gen.moment
- finally:
- delegate.on_close(self)
+#!/usr/bin/env python
+#
+# Copyright 2014 Facebook
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Client and server implementations of HTTP/1.x.
+
+.. versionadded:: 4.0
+"""
+
+from __future__ import absolute_import, division, print_function
+
+import re
+
+from tornado.concurrent import Future
+from tornado.escape import native_str, utf8
+from tornado import gen
+from tornado import httputil
+from tornado import iostream
+from tornado.log import gen_log, app_log
+from tornado import stack_context
+from tornado.util import GzipDecompressor, PY3
+
+
+class _QuietException(Exception):
+ def __init__(self):
+ pass
+
+
+class _ExceptionLoggingContext(object):
+ """Used with the ``with`` statement when calling delegate methods to
+ log any exceptions with the given logger. Any exceptions caught are
+ converted to _QuietException.
+ """
+ def __init__(self, logger):
+ self.logger = logger
+
+ def __enter__(self):
+ pass
+
+ def __exit__(self, typ, value, tb):
+ if value is not None:
+ self.logger.error("Uncaught exception", exc_info=(typ, value, tb))
+ raise _QuietException
+
+
+class HTTP1ConnectionParameters(object):
+ """Parameters for `.HTTP1Connection` and `.HTTP1ServerConnection`.
+ """
+ def __init__(self, no_keep_alive=False, chunk_size=None,
+ max_header_size=None, header_timeout=None, max_body_size=None,
+ body_timeout=None, decompress=False):
+ """
+ :arg bool no_keep_alive: If true, always close the connection after
+ one request.
+ :arg int chunk_size: how much data to read into memory at once
+ :arg int max_header_size: maximum amount of data for HTTP headers
+ :arg float header_timeout: how long to wait for all headers (seconds)
+ :arg int max_body_size: maximum amount of data for body
+ :arg float body_timeout: how long to wait while reading body (seconds)
+ :arg bool decompress: if true, decode incoming
+ ``Content-Encoding: gzip``
+ """
+ self.no_keep_alive = no_keep_alive
+ self.chunk_size = chunk_size or 65536
+ self.max_header_size = max_header_size or 65536
+ self.header_timeout = header_timeout
+ self.max_body_size = max_body_size
+ self.body_timeout = body_timeout
+ self.decompress = decompress
+
+
+class HTTP1Connection(httputil.HTTPConnection):
+ """Implements the HTTP/1.x protocol.
+
+ This class can be used on its own for clients, or via `HTTP1ServerConnection`
+ for servers.
+ """
+ def __init__(self, stream, is_client, params=None, context=None):
+ """
+ :arg stream: an `.IOStream`
+ :arg bool is_client: client or server
+ :arg params: a `.HTTP1ConnectionParameters` instance or ``None``
+ :arg context: an opaque application-defined object that can be accessed
+ as ``connection.context``.
+ """
+ self.is_client = is_client
+ self.stream = stream
+ if params is None:
+ params = HTTP1ConnectionParameters()
+ self.params = params
+ self.context = context
+ self.no_keep_alive = params.no_keep_alive
+ # The body limits can be altered by the delegate, so save them
+ # here instead of just referencing self.params later.
+ self._max_body_size = (self.params.max_body_size or
+ self.stream.max_buffer_size)
+ self._body_timeout = self.params.body_timeout
+ # _write_finished is set to True when finish() has been called,
+ # i.e. there will be no more data sent. Data may still be in the
+ # stream's write buffer.
+ self._write_finished = False
+ # True when we have read the entire incoming body.
+ self._read_finished = False
+ # _finish_future resolves when all data has been written and flushed
+ # to the IOStream.
+ self._finish_future = Future()
+ # If true, the connection should be closed after this request
+ # (after the response has been written in the server side,
+ # and after it has been read in the client)
+ self._disconnect_on_finish = False
+ self._clear_callbacks()
+ # Save the start lines after we read or write them; they
+ # affect later processing (e.g. 304 responses and HEAD methods
+ # have content-length but no bodies)
+ self._request_start_line = None
+ self._response_start_line = None
+ self._request_headers = None
+ # True if we are writing output with chunked encoding.
+ self._chunking_output = None
+ # While reading a body with a content-length, this is the
+ # amount left to read.
+ self._expected_content_remaining = None
+ # A Future for our outgoing writes, returned by IOStream.write.
+ self._pending_write = None
+
+ def read_response(self, delegate):
+ """Read a single HTTP response.
+
+ Typical client-mode usage is to write a request using `write_headers`,
+ `write`, and `finish`, and then call ``read_response``.
+
+ :arg delegate: a `.HTTPMessageDelegate`
+
+ Returns a `.Future` that resolves to None after the full response has
+ been read.
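+
+ A sketch of that sequence (``conn`` is a client-mode
+ ``HTTP1Connection``; ``start_line``, ``headers`` and ``delegate``
+ are assumed names)::
+
+ conn.write_headers(start_line, headers)
+ conn.finish()
+ yield conn.read_response(delegate)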
+ """
+ if self.params.decompress:
+ delegate = _GzipMessageDelegate(delegate, self.params.chunk_size)
+ return self._read_message(delegate)
+
+ @gen.coroutine
+ def _read_message(self, delegate):
+ need_delegate_close = False
+ try:
+ header_future = self.stream.read_until_regex(
+ b"\r?\n\r?\n",
+ max_bytes=self.params.max_header_size)
+ if self.params.header_timeout is None:
+ header_data = yield header_future
+ else:
+ try:
+ header_data = yield gen.with_timeout(
+ self.stream.io_loop.time() + self.params.header_timeout,
+ header_future,
+ io_loop=self.stream.io_loop,
+ quiet_exceptions=iostream.StreamClosedError)
+ except gen.TimeoutError:
+ self.close()
+ raise gen.Return(False)
+ start_line, headers = self._parse_headers(header_data)
+ if self.is_client:
+ start_line = httputil.parse_response_start_line(start_line)
+ self._response_start_line = start_line
+ else:
+ start_line = httputil.parse_request_start_line(start_line)
+ self._request_start_line = start_line
+ self._request_headers = headers
+
+ self._disconnect_on_finish = not self._can_keep_alive(
+ start_line, headers)
+ need_delegate_close = True
+ with _ExceptionLoggingContext(app_log):
+ header_future = delegate.headers_received(start_line, headers)
+ if header_future is not None:
+ yield header_future
+ if self.stream is None:
+ # We've been detached.
+ need_delegate_close = False
+ raise gen.Return(False)
+ skip_body = False
+ if self.is_client:
+ if (self._request_start_line is not None and
+ self._request_start_line.method == 'HEAD'):
+ skip_body = True
+ code = start_line.code
+ if code == 304:
+ # 304 responses may include the content-length header
+ # but do not actually have a body.
+ # http://tools.ietf.org/html/rfc7230#section-3.3
+ skip_body = True
+ if code >= 100 and code < 200:
+ # 1xx responses should never indicate the presence of
+ # a body.
+ if ('Content-Length' in headers or
+ 'Transfer-Encoding' in headers):
+ raise httputil.HTTPInputError(
+ "Response code %d cannot have body" % code)
+ # TODO: client delegates will get headers_received twice
+ # in the case of a 100-continue. Document or change?
+ yield self._read_message(delegate)
+ else:
+ if (headers.get("Expect") == "100-continue" and
+ not self._write_finished):
+ self.stream.write(b"HTTP/1.1 100 (Continue)\r\n\r\n")
+ if not skip_body:
+ body_future = self._read_body(
+ start_line.code if self.is_client else 0, headers, delegate)
+ if body_future is not None:
+ if self._body_timeout is None:
+ yield body_future
+ else:
+ try:
+ yield gen.with_timeout(
+ self.stream.io_loop.time() + self._body_timeout,
+ body_future, self.stream.io_loop,
+ quiet_exceptions=iostream.StreamClosedError)
+ except gen.TimeoutError:
+ gen_log.info("Timeout reading body from %s",
+ self.context)
+ self.stream.close()
+ raise gen.Return(False)
+ self._read_finished = True
+ if not self._write_finished or self.is_client:
+ need_delegate_close = False
+ with _ExceptionLoggingContext(app_log):
+ delegate.finish()
+ # If we're waiting for the application to produce an asynchronous
+ # response, and we're not detached, register a close callback
+ # on the stream (we didn't need one while we were reading)
+ if (not self._finish_future.done() and
+ self.stream is not None and
+ not self.stream.closed()):
+ self.stream.set_close_callback(self._on_connection_close)
+ yield self._finish_future
+ if self.is_client and self._disconnect_on_finish:
+ self.close()
+ if self.stream is None:
+ raise gen.Return(False)
+ except httputil.HTTPInputError as e:
+ gen_log.info("Malformed HTTP message from %s: %s",
+ self.context, e)
+ self.close()
+ raise gen.Return(False)
+ finally:
+ if need_delegate_close:
+ with _ExceptionLoggingContext(app_log):
+ delegate.on_connection_close()
+ header_future = None
+ self._clear_callbacks()
+ raise gen.Return(True)
+
+ def _clear_callbacks(self):
+ """Clears the callback attributes.
+
+ This allows the request handler to be garbage collected more
+ quickly in CPython by breaking up reference cycles.
+ """
+ self._write_callback = None
+ self._write_future = None
+ self._close_callback = None
+ if self.stream is not None:
+ self.stream.set_close_callback(None)
+
+ def set_close_callback(self, callback):
+ """Sets a callback that will be run when the connection is closed.
+
+ .. deprecated:: 4.0
+ Use `.HTTPMessageDelegate.on_connection_close` instead.
+ """
+ self._close_callback = stack_context.wrap(callback)
+
+ def _on_connection_close(self):
+ # Note that this callback is only registered on the IOStream
+ # when we have finished reading the request and are waiting for
+ # the application to produce its response.
+ if self._close_callback is not None:
+ callback = self._close_callback
+ self._close_callback = None
+ callback()
+ if not self._finish_future.done():
+ self._finish_future.set_result(None)
+ self._clear_callbacks()
+
+ def close(self):
+ if self.stream is not None:
+ self.stream.close()
+ self._clear_callbacks()
+ if not self._finish_future.done():
+ self._finish_future.set_result(None)
+
+ def detach(self):
+ """Take control of the underlying stream.
+
+ Returns the underlying `.IOStream` object and stops all further
+ HTTP processing. May only be called during
+ `.HTTPMessageDelegate.headers_received`. Intended for implementing
+ protocols like websockets that tunnel over an HTTP handshake.
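+
+ For example, from within ``headers_received`` (illustrative)::
+
+ stream = connection.detach()
+ stream.write(b"HTTP/1.1 101 Switching Protocols\r\n\r\n")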
+ """
+ self._clear_callbacks()
+ stream = self.stream
+ self.stream = None
+ if not self._finish_future.done():
+ self._finish_future.set_result(None)
+ return stream
+
+ def set_body_timeout(self, timeout):
+ """Sets the body timeout for a single request.
+
+ Overrides the value from `.HTTP1ConnectionParameters`.
+ """
+ self._body_timeout = timeout
+
+ def set_max_body_size(self, max_body_size):
+ """Sets the body size limit for a single request.
+
+ Overrides the value from `.HTTP1ConnectionParameters`.
+ """
+ self._max_body_size = max_body_size
+
+ def write_headers(self, start_line, headers, chunk=None, callback=None):
+ """Implements `.HTTPConnection.write_headers`."""
+ lines = []
+ if self.is_client:
+ self._request_start_line = start_line
+ lines.append(utf8('%s %s HTTP/1.1' % (start_line[0], start_line[1])))
+ # Client requests with a non-empty body must have either a
+ # Content-Length or a Transfer-Encoding.
+ self._chunking_output = (
+ start_line.method in ('POST', 'PUT', 'PATCH') and
+ 'Content-Length' not in headers and
+ 'Transfer-Encoding' not in headers)
+ else:
+ self._response_start_line = start_line
+ lines.append(utf8('HTTP/1.1 %d %s' % (start_line[1], start_line[2])))
+ self._chunking_output = (
+ # TODO: should this use
+ # self._request_start_line.version or
+ # start_line.version?
+ self._request_start_line.version == 'HTTP/1.1' and
+ # 1xx, 204 and 304 responses have no body (not even a zero-length
+ # body), and so should not have either Content-Length or
+ # Transfer-Encoding headers.
+ start_line.code not in (204, 304) and
+ (start_line.code < 100 or start_line.code >= 200) and
+ # No need to chunk the output if a Content-Length is specified.
+ 'Content-Length' not in headers and
+ # Applications are discouraged from touching Transfer-Encoding,
+ # but if they do, leave it alone.
+ 'Transfer-Encoding' not in headers)
+ # If a 1.0 client asked for keep-alive, add the header.
+ if (self._request_start_line.version == 'HTTP/1.0' and
+ (self._request_headers.get('Connection', '').lower() ==
+ 'keep-alive')):
+ headers['Connection'] = 'Keep-Alive'
+ if self._chunking_output:
+ headers['Transfer-Encoding'] = 'chunked'
+ if (not self.is_client and
+ (self._request_start_line.method == 'HEAD' or
+ start_line.code == 304)):
+ self._expected_content_remaining = 0
+ elif 'Content-Length' in headers:
+ self._expected_content_remaining = int(headers['Content-Length'])
+ else:
+ self._expected_content_remaining = None
+ # TODO: headers are supposed to be of type str, but we still have some
+ # cases that let bytes slip through. Remove these native_str calls when those
+ # are fixed.
+ header_lines = (native_str(n) + ": " + native_str(v) for n, v in headers.get_all())
+ if PY3:
+ lines.extend(l.encode('latin1') for l in header_lines)
+ else:
+ lines.extend(header_lines)
+ for line in lines:
+ if b'\n' in line:
+ raise ValueError('Newline in header: ' + repr(line))
+ future = None
+ if self.stream.closed():
+ future = self._write_future = Future()
+ future.set_exception(iostream.StreamClosedError())
+ future.exception()
+ else:
+ if callback is not None:
+ self._write_callback = stack_context.wrap(callback)
+ else:
+ future = self._write_future = Future()
+ data = b"\r\n".join(lines) + b"\r\n\r\n"
+ if chunk:
+ data += self._format_chunk(chunk)
+ self._pending_write = self.stream.write(data)
+ self._pending_write.add_done_callback(self._on_write_complete)
+ return future
+
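+ # Illustrative sketch (not part of the module): a 200 response to an
+ # HTTP/1.1 request that sets no Content-Length falls through the
+ # conditions above and is chunk-encoded automatically.
+ #
+ #     conn.write_headers(
+ #         httputil.ResponseStartLine('HTTP/1.1', 200, 'OK'),
+ #         httputil.HTTPHeaders())  # emits Transfer-Encoding: chunked
+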
+ def _format_chunk(self, chunk):
+ if self._expected_content_remaining is not None:
+ self._expected_content_remaining -= len(chunk)
+ if self._expected_content_remaining < 0:
+ # Close the stream now to stop further framing errors.
+ self.stream.close()
+ raise httputil.HTTPOutputError(
+ "Tried to write more data than Content-Length")
+ if self._chunking_output and chunk:
+ # Don't write out empty chunks because that means END-OF-STREAM
+ # with chunked encoding
+ return utf8("%x" % len(chunk)) + b"\r\n" + chunk + b"\r\n"
+ else:
+ return chunk
+
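+ # For reference, the framing produced above: a b"hello" chunk becomes
+ #
+ #     b"5\r\nhello\r\n"
+ #
+ # and finish() below terminates the body with b"0\r\n\r\n".
+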
+ def write(self, chunk, callback=None):
+ """Implements `.HTTPConnection.write`.
+
+ For backwards compatibility it is allowed but deprecated to
+ skip `write_headers` and instead call `write()` with a
+ pre-encoded header block.
+ """
+ future = None
+ if self.stream.closed():
+ future = self._write_future = Future()
+ self._write_future.set_exception(iostream.StreamClosedError())
+ self._write_future.exception()
+ else:
+ if callback is not None:
+ self._write_callback = stack_context.wrap(callback)
+ else:
+ future = self._write_future = Future()
+ self._pending_write = self.stream.write(self._format_chunk(chunk))
+ self._pending_write.add_done_callback(self._on_write_complete)
+ return future
+
+ def finish(self):
+ """Implements `.HTTPConnection.finish`."""
+ if (self._expected_content_remaining is not None and
+ self._expected_content_remaining != 0 and
+ not self.stream.closed()):
+ self.stream.close()
+ raise httputil.HTTPOutputError(
+ "Tried to write %d bytes less than Content-Length" %
+ self._expected_content_remaining)
+ if self._chunking_output:
+ if not self.stream.closed():
+ self._pending_write = self.stream.write(b"0\r\n\r\n")
+ self._pending_write.add_done_callback(self._on_write_complete)
+ self._write_finished = True
+ # If the app finished the request while we're still reading,
+ # divert any remaining data away from the delegate and
+ # close the connection when we're done sending our response.
+ # Closing the connection is the only way to avoid reading the
+ # whole input body.
+ if not self._read_finished:
+ self._disconnect_on_finish = True
+ # No more data is coming, so instruct TCP to send any remaining
+ # data immediately instead of waiting for a full packet or ack.
+ self.stream.set_nodelay(True)
+ if self._pending_write is None:
+ self._finish_request(None)
+ else:
+ self._pending_write.add_done_callback(self._finish_request)
+
+ def _on_write_complete(self, future):
+ exc = future.exception()
+ if exc is not None and not isinstance(exc, iostream.StreamClosedError):
+ future.result()
+ if self._write_callback is not None:
+ callback = self._write_callback
+ self._write_callback = None
+ self.stream.io_loop.add_callback(callback)
+ if self._write_future is not None:
+ future = self._write_future
+ self._write_future = None
+ future.set_result(None)
+
+ def _can_keep_alive(self, start_line, headers):
+ if self.params.no_keep_alive:
+ return False
+ connection_header = headers.get("Connection")
+ if connection_header is not None:
+ connection_header = connection_header.lower()
+ if start_line.version == "HTTP/1.1":
+ return connection_header != "close"
+ elif ("Content-Length" in headers or
+ headers.get("Transfer-Encoding", "").lower() == "chunked" or
+ getattr(start_line, 'method', None) in ("HEAD", "GET")):
+ # start_line may be a request or response start line; only
+ # the former has a method attribute.
+ return connection_header == "keep-alive"
+ return False
+
+ def _finish_request(self, future):
+ self._clear_callbacks()
+ if not self.is_client and self._disconnect_on_finish:
+ self.close()
+ return
+ # Turn Nagle's algorithm back on, leaving the stream in its
+ # default state for the next request.
+ self.stream.set_nodelay(False)
+ if not self._finish_future.done():
+ self._finish_future.set_result(None)
+
+ def _parse_headers(self, data):
+ # The lstrip removes newlines that some implementations sometimes
+ # insert between messages of a reused connection. Per RFC 7230,
+ # we SHOULD ignore at least one empty line before the request.
+ # http://tools.ietf.org/html/rfc7230#section-3.5
+ data = native_str(data.decode('latin1')).lstrip("\r\n")
+ # RFC 7230 allows both CRLF and bare LF as line endings.
+ eol = data.find("\n")
+ start_line = data[:eol].rstrip("\r")
+ try:
+ headers = httputil.HTTPHeaders.parse(data[eol:])
+ except ValueError:
+ # probably from split() if there was no ':' in the line
+ raise httputil.HTTPInputError("Malformed HTTP headers: %r" %
+ data[eol:eol + 100])
+ return start_line, headers
+
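+ # A minimal sketch of the round trip (illustrative only):
+ #
+ #     start_line, headers = conn._parse_headers(
+ #         b"GET / HTTP/1.1\r\nHost: example.com\r\n\r\n")
+ #     # start_line == "GET / HTTP/1.1"
+ #     # headers["Host"] == "example.com"
+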
+ def _read_body(self, code, headers, delegate):
+ if "Content-Length" in headers:
+ if "Transfer-Encoding" in headers:
+ # Response cannot contain both Content-Length and
+ # Transfer-Encoding headers.
+ # http://tools.ietf.org/html/rfc7230#section-3.3.3
+ raise httputil.HTTPInputError(
+ "Response with both Transfer-Encoding and Content-Length")
+ if "," in headers["Content-Length"]:
+ # Proxies sometimes cause Content-Length headers to get
+ # duplicated. If all the values are identical then we can
+ # use them but if they differ it's an error.
+ pieces = re.split(r',\s*', headers["Content-Length"])
+ if any(i != pieces[0] for i in pieces):
+ raise httputil.HTTPInputError(
+ "Multiple unequal Content-Lengths: %r" %
+ headers["Content-Length"])
+ headers["Content-Length"] = pieces[0]
+
+ try:
+ content_length = int(headers["Content-Length"])
+ except ValueError:
+ # Handles non-integer Content-Length value.
+ raise httputil.HTTPInputError(
+ "Only integer Content-Length is allowed: %s" % headers["Content-Length"])
+
+ if content_length > self._max_body_size:
+ raise httputil.HTTPInputError("Content-Length too long")
+ else:
+ content_length = None
+
+ if code == 204:
+ # This response code is not allowed to have a non-empty body,
+ # and has an implicit length of zero instead of read-until-close.
+ # http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.3
+ if ("Transfer-Encoding" in headers or
+ content_length not in (None, 0)):
+ raise httputil.HTTPInputError(
+ "Response with code %d should not have body" % code)
+ content_length = 0
+
+ if content_length is not None:
+ return self._read_fixed_body(content_length, delegate)
+ if headers.get("Transfer-Encoding", "").lower() == "chunked":
+ return self._read_chunked_body(delegate)
+ if self.is_client:
+ return self._read_body_until_close(delegate)
+ return None
+
+ @gen.coroutine
+ def _read_fixed_body(self, content_length, delegate):
+ while content_length > 0:
+ body = yield self.stream.read_bytes(
+ min(self.params.chunk_size, content_length), partial=True)
+ content_length -= len(body)
+ if not self._write_finished or self.is_client:
+ with _ExceptionLoggingContext(app_log):
+ ret = delegate.data_received(body)
+ if ret is not None:
+ yield ret
+
+ @gen.coroutine
+ def _read_chunked_body(self, delegate):
+ # TODO: "chunk extensions" http://tools.ietf.org/html/rfc2616#section-3.6.1
+ total_size = 0
+ while True:
+ chunk_len = yield self.stream.read_until(b"\r\n", max_bytes=64)
+ chunk_len = int(chunk_len.strip(), 16)
+ if chunk_len == 0:
+ crlf = yield self.stream.read_bytes(2)
+ if crlf != b'\r\n':
+ raise httputil.HTTPInputError("improperly terminated chunked request")
+ return
+ total_size += chunk_len
+ if total_size > self._max_body_size:
+ raise httputil.HTTPInputError("chunked body too large")
+ bytes_to_read = chunk_len
+ while bytes_to_read:
+ chunk = yield self.stream.read_bytes(
+ min(bytes_to_read, self.params.chunk_size), partial=True)
+ bytes_to_read -= len(chunk)
+ if not self._write_finished or self.is_client:
+ with _ExceptionLoggingContext(app_log):
+ ret = delegate.data_received(chunk)
+ if ret is not None:
+ yield ret
+ # chunk ends with \r\n
+ crlf = yield self.stream.read_bytes(2)
+ assert crlf == b"\r\n"
+
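+ # The wire format consumed above, for reference: the body
+ #
+ #     b"4\r\nWiki\r\n5\r\npedia\r\n0\r\n\r\n"
+ #
+ # decodes to b"Wikipedia"; the zero-length chunk marks the end.
+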
+ @gen.coroutine
+ def _read_body_until_close(self, delegate):
+ body = yield self.stream.read_until_close()
+ if not self._write_finished or self.is_client:
+ with _ExceptionLoggingContext(app_log):
+ delegate.data_received(body)
+
+
+class _GzipMessageDelegate(httputil.HTTPMessageDelegate):
+ """Wraps an `HTTPMessageDelegate` to decode ``Content-Encoding: gzip``.
+ """
+ def __init__(self, delegate, chunk_size):
+ self._delegate = delegate
+ self._chunk_size = chunk_size
+ self._decompressor = None
+
+ def headers_received(self, start_line, headers):
+ if headers.get("Content-Encoding") == "gzip":
+ self._decompressor = GzipDecompressor()
+ # Downstream delegates will only see uncompressed data,
+ # so rename the content-encoding header.
+ # (but note that curl_httpclient doesn't do this).
+ headers.add("X-Consumed-Content-Encoding",
+ headers["Content-Encoding"])
+ del headers["Content-Encoding"]
+ return self._delegate.headers_received(start_line, headers)
+
+ @gen.coroutine
+ def data_received(self, chunk):
+ if self._decompressor:
+ compressed_data = chunk
+ while compressed_data:
+ decompressed = self._decompressor.decompress(
+ compressed_data, self._chunk_size)
+ if decompressed:
+ ret = self._delegate.data_received(decompressed)
+ if ret is not None:
+ yield ret
+ compressed_data = self._decompressor.unconsumed_tail
+ else:
+ ret = self._delegate.data_received(chunk)
+ if ret is not None:
+ yield ret
+
+ def finish(self):
+ if self._decompressor is not None:
+ tail = self._decompressor.flush()
+ if tail:
+ # I believe the tail will always be empty (i.e.
+ # decompress will return all it can). The purpose
+ # of the flush call is to detect errors such
+ # as truncated input. But in case it ever returns
+ # anything, treat it as an extra chunk
+ self._delegate.data_received(tail)
+ return self._delegate.finish()
+
+ def on_connection_close(self):
+ return self._delegate.on_connection_close()
+
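+# A hedged sketch of how this wrapper is applied (the connection does
+# this itself when its parameters enable decompression); ``app_delegate``
+# stands in for any httputil.HTTPMessageDelegate and is assumed here:
+#
+#     delegate = _GzipMessageDelegate(app_delegate, chunk_size=65536)
+#     conn.read_response(delegate)
+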
+
+class HTTP1ServerConnection(object):
+ """An HTTP/1.x server."""
+ def __init__(self, stream, params=None, context=None):
+ """
+ :arg stream: an `.IOStream`
+ :arg params: a `.HTTP1ConnectionParameters` or None
+ :arg context: an opaque application-defined object that is accessible
+ as ``connection.context``
+ """
+ self.stream = stream
+ if params is None:
+ params = HTTP1ConnectionParameters()
+ self.params = params
+ self.context = context
+ self._serving_future = None
+
+ @gen.coroutine
+ def close(self):
+ """Closes the connection.
+
+ Returns a `.Future` that resolves after the serving loop has exited.
+ """
+ self.stream.close()
+ # Block until the serving loop is done, but ignore any exceptions
+ # (start_serving is already responsible for logging them).
+ try:
+ yield self._serving_future
+ except Exception:
+ pass
+
+ def start_serving(self, delegate):
+ """Starts serving requests on this connection.
+
+ :arg delegate: a `.HTTPServerConnectionDelegate`
+ """
+ assert isinstance(delegate, httputil.HTTPServerConnectionDelegate)
+ self._serving_future = self._server_request_loop(delegate)
+ # Register the future on the IOLoop so its errors get logged.
+ self.stream.io_loop.add_future(self._serving_future,
+ lambda f: f.result())
+
+ @gen.coroutine
+ def _server_request_loop(self, delegate):
+ try:
+ while True:
+ conn = HTTP1Connection(self.stream, False,
+ self.params, self.context)
+ request_delegate = delegate.start_request(self, conn)
+ try:
+ ret = yield conn.read_response(request_delegate)
+ except (iostream.StreamClosedError,
+ iostream.UnsatisfiableReadError):
+ return
+ except _QuietException:
+ # This exception was already logged.
+ conn.close()
+ return
+ except Exception:
+ gen_log.error("Uncaught exception", exc_info=True)
+ conn.close()
+ return
+ if not ret:
+ return
+ yield gen.moment
+ finally:
+ delegate.on_close(self)
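+
+# Putting it together (illustrative sketch; tornado.httpserver.HTTPServer
+# drives this class in practice). ``my_delegate`` stands in for any
+# httputil.HTTPServerConnectionDelegate:
+#
+#     conn = HTTP1ServerConnection(stream, HTTP1ConnectionParameters())
+#     conn.start_serving(my_delegate)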
diff --git a/contrib/python/tornado/tornado-4/tornado/httpclient.py b/contrib/python/tornado/tornado-4/tornado/httpclient.py
index 8436ece469..1e7e5f2a2f 100644
--- a/contrib/python/tornado/tornado-4/tornado/httpclient.py
+++ b/contrib/python/tornado/tornado-4/tornado/httpclient.py
@@ -1,678 +1,678 @@
-"""Blocking and non-blocking HTTP client interfaces.
-
-This module defines a common interface shared by two implementations,
-``simple_httpclient`` and ``curl_httpclient``. Applications may either
-instantiate their chosen implementation class directly or use the
-`AsyncHTTPClient` class from this module, which selects an implementation
-that can be overridden with the `AsyncHTTPClient.configure` method.
-
-The default implementation is ``simple_httpclient``, and this is expected
-to be suitable for most users' needs. However, some applications may wish
-to switch to ``curl_httpclient`` for reasons such as the following:
-
-* ``curl_httpclient`` has some features not found in ``simple_httpclient``,
- including support for HTTP proxies and the ability to use a specified
- network interface.
-
-* ``curl_httpclient`` is more likely to be compatible with sites that are
- not-quite-compliant with the HTTP spec, or sites that use little-exercised
- features of HTTP.
-
-* ``curl_httpclient`` is faster.
-
-* ``curl_httpclient`` was the default prior to Tornado 2.0.
-
-Note that if you are using ``curl_httpclient``, it is highly
-recommended that you use a recent version of ``libcurl`` and
-``pycurl``. Currently the minimum supported version of libcurl is
-7.22.0, and the minimum version of pycurl is 7.18.2. It is highly
-recommended that your ``libcurl`` installation is built with
-asynchronous DNS resolver (threaded or c-ares), otherwise you may
-encounter various problems with request timeouts (for more
-information, see
-http://curl.haxx.se/libcurl/c/curl_easy_setopt.html#CURLOPTCONNECTTIMEOUTMS
-and comments in curl_httpclient.py).
-
-To select ``curl_httpclient``, call `AsyncHTTPClient.configure` at startup::
-
- AsyncHTTPClient.configure("tornado.curl_httpclient.CurlAsyncHTTPClient")
-"""
-
-from __future__ import absolute_import, division, print_function
-
-import functools
-import time
-import weakref
-
-from tornado.concurrent import TracebackFuture
-from tornado.escape import utf8, native_str
-from tornado import httputil, stack_context
-from tornado.ioloop import IOLoop
-from tornado.util import Configurable
-
-
-class HTTPClient(object):
- """A blocking HTTP client.
-
- This interface is provided for convenience and testing; most applications
- that are running an IOLoop will want to use `AsyncHTTPClient` instead.
- Typical usage looks like this::
-
- http_client = httpclient.HTTPClient()
- try:
- response = http_client.fetch("http://www.google.com/")
- print(response.body)
- except httpclient.HTTPError as e:
- # HTTPError is raised for non-200 responses; the response
- # can be found in e.response.
- print("Error: " + str(e))
- except Exception as e:
- # Other errors are possible, such as IOError.
- print("Error: " + str(e))
- http_client.close()
- """
- def __init__(self, async_client_class=None, **kwargs):
- self._io_loop = IOLoop(make_current=False)
- if async_client_class is None:
- async_client_class = AsyncHTTPClient
- self._async_client = async_client_class(self._io_loop, **kwargs)
- self._closed = False
-
- def __del__(self):
- self.close()
-
- def close(self):
- """Closes the HTTPClient, freeing any resources used."""
- if not self._closed:
- self._async_client.close()
- self._io_loop.close()
- self._closed = True
-
- def fetch(self, request, **kwargs):
- """Executes a request, returning an `HTTPResponse`.
-
- The request may be either a string URL or an `HTTPRequest` object.
- If it is a string, we construct an `HTTPRequest` using any additional
- kwargs: ``HTTPRequest(request, **kwargs)``
-
- If an error occurs during the fetch, we raise an `HTTPError` unless
- the ``raise_error`` keyword argument is set to False.
- """
- response = self._io_loop.run_sync(functools.partial(
- self._async_client.fetch, request, **kwargs))
- return response
-
-
-class AsyncHTTPClient(Configurable):
- """An non-blocking HTTP client.
-
- Example usage::
-
- def handle_response(response):
- if response.error:
- print("Error: %s" % response.error)
- else:
- print(response.body)
-
- http_client = AsyncHTTPClient()
- http_client.fetch("http://www.google.com/", handle_response)
-
- The constructor for this class is magic in several respects: It
- actually creates an instance of an implementation-specific
- subclass, and instances are reused as a kind of pseudo-singleton
- (one per `.IOLoop`). The keyword argument ``force_instance=True``
- can be used to suppress this singleton behavior. Unless
- ``force_instance=True`` is used, no arguments other than
- ``io_loop`` should be passed to the `AsyncHTTPClient` constructor.
- The implementation subclass as well as arguments to its
- constructor can be set with the static method `configure()`
-
- All `AsyncHTTPClient` implementations support a ``defaults``
- keyword argument, which can be used to set default values for
- `HTTPRequest` attributes. For example::
-
- AsyncHTTPClient.configure(
- None, defaults=dict(user_agent="MyUserAgent"))
- # or with force_instance:
- client = AsyncHTTPClient(force_instance=True,
- defaults=dict(user_agent="MyUserAgent"))
-
- .. versionchanged:: 4.1
- The ``io_loop`` argument is deprecated.
- """
- @classmethod
- def configurable_base(cls):
- return AsyncHTTPClient
-
- @classmethod
- def configurable_default(cls):
- from tornado.simple_httpclient import SimpleAsyncHTTPClient
- return SimpleAsyncHTTPClient
-
- @classmethod
- def _async_clients(cls):
- attr_name = '_async_client_dict_' + cls.__name__
- if not hasattr(cls, attr_name):
- setattr(cls, attr_name, weakref.WeakKeyDictionary())
- return getattr(cls, attr_name)
-
- def __new__(cls, io_loop=None, force_instance=False, **kwargs):
- io_loop = io_loop or IOLoop.current()
- if force_instance:
- instance_cache = None
- else:
- instance_cache = cls._async_clients()
- if instance_cache is not None and io_loop in instance_cache:
- return instance_cache[io_loop]
- instance = super(AsyncHTTPClient, cls).__new__(cls, io_loop=io_loop,
- **kwargs)
- # Make sure the instance knows which cache to remove itself from.
- # It can't simply call _async_clients() because we may be in
- # __new__(AsyncHTTPClient) but instance.__class__ may be
- # SimpleAsyncHTTPClient.
- instance._instance_cache = instance_cache
- if instance_cache is not None:
- instance_cache[instance.io_loop] = instance
- return instance
-
- def initialize(self, io_loop, defaults=None):
- self.io_loop = io_loop
- self.defaults = dict(HTTPRequest._DEFAULTS)
- if defaults is not None:
- self.defaults.update(defaults)
- self._closed = False
-
- def close(self):
- """Destroys this HTTP client, freeing any file descriptors used.
-
- This method is **not needed in normal use** due to the way
- that `AsyncHTTPClient` objects are transparently reused.
- ``close()`` is generally only necessary when either the
- `.IOLoop` is also being closed, or the ``force_instance=True``
- argument was used when creating the `AsyncHTTPClient`.
-
- No other methods may be called on the `AsyncHTTPClient` after
- ``close()``.
-
- """
- if self._closed:
- return
- self._closed = True
- if self._instance_cache is not None:
- if self._instance_cache.get(self.io_loop) is not self:
- raise RuntimeError("inconsistent AsyncHTTPClient cache")
- del self._instance_cache[self.io_loop]
-
- def fetch(self, request, callback=None, raise_error=True, **kwargs):
- """Executes a request, asynchronously returning an `HTTPResponse`.
-
- The request may be either a string URL or an `HTTPRequest` object.
- If it is a string, we construct an `HTTPRequest` using any additional
- kwargs: ``HTTPRequest(request, **kwargs)``
-
- This method returns a `.Future` whose result is an
- `HTTPResponse`. By default, the ``Future`` will raise an
- `HTTPError` if the request returned a non-200 response code
- (other errors may also be raised if the server could not be
- contacted). Instead, if ``raise_error`` is set to False, the
- response will always be returned regardless of the response
- code.
-
- If a ``callback`` is given, it will be invoked with the `HTTPResponse`.
- In the callback interface, `HTTPError` is not automatically raised.
- Instead, you must check the response's ``error`` attribute or
- call its `~HTTPResponse.rethrow` method.
- """
- if self._closed:
- raise RuntimeError("fetch() called on closed AsyncHTTPClient")
- if not isinstance(request, HTTPRequest):
- request = HTTPRequest(url=request, **kwargs)
- else:
- if kwargs:
- raise ValueError("kwargs can't be used if request is an HTTPRequest object")
- # We may modify this (to add Host, Accept-Encoding, etc),
- # so make sure we don't modify the caller's object. This is also
- # where normal dicts get converted to HTTPHeaders objects.
- request.headers = httputil.HTTPHeaders(request.headers)
- request = _RequestProxy(request, self.defaults)
- future = TracebackFuture()
- if callback is not None:
- callback = stack_context.wrap(callback)
-
- def handle_future(future):
- exc = future.exception()
- if isinstance(exc, HTTPError) and exc.response is not None:
- response = exc.response
- elif exc is not None:
- response = HTTPResponse(
- request, 599, error=exc,
- request_time=time.time() - request.start_time)
- else:
- response = future.result()
- self.io_loop.add_callback(callback, response)
- future.add_done_callback(handle_future)
-
- def handle_response(response):
- if raise_error and response.error:
- future.set_exception(response.error)
- else:
- future.set_result(response)
- self.fetch_impl(request, handle_response)
- return future
-
- def fetch_impl(self, request, callback):
- raise NotImplementedError()
-
- @classmethod
- def configure(cls, impl, **kwargs):
- """Configures the `AsyncHTTPClient` subclass to use.
-
- ``AsyncHTTPClient()`` actually creates an instance of a subclass.
- This method may be called with either a class object or the
- fully-qualified name of such a class (or ``None`` to use the default,
- ``SimpleAsyncHTTPClient``).
-
- If additional keyword arguments are given, they will be passed
- to the constructor of each subclass instance created. The
- keyword argument ``max_clients`` determines the maximum number
- of simultaneous `~AsyncHTTPClient.fetch()` operations that can
- execute in parallel on each `.IOLoop`. Additional arguments
- may be supported depending on the implementation class in use.
-
- Example::
-
- AsyncHTTPClient.configure("tornado.curl_httpclient.CurlAsyncHTTPClient")
- """
- super(AsyncHTTPClient, cls).configure(impl, **kwargs)
-
-
-class HTTPRequest(object):
- """HTTP client request object."""
-
- # Default values for HTTPRequest parameters.
- # Merged with the values on the request object by AsyncHTTPClient
- # implementations.
- _DEFAULTS = dict(
- connect_timeout=20.0,
- request_timeout=20.0,
- follow_redirects=True,
- max_redirects=5,
- decompress_response=True,
- proxy_password='',
- allow_nonstandard_methods=False,
- validate_cert=True)
-
- def __init__(self, url, method="GET", headers=None, body=None,
- auth_username=None, auth_password=None, auth_mode=None,
- connect_timeout=None, request_timeout=None,
- if_modified_since=None, follow_redirects=None,
- max_redirects=None, user_agent=None, use_gzip=None,
- network_interface=None, streaming_callback=None,
- header_callback=None, prepare_curl_callback=None,
- proxy_host=None, proxy_port=None, proxy_username=None,
- proxy_password=None, proxy_auth_mode=None,
- allow_nonstandard_methods=None, validate_cert=None,
- ca_certs=None, allow_ipv6=None, client_key=None,
- client_cert=None, body_producer=None,
- expect_100_continue=False, decompress_response=None,
- ssl_options=None):
- r"""All parameters except ``url`` are optional.
-
- :arg string url: URL to fetch
- :arg string method: HTTP method, e.g. "GET" or "POST"
- :arg headers: Additional HTTP headers to pass on the request
- :type headers: `~tornado.httputil.HTTPHeaders` or `dict`
- :arg body: HTTP request body as a string (byte or unicode; if unicode
- the utf-8 encoding will be used)
- :arg body_producer: Callable used for lazy/asynchronous request bodies.
- It is called with one argument, a ``write`` function, and should
- return a `.Future`. It should call the write function with new
- data as it becomes available. The write function returns a
- `.Future` which can be used for flow control.
- Only one of ``body`` and ``body_producer`` may
- be specified. ``body_producer`` is not supported on
- ``curl_httpclient``. When using ``body_producer`` it is recommended
- to pass a ``Content-Length`` in the headers as otherwise chunked
- encoding will be used, and many servers do not support chunked
- encoding on requests. New in Tornado 4.0.
- :arg string auth_username: Username for HTTP authentication
- :arg string auth_password: Password for HTTP authentication
- :arg string auth_mode: Authentication mode; default is "basic".
- Allowed values are implementation-defined; ``curl_httpclient``
- supports "basic" and "digest"; ``simple_httpclient`` only supports
- "basic"
- :arg float connect_timeout: Timeout for initial connection in seconds,
- default 20 seconds
- :arg float request_timeout: Timeout for entire request in seconds,
- default 20 seconds
- :arg if_modified_since: Timestamp for ``If-Modified-Since`` header
- :type if_modified_since: `datetime` or `float`
- :arg bool follow_redirects: Should redirects be followed automatically
- or return the 3xx response? Default True.
- :arg int max_redirects: Limit for ``follow_redirects``, default 5.
- :arg string user_agent: String to send as ``User-Agent`` header
- :arg bool decompress_response: Request a compressed response from
- the server and decompress it after downloading. Default is True.
- New in Tornado 4.0.
- :arg bool use_gzip: Deprecated alias for ``decompress_response``
- since Tornado 4.0.
- :arg string network_interface: Network interface to use for request.
- ``curl_httpclient`` only; see note below.
- :arg callable streaming_callback: If set, ``streaming_callback`` will
- be run with each chunk of data as it is received, and
- ``HTTPResponse.body`` and ``HTTPResponse.buffer`` will be empty in
- the final response.
- :arg callable header_callback: If set, ``header_callback`` will
- be run with each header line as it is received (including the
- first line, e.g. ``HTTP/1.0 200 OK\r\n``, and a final line
- containing only ``\r\n``. All lines include the trailing newline
- characters). ``HTTPResponse.headers`` will be empty in the final
- response. This is most useful in conjunction with
- ``streaming_callback``, because it's the only way to get access to
- header data while the request is in progress.
- :arg callable prepare_curl_callback: If set, will be called with
- a ``pycurl.Curl`` object to allow the application to make additional
- ``setopt`` calls.
- :arg string proxy_host: HTTP proxy hostname. To use proxies,
- ``proxy_host`` and ``proxy_port`` must be set; ``proxy_username``,
- ``proxy_password`` and ``proxy_auth_mode`` are optional. Proxies are
- currently only supported with ``curl_httpclient``.
- :arg int proxy_port: HTTP proxy port
- :arg string proxy_username: HTTP proxy username
- :arg string proxy_password: HTTP proxy password
- :arg string proxy_auth_mode: HTTP proxy authentication mode;
- default is "basic"; supports "basic" and "digest"
- :arg bool allow_nonstandard_methods: Allow unknown values for ``method``
- argument? Default is False.
- :arg bool validate_cert: For HTTPS requests, validate the server's
- certificate? Default is True.
- :arg string ca_certs: filename of CA certificates in PEM format,
- or None to use defaults. See note below when used with
- ``curl_httpclient``.
- :arg string client_key: Filename for client SSL key, if any. See
- note below when used with ``curl_httpclient``.
- :arg string client_cert: Filename for client SSL certificate, if any.
- See note below when used with ``curl_httpclient``.
- :arg ssl.SSLContext ssl_options: `ssl.SSLContext` object for use in
- ``simple_httpclient`` (unsupported by ``curl_httpclient``).
- Overrides ``validate_cert``, ``ca_certs``, ``client_key``,
- and ``client_cert``.
- :arg bool allow_ipv6: Use IPv6 when available? Default is true.
- :arg bool expect_100_continue: If true, send the
- ``Expect: 100-continue`` header and wait for a continue response
- before sending the request body. Only supported with
- simple_httpclient.
-
- .. note::
-
- When using ``curl_httpclient`` certain options may be
- inherited by subsequent fetches because ``pycurl`` does
- not allow them to be cleanly reset. This applies to the
- ``ca_certs``, ``client_key``, ``client_cert``, and
- ``network_interface`` arguments. If you use these
- options, you should pass them on every request (you don't
- have to always use the same values, but it's not possible
- to mix requests that specify these options with ones that
- use the defaults).
-
- .. versionadded:: 3.1
- The ``auth_mode`` argument.
-
- .. versionadded:: 4.0
- The ``body_producer`` and ``expect_100_continue`` arguments.
-
- .. versionadded:: 4.2
- The ``ssl_options`` argument.
-
- .. versionadded:: 4.5
- The ``proxy_auth_mode`` argument.
- """
- # Note that some of these attributes go through property setters
- # defined below.
- self.headers = headers
- if if_modified_since:
- self.headers["If-Modified-Since"] = httputil.format_timestamp(
- if_modified_since)
- self.proxy_host = proxy_host
- self.proxy_port = proxy_port
- self.proxy_username = proxy_username
- self.proxy_password = proxy_password
- self.proxy_auth_mode = proxy_auth_mode
- self.url = url
- self.method = method
- self.body = body
- self.body_producer = body_producer
- self.auth_username = auth_username
- self.auth_password = auth_password
- self.auth_mode = auth_mode
- self.connect_timeout = connect_timeout
- self.request_timeout = request_timeout
- self.follow_redirects = follow_redirects
- self.max_redirects = max_redirects
- self.user_agent = user_agent
- if decompress_response is not None:
- self.decompress_response = decompress_response
- else:
- self.decompress_response = use_gzip
- self.network_interface = network_interface
- self.streaming_callback = streaming_callback
- self.header_callback = header_callback
- self.prepare_curl_callback = prepare_curl_callback
- self.allow_nonstandard_methods = allow_nonstandard_methods
- self.validate_cert = validate_cert
- self.ca_certs = ca_certs
- self.allow_ipv6 = allow_ipv6
- self.client_key = client_key
- self.client_cert = client_cert
- self.ssl_options = ssl_options
- self.expect_100_continue = expect_100_continue
- self.start_time = time.time()
-
- @property
- def headers(self):
- return self._headers
-
- @headers.setter
- def headers(self, value):
- if value is None:
- self._headers = httputil.HTTPHeaders()
- else:
- self._headers = value
-
- @property
- def body(self):
- return self._body
-
- @body.setter
- def body(self, value):
- self._body = utf8(value)
-
- @property
- def body_producer(self):
- return self._body_producer
-
- @body_producer.setter
- def body_producer(self, value):
- self._body_producer = stack_context.wrap(value)
-
- @property
- def streaming_callback(self):
- return self._streaming_callback
-
- @streaming_callback.setter
- def streaming_callback(self, value):
- self._streaming_callback = stack_context.wrap(value)
-
- @property
- def header_callback(self):
- return self._header_callback
-
- @header_callback.setter
- def header_callback(self, value):
- self._header_callback = stack_context.wrap(value)
-
- @property
- def prepare_curl_callback(self):
- return self._prepare_curl_callback
-
- @prepare_curl_callback.setter
- def prepare_curl_callback(self, value):
- self._prepare_curl_callback = stack_context.wrap(value)
-
-
-class HTTPResponse(object):
- """HTTP Response object.
-
- Attributes:
-
- * request: HTTPRequest object
-
- * code: numeric HTTP status code, e.g. 200 or 404
-
- * reason: human-readable reason phrase describing the status code
-
- * headers: `tornado.httputil.HTTPHeaders` object
-
- * effective_url: final location of the resource after following any
- redirects
-
- * buffer: ``cStringIO`` object for response body
-
- * body: response body as bytes (created on demand from ``self.buffer``)
-
- * error: Exception object, if any
-
- * request_time: seconds from request start to finish
-
- * time_info: dictionary of diagnostic timing information from the request.
- Available data are subject to change, but currently uses timings
- available from http://curl.haxx.se/libcurl/c/curl_easy_getinfo.html,
- plus ``queue``, which is the delay (if any) introduced by waiting for
- a slot under `AsyncHTTPClient`'s ``max_clients`` setting.
- """
- def __init__(self, request, code, headers=None, buffer=None,
- effective_url=None, error=None, request_time=None,
- time_info=None, reason=None):
- if isinstance(request, _RequestProxy):
- self.request = request.request
- else:
- self.request = request
- self.code = code
- self.reason = reason or httputil.responses.get(code, "Unknown")
- if headers is not None:
- self.headers = headers
- else:
- self.headers = httputil.HTTPHeaders()
- self.buffer = buffer
- self._body = None
- if effective_url is None:
- self.effective_url = request.url
- else:
- self.effective_url = effective_url
- if error is None:
- if self.code < 200 or self.code >= 300:
- self.error = HTTPError(self.code, message=self.reason,
- response=self)
- else:
- self.error = None
- else:
- self.error = error
- self.request_time = request_time
- self.time_info = time_info or {}
-
- @property
- def body(self):
- if self.buffer is None:
- return None
- elif self._body is None:
- self._body = self.buffer.getvalue()
-
- return self._body
-
- def rethrow(self):
- """If there was an error on the request, raise an `HTTPError`."""
- if self.error:
- raise self.error
-
- def __repr__(self):
- args = ",".join("%s=%r" % i for i in sorted(self.__dict__.items()))
- return "%s(%s)" % (self.__class__.__name__, args)
-
-
-class HTTPError(Exception):
- """Exception thrown for an unsuccessful HTTP request.
-
- Attributes:
-
- * ``code`` - integer HTTP error code, e.g. 404. Error code 599 is
- used when no HTTP response was received, e.g. for a timeout.
-
- * ``response`` - `HTTPResponse` object, if any.
-
- Note that if ``follow_redirects`` is False, redirects become HTTPErrors,
- and you can look at ``error.response.headers['Location']`` to see the
- destination of the redirect.
- """
- def __init__(self, code, message=None, response=None):
- self.code = code
- self.message = message or httputil.responses.get(code, "Unknown")
- self.response = response
- super(HTTPError, self).__init__(code, message, response)
-
- def __str__(self):
- return "HTTP %d: %s" % (self.code, self.message)
-
- # There is a cyclic reference between self and self.response,
- # which breaks the default __repr__ implementation.
- # (especially on pypy, which doesn't have the same recursion
- # detection as cpython).
- __repr__ = __str__
-
-
-class _RequestProxy(object):
- """Combines an object with a dictionary of defaults.
-
- Used internally by AsyncHTTPClient implementations.
- """
- def __init__(self, request, defaults):
- self.request = request
- self.defaults = defaults
-
- def __getattr__(self, name):
- request_attr = getattr(self.request, name)
- if request_attr is not None:
- return request_attr
- elif self.defaults is not None:
- return self.defaults.get(name, None)
- else:
- return None
-
-
-def main():
- from tornado.options import define, options, parse_command_line
- define("print_headers", type=bool, default=False)
- define("print_body", type=bool, default=True)
- define("follow_redirects", type=bool, default=True)
- define("validate_cert", type=bool, default=True)
- args = parse_command_line()
- client = HTTPClient()
- for arg in args:
- try:
- response = client.fetch(arg,
- follow_redirects=options.follow_redirects,
- validate_cert=options.validate_cert,
- )
- except HTTPError as e:
- if e.response is not None:
- response = e.response
- else:
- raise
- if options.print_headers:
- print(response.headers)
- if options.print_body:
- print(native_str(response.body))
- client.close()
-
-
-if __name__ == "__main__":
- main()
+"""Blocking and non-blocking HTTP client interfaces.
+
+This module defines a common interface shared by two implementations,
+``simple_httpclient`` and ``curl_httpclient``. Applications may either
+instantiate their chosen implementation class directly or use the
+`AsyncHTTPClient` class from this module, which selects an implementation
+that can be overridden with the `AsyncHTTPClient.configure` method.
+
+The default implementation is ``simple_httpclient``, and this is expected
+to be suitable for most users' needs. However, some applications may wish
+to switch to ``curl_httpclient`` for reasons such as the following:
+
+* ``curl_httpclient`` has some features not found in ``simple_httpclient``,
+ including support for HTTP proxies and the ability to use a specified
+ network interface.
+
+* ``curl_httpclient`` is more likely to be compatible with sites that are
+ not-quite-compliant with the HTTP spec, or sites that use little-exercised
+ features of HTTP.
+
+* ``curl_httpclient`` is faster.
+
+* ``curl_httpclient`` was the default prior to Tornado 2.0.
+
+Note that if you are using ``curl_httpclient``, it is highly
+recommended that you use a recent version of ``libcurl`` and
+``pycurl``. Currently the minimum supported version of libcurl is
+7.22.0, and the minimum version of pycurl is 7.18.2. It is highly
+recommended that your ``libcurl`` installation is built with
+asynchronous DNS resolver (threaded or c-ares), otherwise you may
+encounter various problems with request timeouts (for more
+information, see
+http://curl.haxx.se/libcurl/c/curl_easy_setopt.html#CURLOPTCONNECTTIMEOUTMS
+and comments in curl_httpclient.py).
+
+To select ``curl_httpclient``, call `AsyncHTTPClient.configure` at startup::
+
+ AsyncHTTPClient.configure("tornado.curl_httpclient.CurlAsyncHTTPClient")
+"""
+
+from __future__ import absolute_import, division, print_function
+
+import functools
+import time
+import weakref
+
+from tornado.concurrent import TracebackFuture
+from tornado.escape import utf8, native_str
+from tornado import httputil, stack_context
+from tornado.ioloop import IOLoop
+from tornado.util import Configurable
+
+
+class HTTPClient(object):
+ """A blocking HTTP client.
+
+ This interface is provided for convenience and testing; most applications
+ that are running an IOLoop will want to use `AsyncHTTPClient` instead.
+ Typical usage looks like this::
+
+ http_client = httpclient.HTTPClient()
+ try:
+ response = http_client.fetch("http://www.google.com/")
+ print(response.body)
+ except httpclient.HTTPError as e:
+ # HTTPError is raised for non-200 responses; the response
+ # can be found in e.response.
+ print("Error: " + str(e))
+ except Exception as e:
+ # Other errors are possible, such as IOError.
+ print("Error: " + str(e))
+ http_client.close()
+ """
+ def __init__(self, async_client_class=None, **kwargs):
+ self._io_loop = IOLoop(make_current=False)
+ if async_client_class is None:
+ async_client_class = AsyncHTTPClient
+ self._async_client = async_client_class(self._io_loop, **kwargs)
+ self._closed = False
+
+ def __del__(self):
+ self.close()
+
+ def close(self):
+ """Closes the HTTPClient, freeing any resources used."""
+ if not self._closed:
+ self._async_client.close()
+ self._io_loop.close()
+ self._closed = True
+
+ def fetch(self, request, **kwargs):
+ """Executes a request, returning an `HTTPResponse`.
+
+ The request may be either a string URL or an `HTTPRequest` object.
+ If it is a string, we construct an `HTTPRequest` using any additional
+ kwargs: ``HTTPRequest(request, **kwargs)``
+
+ If an error occurs during the fetch, we raise an `HTTPError` unless
+ the ``raise_error`` keyword argument is set to False.
+ """
+ response = self._io_loop.run_sync(functools.partial(
+ self._async_client.fetch, request, **kwargs))
+ return response
+
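+ # For example (sketch), these two calls are equivalent; kwargs are
+ # forwarded to the HTTPRequest constructor:
+ #
+ #     client.fetch("http://example.com/", method="POST", body="data")
+ #     client.fetch(HTTPRequest("http://example.com/",
+ #                              method="POST", body="data"))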
+
+class AsyncHTTPClient(Configurable):
+ """An non-blocking HTTP client.
+
+ Example usage::
+
+ def handle_response(response):
+ if response.error:
+ print("Error: %s" % response.error)
+ else:
+ print(response.body)
+
+ http_client = AsyncHTTPClient()
+ http_client.fetch("http://www.google.com/", handle_response)
+
+ The constructor for this class is magic in several respects: It
+ actually creates an instance of an implementation-specific
+ subclass, and instances are reused as a kind of pseudo-singleton
+ (one per `.IOLoop`). The keyword argument ``force_instance=True``
+ can be used to suppress this singleton behavior. Unless
+ ``force_instance=True`` is used, no arguments other than
+ ``io_loop`` should be passed to the `AsyncHTTPClient` constructor.
+ The implementation subclass as well as arguments to its
+ constructor can be set with the static method `configure()`.
+
+ All `AsyncHTTPClient` implementations support a ``defaults``
+ keyword argument, which can be used to set default values for
+ `HTTPRequest` attributes. For example::
+
+ AsyncHTTPClient.configure(
+ None, defaults=dict(user_agent="MyUserAgent"))
+ # or with force_instance:
+ client = AsyncHTTPClient(force_instance=True,
+ defaults=dict(user_agent="MyUserAgent"))
+
+ .. versionchanged:: 4.1
+ The ``io_loop`` argument is deprecated.
+ """
+ @classmethod
+ def configurable_base(cls):
+ return AsyncHTTPClient
+
+ @classmethod
+ def configurable_default(cls):
+ from tornado.simple_httpclient import SimpleAsyncHTTPClient
+ return SimpleAsyncHTTPClient
+
+ @classmethod
+ def _async_clients(cls):
+ attr_name = '_async_client_dict_' + cls.__name__
+ if not hasattr(cls, attr_name):
+ setattr(cls, attr_name, weakref.WeakKeyDictionary())
+ return getattr(cls, attr_name)
+
+ def __new__(cls, io_loop=None, force_instance=False, **kwargs):
+ io_loop = io_loop or IOLoop.current()
+ if force_instance:
+ instance_cache = None
+ else:
+ instance_cache = cls._async_clients()
+ if instance_cache is not None and io_loop in instance_cache:
+ return instance_cache[io_loop]
+ instance = super(AsyncHTTPClient, cls).__new__(cls, io_loop=io_loop,
+ **kwargs)
+ # Make sure the instance knows which cache to remove itself from.
+ # It can't simply call _async_clients() because we may be in
+ # __new__(AsyncHTTPClient) but instance.__class__ may be
+ # SimpleAsyncHTTPClient.
+ instance._instance_cache = instance_cache
+ if instance_cache is not None:
+ instance_cache[instance.io_loop] = instance
+ return instance
+
+ def initialize(self, io_loop, defaults=None):
+ self.io_loop = io_loop
+ self.defaults = dict(HTTPRequest._DEFAULTS)
+ if defaults is not None:
+ self.defaults.update(defaults)
+ self._closed = False
+
+ def close(self):
+ """Destroys this HTTP client, freeing any file descriptors used.
+
+ This method is **not needed in normal use** due to the way
+ that `AsyncHTTPClient` objects are transparently reused.
+ ``close()`` is generally only necessary when either the
+ `.IOLoop` is also being closed, or the ``force_instance=True``
+ argument was used when creating the `AsyncHTTPClient`.
+
+ No other methods may be called on the `AsyncHTTPClient` after
+ ``close()``.
+
+ """
+ if self._closed:
+ return
+ self._closed = True
+ if self._instance_cache is not None:
+ if self._instance_cache.get(self.io_loop) is not self:
+ raise RuntimeError("inconsistent AsyncHTTPClient cache")
+ del self._instance_cache[self.io_loop]
+
+ def fetch(self, request, callback=None, raise_error=True, **kwargs):
+ """Executes a request, asynchronously returning an `HTTPResponse`.
+
+ The request may be either a string URL or an `HTTPRequest` object.
+ If it is a string, we construct an `HTTPRequest` using any additional
+ kwargs: ``HTTPRequest(request, **kwargs)``
+
+ This method returns a `.Future` whose result is an
+ `HTTPResponse`. By default, the ``Future`` will raise an
+ `HTTPError` if the request returned a non-200 response code
+ (other errors may also be raised if the server could not be
+ contacted). Instead, if ``raise_error`` is set to False, the
+ response will always be returned regardless of the response
+ code.
+
+ If a ``callback`` is given, it will be invoked with the `HTTPResponse`.
+ In the callback interface, `HTTPError` is not automatically raised.
+ Instead, you must check the response's ``error`` attribute or
+ call its `~HTTPResponse.rethrow` method.
+ """
+ if self._closed:
+ raise RuntimeError("fetch() called on closed AsyncHTTPClient")
+ if not isinstance(request, HTTPRequest):
+ request = HTTPRequest(url=request, **kwargs)
+ else:
+ if kwargs:
+ raise ValueError("kwargs can't be used if request is an HTTPRequest object")
+ # We may modify this (to add Host, Accept-Encoding, etc),
+ # so make sure we don't modify the caller's object. This is also
+ # where normal dicts get converted to HTTPHeaders objects.
+ request.headers = httputil.HTTPHeaders(request.headers)
+ request = _RequestProxy(request, self.defaults)
+ future = TracebackFuture()
+ if callback is not None:
+ callback = stack_context.wrap(callback)
+
+ def handle_future(future):
+ exc = future.exception()
+ if isinstance(exc, HTTPError) and exc.response is not None:
+ response = exc.response
+ elif exc is not None:
+ response = HTTPResponse(
+ request, 599, error=exc,
+ request_time=time.time() - request.start_time)
+ else:
+ response = future.result()
+ self.io_loop.add_callback(callback, response)
+ future.add_done_callback(handle_future)
+
+ def handle_response(response):
+ if raise_error and response.error:
+ future.set_exception(response.error)
+ else:
+ future.set_result(response)
+ self.fetch_impl(request, handle_response)
+ return future
+
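+ # In coroutine style (sketch; assumes ``from tornado import gen``), the
+ # returned Future is simply yielded:
+ #
+ #     @gen.coroutine
+ #     def get_code(url):
+ #         response = yield AsyncHTTPClient().fetch(url, raise_error=False)
+ #         raise gen.Return(response.code)
+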
+ def fetch_impl(self, request, callback):
+ raise NotImplementedError()
+
+ @classmethod
+ def configure(cls, impl, **kwargs):
+ """Configures the `AsyncHTTPClient` subclass to use.
+
+ ``AsyncHTTPClient()`` actually creates an instance of a subclass.
+ This method may be called with either a class object or the
+ fully-qualified name of such a class (or ``None`` to use the default,
+ ``SimpleAsyncHTTPClient``).
+
+ If additional keyword arguments are given, they will be passed
+ to the constructor of each subclass instance created. The
+ keyword argument ``max_clients`` determines the maximum number
+ of simultaneous `~AsyncHTTPClient.fetch()` operations that can
+ execute in parallel on each `.IOLoop`. Additional arguments
+ may be supported depending on the implementation class in use.
+
+ Example::
+
+ AsyncHTTPClient.configure("tornado.curl_httpclient.CurlAsyncHTTPClient")
+ """
+ super(AsyncHTTPClient, cls).configure(impl, **kwargs)
+
+
+class HTTPRequest(object):
+ """HTTP client request object."""
+
+ # Default values for HTTPRequest parameters.
+ # Merged with the values on the request object by AsyncHTTPClient
+ # implementations.
+ _DEFAULTS = dict(
+ connect_timeout=20.0,
+ request_timeout=20.0,
+ follow_redirects=True,
+ max_redirects=5,
+ decompress_response=True,
+ proxy_password='',
+ allow_nonstandard_methods=False,
+ validate_cert=True)
+
+ def __init__(self, url, method="GET", headers=None, body=None,
+ auth_username=None, auth_password=None, auth_mode=None,
+ connect_timeout=None, request_timeout=None,
+ if_modified_since=None, follow_redirects=None,
+ max_redirects=None, user_agent=None, use_gzip=None,
+ network_interface=None, streaming_callback=None,
+ header_callback=None, prepare_curl_callback=None,
+ proxy_host=None, proxy_port=None, proxy_username=None,
+ proxy_password=None, proxy_auth_mode=None,
+ allow_nonstandard_methods=None, validate_cert=None,
+ ca_certs=None, allow_ipv6=None, client_key=None,
+ client_cert=None, body_producer=None,
+ expect_100_continue=False, decompress_response=None,
+ ssl_options=None):
+ r"""All parameters except ``url`` are optional.
+
+ :arg string url: URL to fetch
+ :arg string method: HTTP method, e.g. "GET" or "POST"
+ :arg headers: Additional HTTP headers to pass on the request
+ :type headers: `~tornado.httputil.HTTPHeaders` or `dict`
+ :arg body: HTTP request body as a string (byte or unicode; if unicode
+ the utf-8 encoding will be used)
+ :arg body_producer: Callable used for lazy/asynchronous request bodies.
+ It is called with one argument, a ``write`` function, and should
+ return a `.Future`. It should call the write function with new
+ data as it becomes available. The write function returns a
+ `.Future` which can be used for flow control.
+ Only one of ``body`` and ``body_producer`` may
+ be specified. ``body_producer`` is not supported on
+ ``curl_httpclient``. When using ``body_producer`` it is recommended
+ to pass a ``Content-Length`` in the headers as otherwise chunked
+ encoding will be used, and many servers do not support chunked
+ encoding on requests. New in Tornado 4.0.
+ :arg string auth_username: Username for HTTP authentication
+ :arg string auth_password: Password for HTTP authentication
+ :arg string auth_mode: Authentication mode; default is "basic".
+ Allowed values are implementation-defined; ``curl_httpclient``
+ supports "basic" and "digest"; ``simple_httpclient`` only supports
+ "basic"
+ :arg float connect_timeout: Timeout for initial connection in seconds,
+ default 20 seconds
+ :arg float request_timeout: Timeout for entire request in seconds,
+ default 20 seconds
+ :arg if_modified_since: Timestamp for ``If-Modified-Since`` header
+ :type if_modified_since: `datetime` or `float`
+ :arg bool follow_redirects: Should redirects be followed automatically
+ or return the 3xx response? Default True.
+ :arg int max_redirects: Limit for ``follow_redirects``, default 5.
+ :arg string user_agent: String to send as ``User-Agent`` header
+ :arg bool decompress_response: Request a compressed response from
+ the server and decompress it after downloading. Default is True.
+ New in Tornado 4.0.
+ :arg bool use_gzip: Deprecated alias for ``decompress_response``
+ since Tornado 4.0.
+ :arg string network_interface: Network interface to use for request.
+ ``curl_httpclient`` only; see note below.
+ :arg callable streaming_callback: If set, ``streaming_callback`` will
+ be run with each chunk of data as it is received, and
+ ``HTTPResponse.body`` and ``HTTPResponse.buffer`` will be empty in
+ the final response.
+ :arg callable header_callback: If set, ``header_callback`` will
+ be run with each header line as it is received (including the
+ first line, e.g. ``HTTP/1.0 200 OK\r\n``, and a final line
+ containing only ``\r\n``. All lines include the trailing newline
+ characters). ``HTTPResponse.headers`` will be empty in the final
+ response. This is most useful in conjunction with
+ ``streaming_callback``, because it's the only way to get access to
+ header data while the request is in progress.
+ :arg callable prepare_curl_callback: If set, will be called with
+ a ``pycurl.Curl`` object to allow the application to make additional
+ ``setopt`` calls.
+ :arg string proxy_host: HTTP proxy hostname. To use proxies,
+ ``proxy_host`` and ``proxy_port`` must be set; ``proxy_username``,
+ ``proxy_password`` and ``proxy_auth_mode`` are optional. Proxies are
+ currently only supported with ``curl_httpclient``.
+ :arg int proxy_port: HTTP proxy port
+ :arg string proxy_username: HTTP proxy username
+ :arg string proxy_password: HTTP proxy password
+ :arg string proxy_auth_mode: HTTP proxy authentication mode;
+ default is "basic"; supports "basic" and "digest"
+ :arg bool allow_nonstandard_methods: Allow unknown values for ``method``
+ argument? Default is False.
+ :arg bool validate_cert: For HTTPS requests, validate the server's
+ certificate? Default is True.
+ :arg string ca_certs: filename of CA certificates in PEM format,
+ or None to use defaults. See note below when used with
+ ``curl_httpclient``.
+ :arg string client_key: Filename for client SSL key, if any. See
+ note below when used with ``curl_httpclient``.
+ :arg string client_cert: Filename for client SSL certificate, if any.
+ See note below when used with ``curl_httpclient``.
+ :arg ssl.SSLContext ssl_options: `ssl.SSLContext` object for use in
+ ``simple_httpclient`` (unsupported by ``curl_httpclient``).
+ Overrides ``validate_cert``, ``ca_certs``, ``client_key``,
+ and ``client_cert``.
+ :arg bool allow_ipv6: Use IPv6 when available? Default is true.
+ :arg bool expect_100_continue: If true, send the
+ ``Expect: 100-continue`` header and wait for a continue response
+ before sending the request body. Only supported with
+ simple_httpclient.
+
+ .. note::
+
+ When using ``curl_httpclient`` certain options may be
+ inherited by subsequent fetches because ``pycurl`` does
+ not allow them to be cleanly reset. This applies to the
+ ``ca_certs``, ``client_key``, ``client_cert``, and
+ ``network_interface`` arguments. If you use these
+ options, you should pass them on every request (you don't
+ have to always use the same values, but it's not possible
+ to mix requests that specify these options with ones that
+ use the defaults).
+
+ .. versionadded:: 3.1
+ The ``auth_mode`` argument.
+
+ .. versionadded:: 4.0
+ The ``body_producer`` and ``expect_100_continue`` arguments.
+
+ .. versionadded:: 4.2
+ The ``ssl_options`` argument.
+
+ .. versionadded:: 4.5
+ The ``proxy_auth_mode`` argument.
+ """
+ # Note that some of these attributes go through property setters
+ # defined below.
+ self.headers = headers
+ if if_modified_since:
+ self.headers["If-Modified-Since"] = httputil.format_timestamp(
+ if_modified_since)
+ self.proxy_host = proxy_host
+ self.proxy_port = proxy_port
+ self.proxy_username = proxy_username
+ self.proxy_password = proxy_password
+ self.proxy_auth_mode = proxy_auth_mode
+ self.url = url
+ self.method = method
+ self.body = body
+ self.body_producer = body_producer
+ self.auth_username = auth_username
+ self.auth_password = auth_password
+ self.auth_mode = auth_mode
+ self.connect_timeout = connect_timeout
+ self.request_timeout = request_timeout
+ self.follow_redirects = follow_redirects
+ self.max_redirects = max_redirects
+ self.user_agent = user_agent
+ if decompress_response is not None:
+ self.decompress_response = decompress_response
+ else:
+ self.decompress_response = use_gzip
+ self.network_interface = network_interface
+ self.streaming_callback = streaming_callback
+ self.header_callback = header_callback
+ self.prepare_curl_callback = prepare_curl_callback
+ self.allow_nonstandard_methods = allow_nonstandard_methods
+ self.validate_cert = validate_cert
+ self.ca_certs = ca_certs
+ self.allow_ipv6 = allow_ipv6
+ self.client_key = client_key
+ self.client_cert = client_cert
+ self.ssl_options = ssl_options
+ self.expect_100_continue = expect_100_continue
+ self.start_time = time.time()
+
+ @property
+ def headers(self):
+ return self._headers
+
+ @headers.setter
+ def headers(self, value):
+ if value is None:
+ self._headers = httputil.HTTPHeaders()
+ else:
+ self._headers = value
+
+ @property
+ def body(self):
+ return self._body
+
+ @body.setter
+ def body(self, value):
+ self._body = utf8(value)
+
+ @property
+ def body_producer(self):
+ return self._body_producer
+
+ @body_producer.setter
+ def body_producer(self, value):
+ self._body_producer = stack_context.wrap(value)
+
+ @property
+ def streaming_callback(self):
+ return self._streaming_callback
+
+ @streaming_callback.setter
+ def streaming_callback(self, value):
+ self._streaming_callback = stack_context.wrap(value)
+
+ @property
+ def header_callback(self):
+ return self._header_callback
+
+ @header_callback.setter
+ def header_callback(self, value):
+ self._header_callback = stack_context.wrap(value)
+
+ @property
+ def prepare_curl_callback(self):
+ return self._prepare_curl_callback
+
+ @prepare_curl_callback.setter
+ def prepare_curl_callback(self, value):
+ self._prepare_curl_callback = stack_context.wrap(value)
+
+
+class HTTPResponse(object):
+ """HTTP Response object.
+
+ Attributes:
+
+ * request: HTTPRequest object
+
+ * code: numeric HTTP status code, e.g. 200 or 404
+
+ * reason: human-readable reason phrase describing the status code
+
+ * headers: `tornado.httputil.HTTPHeaders` object
+
+ * effective_url: final location of the resource after following any
+ redirects
+
+ * buffer: ``cStringIO`` object for response body (an ``io.BytesIO`` under Python 3)
+
+ * body: response body as bytes (created on demand from ``self.buffer``)
+
+ * error: Exception object, if any
+
+ * request_time: seconds from request start to finish
+
+ * time_info: dictionary of diagnostic timing information from the request.
+ Available data are subject to change, but currently uses timings
+ available from http://curl.haxx.se/libcurl/c/curl_easy_getinfo.html,
+ plus ``queue``, which is the delay (if any) introduced by waiting for
+ a slot under `AsyncHTTPClient`'s ``max_clients`` setting.
+ """
+ def __init__(self, request, code, headers=None, buffer=None,
+ effective_url=None, error=None, request_time=None,
+ time_info=None, reason=None):
+ if isinstance(request, _RequestProxy):
+ self.request = request.request
+ else:
+ self.request = request
+ self.code = code
+ self.reason = reason or httputil.responses.get(code, "Unknown")
+ if headers is not None:
+ self.headers = headers
+ else:
+ self.headers = httputil.HTTPHeaders()
+ self.buffer = buffer
+ self._body = None
+ if effective_url is None:
+ self.effective_url = request.url
+ else:
+ self.effective_url = effective_url
+ if error is None:
+ if self.code < 200 or self.code >= 300:
+ self.error = HTTPError(self.code, message=self.reason,
+ response=self)
+ else:
+ self.error = None
+ else:
+ self.error = error
+ self.request_time = request_time
+ self.time_info = time_info or {}
+
+ @property
+ def body(self):
+ if self.buffer is None:
+ return None
+ elif self._body is None:
+ self._body = self.buffer.getvalue()
+
+ return self._body
+
+ def rethrow(self):
+ """If there was an error on the request, raise an `HTTPError`."""
+ if self.error:
+ raise self.error
+
+ def __repr__(self):
+ args = ",".join("%s=%r" % i for i in sorted(self.__dict__.items()))
+ return "%s(%s)" % (self.__class__.__name__, args)
+
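+# Illustrative sketch, not part of Tornado: typical consumption of a
+# response returned by HTTPClient.fetch:
+#
+#     response = HTTPClient().fetch("http://example.com/")
+#     response.code      # e.g. 200
+#     response.body      # body bytes, read lazily from self.buffer
+#     response.rethrow() # raises self.error if one was set
+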
+
+class HTTPError(Exception):
+ """Exception thrown for an unsuccessful HTTP request.
+
+ Attributes:
+
+ * ``code`` - HTTP error integer error code, e.g. 404. Error code 599 is
+ used when no HTTP response was received, e.g. for a timeout.
+
+ * ``response`` - `HTTPResponse` object, if any.
+
+ Note that if ``follow_redirects`` is False, redirects become HTTPErrors,
+ and you can look at ``error.response.headers['Location']`` to see the
+ destination of the redirect.
+ """
+ def __init__(self, code, message=None, response=None):
+ self.code = code
+ self.message = message or httputil.responses.get(code, "Unknown")
+ self.response = response
+ super(HTTPError, self).__init__(code, message, response)
+
+ def __str__(self):
+ return "HTTP %d: %s" % (self.code, self.message)
+
+ # There is a cyclic reference between self and self.response,
+ # which breaks the default __repr__ implementation.
+ # (especially on pypy, which doesn't have the same recursion
+ # detection as cpython).
+ __repr__ = __str__
+
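+# Illustrative sketch, not part of Tornado: with follow_redirects=False
+# a 3xx response is raised as an HTTPError, and the redirect target can
+# be read from the attached response, as described in the docstring above:
+#
+#     try:
+#         HTTPClient().fetch("http://example.com/old", follow_redirects=False)
+#     except HTTPError as e:
+#         if e.response is not None and 300 <= e.code < 400:
+#             print(e.response.headers["Location"])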
+
+class _RequestProxy(object):
+ """Combines an object with a dictionary of defaults.
+
+ Used internally by AsyncHTTPClient implementations.
+ """
+ def __init__(self, request, defaults):
+ self.request = request
+ self.defaults = defaults
+
+ def __getattr__(self, name):
+ request_attr = getattr(self.request, name)
+ if request_attr is not None:
+ return request_attr
+ elif self.defaults is not None:
+ return self.defaults.get(name, None)
+ else:
+ return None
+
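+# Illustrative sketch, not part of Tornado's public API: attributes left
+# as None on the request fall through to the defaults dictionary:
+#
+#     proxy = _RequestProxy(HTTPRequest("http://example.com/"),
+#                           dict(connect_timeout=5.0))
+#     proxy.url              # "http://example.com/", from the request
+#     proxy.connect_timeout  # 5.0, from the defaults
+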
+
+def main():
+ from tornado.options import define, options, parse_command_line
+ define("print_headers", type=bool, default=False)
+ define("print_body", type=bool, default=True)
+ define("follow_redirects", type=bool, default=True)
+ define("validate_cert", type=bool, default=True)
+ args = parse_command_line()
+ client = HTTPClient()
+ for arg in args:
+ try:
+ response = client.fetch(arg,
+ follow_redirects=options.follow_redirects,
+ validate_cert=options.validate_cert,
+ )
+ except HTTPError as e:
+ if e.response is not None:
+ response = e.response
+ else:
+ raise
+ if options.print_headers:
+ print(response.headers)
+ if options.print_body:
+ print(native_str(response.body))
+ client.close()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/contrib/python/tornado/tornado-4/tornado/httpserver.py b/contrib/python/tornado/tornado-4/tornado/httpserver.py
index d757be188d..90fb01b8d6 100644
--- a/contrib/python/tornado/tornado-4/tornado/httpserver.py
+++ b/contrib/python/tornado/tornado-4/tornado/httpserver.py
@@ -1,325 +1,325 @@
-#!/usr/bin/env python
-#
-# Copyright 2009 Facebook
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""A non-blocking, single-threaded HTTP server.
-
-Typical applications have little direct interaction with the `HTTPServer`
-class except to start a server at the beginning of the process
-(and even that is often done indirectly via `tornado.web.Application.listen`).
-
-.. versionchanged:: 4.0
-
- The ``HTTPRequest`` class that used to live in this module has been moved
- to `tornado.httputil.HTTPServerRequest`. The old name remains as an alias.
-"""
-
-from __future__ import absolute_import, division, print_function
-
-import socket
-
-from tornado.escape import native_str
-from tornado.http1connection import HTTP1ServerConnection, HTTP1ConnectionParameters
-from tornado import gen
-from tornado import httputil
-from tornado import iostream
-from tornado import netutil
-from tornado.tcpserver import TCPServer
-from tornado.util import Configurable
-
-
-class HTTPServer(TCPServer, Configurable,
- httputil.HTTPServerConnectionDelegate):
- r"""A non-blocking, single-threaded HTTP server.
-
- A server is defined by a subclass of `.HTTPServerConnectionDelegate`,
- or, for backwards compatibility, a callback that takes an
- `.HTTPServerRequest` as an argument. The delegate is usually a
- `tornado.web.Application`.
-
- `HTTPServer` supports keep-alive connections by default
- (automatically for HTTP/1.1, or for HTTP/1.0 when the client
- requests ``Connection: keep-alive``).
-
- If ``xheaders`` is ``True``, we support the
- ``X-Real-Ip``/``X-Forwarded-For`` and
- ``X-Scheme``/``X-Forwarded-Proto`` headers, which override the
- remote IP and URI scheme/protocol for all requests. These headers
- are useful when running Tornado behind a reverse proxy or load
- balancer. The ``protocol`` argument can also be set to ``https``
- if Tornado is run behind an SSL-decoding proxy that does not set one of
- the supported ``xheaders``.
-
- By default, when parsing the ``X-Forwarded-For`` header, Tornado will
- select the last (i.e., the closest) address on the list of hosts as the
- remote host IP address. To select the next server in the chain, a list of
- trusted downstream hosts may be passed as the ``trusted_downstream``
- argument. These hosts will be skipped when parsing the ``X-Forwarded-For``
- header.
-
- To make this server serve SSL traffic, send the ``ssl_options`` keyword
- argument with an `ssl.SSLContext` object. For compatibility with older
- versions of Python ``ssl_options`` may also be a dictionary of keyword
- arguments for the `ssl.wrap_socket` method.::
-
- ssl_ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
- ssl_ctx.load_cert_chain(os.path.join(data_dir, "mydomain.crt"),
- os.path.join(data_dir, "mydomain.key"))
- HTTPServer(application, ssl_options=ssl_ctx)
-
- `HTTPServer` initialization follows one of three patterns (the
- initialization methods are defined on `tornado.tcpserver.TCPServer`):
-
- 1. `~tornado.tcpserver.TCPServer.listen`: simple single-process::
-
- server = HTTPServer(app)
- server.listen(8888)
- IOLoop.current().start()
-
- In many cases, `tornado.web.Application.listen` can be used to avoid
- the need to explicitly create the `HTTPServer`.
-
- 2. `~tornado.tcpserver.TCPServer.bind`/`~tornado.tcpserver.TCPServer.start`:
- simple multi-process::
-
- server = HTTPServer(app)
- server.bind(8888)
- server.start(0) # Forks multiple sub-processes
- IOLoop.current().start()
-
- When using this interface, an `.IOLoop` must *not* be passed
- to the `HTTPServer` constructor. `~.TCPServer.start` will always start
- the server on the default singleton `.IOLoop`.
-
- 3. `~tornado.tcpserver.TCPServer.add_sockets`: advanced multi-process::
-
- sockets = tornado.netutil.bind_sockets(8888)
- tornado.process.fork_processes(0)
- server = HTTPServer(app)
- server.add_sockets(sockets)
- IOLoop.current().start()
-
- The `~.TCPServer.add_sockets` interface is more complicated,
- but it can be used with `tornado.process.fork_processes` to
- give you more flexibility in when the fork happens.
- `~.TCPServer.add_sockets` can also be used in single-process
- servers if you want to create your listening sockets in some
- way other than `tornado.netutil.bind_sockets`.
-
- .. versionchanged:: 4.0
- Added ``decompress_request``, ``chunk_size``, ``max_header_size``,
- ``idle_connection_timeout``, ``body_timeout``, ``max_body_size``
- arguments. Added support for `.HTTPServerConnectionDelegate`
- instances as ``request_callback``.
-
- .. versionchanged:: 4.1
- `.HTTPServerConnectionDelegate.start_request` is now called with
- two arguments ``(server_conn, request_conn)`` (in accordance with the
- documentation) instead of one ``(request_conn)``.
-
- .. versionchanged:: 4.2
- `HTTPServer` is now a subclass of `tornado.util.Configurable`.
-
- .. versionchanged:: 4.5
- Added the ``trusted_downstream`` argument.
- """
- def __init__(self, *args, **kwargs):
- # Ignore args to __init__; real initialization belongs in
- # initialize since we're Configurable. (there's something
- # weird in initialization order between this class,
- # Configurable, and TCPServer so we can't leave __init__ out
- # completely)
- pass
-
- def initialize(self, request_callback, no_keep_alive=False, io_loop=None,
- xheaders=False, ssl_options=None, protocol=None,
- decompress_request=False,
- chunk_size=None, max_header_size=None,
- idle_connection_timeout=None, body_timeout=None,
- max_body_size=None, max_buffer_size=None,
- trusted_downstream=None):
- self.request_callback = request_callback
- self.no_keep_alive = no_keep_alive
- self.xheaders = xheaders
- self.protocol = protocol
- self.conn_params = HTTP1ConnectionParameters(
- decompress=decompress_request,
- chunk_size=chunk_size,
- max_header_size=max_header_size,
- header_timeout=idle_connection_timeout or 3600,
- max_body_size=max_body_size,
- body_timeout=body_timeout,
- no_keep_alive=no_keep_alive)
- TCPServer.__init__(self, io_loop=io_loop, ssl_options=ssl_options,
- max_buffer_size=max_buffer_size,
- read_chunk_size=chunk_size)
- self._connections = set()
- self.trusted_downstream = trusted_downstream
-
- @classmethod
- def configurable_base(cls):
- return HTTPServer
-
- @classmethod
- def configurable_default(cls):
- return HTTPServer
-
- @gen.coroutine
- def close_all_connections(self):
- while self._connections:
- # Peek at an arbitrary element of the set
- conn = next(iter(self._connections))
- yield conn.close()
-
- def handle_stream(self, stream, address):
- context = _HTTPRequestContext(stream, address,
- self.protocol,
- self.trusted_downstream)
- conn = HTTP1ServerConnection(
- stream, self.conn_params, context)
- self._connections.add(conn)
- conn.start_serving(self)
-
- def start_request(self, server_conn, request_conn):
- if isinstance(self.request_callback, httputil.HTTPServerConnectionDelegate):
- delegate = self.request_callback.start_request(server_conn, request_conn)
- else:
- delegate = _CallableAdapter(self.request_callback, request_conn)
-
- if self.xheaders:
- delegate = _ProxyAdapter(delegate, request_conn)
-
- return delegate
-
- def on_close(self, server_conn):
- self._connections.remove(server_conn)
-
-
-class _CallableAdapter(httputil.HTTPMessageDelegate):
- def __init__(self, request_callback, request_conn):
- self.connection = request_conn
- self.request_callback = request_callback
- self.request = None
- self.delegate = None
- self._chunks = []
-
- def headers_received(self, start_line, headers):
- self.request = httputil.HTTPServerRequest(
- connection=self.connection, start_line=start_line,
- headers=headers)
-
- def data_received(self, chunk):
- self._chunks.append(chunk)
-
- def finish(self):
- self.request.body = b''.join(self._chunks)
- self.request._parse_body()
- self.request_callback(self.request)
-
- def on_connection_close(self):
- self._chunks = None
-
-
-class _HTTPRequestContext(object):
- def __init__(self, stream, address, protocol, trusted_downstream=None):
- self.address = address
- # Save the socket's address family now so we know how to
- # interpret self.address even after the stream is closed
- # and its socket attribute replaced with None.
- if stream.socket is not None:
- self.address_family = stream.socket.family
- else:
- self.address_family = None
- # In HTTPServerRequest we want an IP, not a full socket address.
- if (self.address_family in (socket.AF_INET, socket.AF_INET6) and
- address is not None):
- self.remote_ip = address[0]
- else:
- # Unix (or other) socket; fake the remote address.
- self.remote_ip = '0.0.0.0'
- if protocol:
- self.protocol = protocol
- elif isinstance(stream, iostream.SSLIOStream):
- self.protocol = "https"
- else:
- self.protocol = "http"
- self._orig_remote_ip = self.remote_ip
- self._orig_protocol = self.protocol
- self.trusted_downstream = set(trusted_downstream or [])
-
- def __str__(self):
- if self.address_family in (socket.AF_INET, socket.AF_INET6):
- return self.remote_ip
- elif isinstance(self.address, bytes):
- # Python 3 with the -bb option warns about str(bytes),
- # so convert it explicitly.
- # Unix socket addresses are str on mac but bytes on linux.
- return native_str(self.address)
- else:
- return str(self.address)
-
- def _apply_xheaders(self, headers):
- """Rewrite the ``remote_ip`` and ``protocol`` fields."""
- # Squid uses X-Forwarded-For, others use X-Real-Ip
- ip = headers.get("X-Forwarded-For", self.remote_ip)
- # Skip trusted downstream hosts in X-Forwarded-For list
- for ip in (cand.strip() for cand in reversed(ip.split(','))):
- if ip not in self.trusted_downstream:
- break
- ip = headers.get("X-Real-Ip", ip)
- if netutil.is_valid_ip(ip):
- self.remote_ip = ip
- # AWS uses X-Forwarded-Proto
- proto_header = headers.get(
- "X-Scheme", headers.get("X-Forwarded-Proto",
- self.protocol))
- if proto_header in ("http", "https"):
- self.protocol = proto_header
-
- def _unapply_xheaders(self):
- """Undo changes from `_apply_xheaders`.
-
- Xheaders are per-request so they should not leak to the next
- request on the same connection.
- """
- self.remote_ip = self._orig_remote_ip
- self.protocol = self._orig_protocol
-
-
-class _ProxyAdapter(httputil.HTTPMessageDelegate):
- def __init__(self, delegate, request_conn):
- self.connection = request_conn
- self.delegate = delegate
-
- def headers_received(self, start_line, headers):
- self.connection.context._apply_xheaders(headers)
- return self.delegate.headers_received(start_line, headers)
-
- def data_received(self, chunk):
- return self.delegate.data_received(chunk)
-
- def finish(self):
- self.delegate.finish()
- self._cleanup()
-
- def on_connection_close(self):
- self.delegate.on_connection_close()
- self._cleanup()
-
- def _cleanup(self):
- self.connection.context._unapply_xheaders()
-
-
-HTTPRequest = httputil.HTTPServerRequest
+#!/usr/bin/env python
+#
+# Copyright 2009 Facebook
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""A non-blocking, single-threaded HTTP server.
+
+Typical applications have little direct interaction with the `HTTPServer`
+class except to start a server at the beginning of the process
+(and even that is often done indirectly via `tornado.web.Application.listen`).
+
+.. versionchanged:: 4.0
+
+ The ``HTTPRequest`` class that used to live in this module has been moved
+ to `tornado.httputil.HTTPServerRequest`. The old name remains as an alias.
+"""
+
+from __future__ import absolute_import, division, print_function
+
+import socket
+
+from tornado.escape import native_str
+from tornado.http1connection import HTTP1ServerConnection, HTTP1ConnectionParameters
+from tornado import gen
+from tornado import httputil
+from tornado import iostream
+from tornado import netutil
+from tornado.tcpserver import TCPServer
+from tornado.util import Configurable
+
+
+class HTTPServer(TCPServer, Configurable,
+ httputil.HTTPServerConnectionDelegate):
+ r"""A non-blocking, single-threaded HTTP server.
+
+ A server is defined by a subclass of `.HTTPServerConnectionDelegate`,
+ or, for backwards compatibility, a callback that takes an
+ `.HTTPServerRequest` as an argument. The delegate is usually a
+ `tornado.web.Application`.
+
+ `HTTPServer` supports keep-alive connections by default
+ (automatically for HTTP/1.1, or for HTTP/1.0 when the client
+ requests ``Connection: keep-alive``).
+
+ If ``xheaders`` is ``True``, we support the
+ ``X-Real-Ip``/``X-Forwarded-For`` and
+ ``X-Scheme``/``X-Forwarded-Proto`` headers, which override the
+ remote IP and URI scheme/protocol for all requests. These headers
+ are useful when running Tornado behind a reverse proxy or load
+ balancer. The ``protocol`` argument can also be set to ``https``
+ if Tornado is run behind an SSL-decoding proxy that does not set one of
+ the supported ``xheaders``.
+
+ By default, when parsing the ``X-Forwarded-For`` header, Tornado will
+ select the last (i.e., the closest) address on the list of hosts as the
+ remote host IP address. To select the next server in the chain, a list of
+ trusted downstream hosts may be passed as the ``trusted_downstream``
+ argument. These hosts will be skipped when parsing the ``X-Forwarded-For``
+ header.
+
+ To make this server serve SSL traffic, send the ``ssl_options`` keyword
+ argument with an `ssl.SSLContext` object. For compatibility with older
+ versions of Python ``ssl_options`` may also be a dictionary of keyword
+ arguments for the `ssl.wrap_socket` method.::
+
+ ssl_ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
+ ssl_ctx.load_cert_chain(os.path.join(data_dir, "mydomain.crt"),
+ os.path.join(data_dir, "mydomain.key"))
+ HTTPServer(application, ssl_options=ssl_ctx)
+
+ `HTTPServer` initialization follows one of three patterns (the
+ initialization methods are defined on `tornado.tcpserver.TCPServer`):
+
+ 1. `~tornado.tcpserver.TCPServer.listen`: simple single-process::
+
+ server = HTTPServer(app)
+ server.listen(8888)
+ IOLoop.current().start()
+
+ In many cases, `tornado.web.Application.listen` can be used to avoid
+ the need to explicitly create the `HTTPServer`.
+
+ 2. `~tornado.tcpserver.TCPServer.bind`/`~tornado.tcpserver.TCPServer.start`:
+ simple multi-process::
+
+ server = HTTPServer(app)
+ server.bind(8888)
+ server.start(0) # Forks multiple sub-processes
+ IOLoop.current().start()
+
+ When using this interface, an `.IOLoop` must *not* be passed
+ to the `HTTPServer` constructor. `~.TCPServer.start` will always start
+ the server on the default singleton `.IOLoop`.
+
+ 3. `~tornado.tcpserver.TCPServer.add_sockets`: advanced multi-process::
+
+ sockets = tornado.netutil.bind_sockets(8888)
+ tornado.process.fork_processes(0)
+ server = HTTPServer(app)
+ server.add_sockets(sockets)
+ IOLoop.current().start()
+
+ The `~.TCPServer.add_sockets` interface is more complicated,
+ but it can be used with `tornado.process.fork_processes` to
+ give you more flexibility in when the fork happens.
+ `~.TCPServer.add_sockets` can also be used in single-process
+ servers if you want to create your listening sockets in some
+ way other than `tornado.netutil.bind_sockets`.
+
+ .. versionchanged:: 4.0
+ Added ``decompress_request``, ``chunk_size``, ``max_header_size``,
+ ``idle_connection_timeout``, ``body_timeout``, ``max_body_size``
+ arguments. Added support for `.HTTPServerConnectionDelegate`
+ instances as ``request_callback``.
+
+ .. versionchanged:: 4.1
+ `.HTTPServerConnectionDelegate.start_request` is now called with
+ two arguments ``(server_conn, request_conn)`` (in accordance with the
+ documentation) instead of one ``(request_conn)``.
+
+ .. versionchanged:: 4.2
+ `HTTPServer` is now a subclass of `tornado.util.Configurable`.
+
+ .. versionchanged:: 4.5
+ Added the ``trusted_downstream`` argument.
+ """
+ def __init__(self, *args, **kwargs):
+ # Ignore args to __init__; real initialization belongs in
+ # initialize since we're Configurable. (there's something
+ # weird in initialization order between this class,
+ # Configurable, and TCPServer so we can't leave __init__ out
+ # completely)
+ pass
+
+ def initialize(self, request_callback, no_keep_alive=False, io_loop=None,
+ xheaders=False, ssl_options=None, protocol=None,
+ decompress_request=False,
+ chunk_size=None, max_header_size=None,
+ idle_connection_timeout=None, body_timeout=None,
+ max_body_size=None, max_buffer_size=None,
+ trusted_downstream=None):
+ self.request_callback = request_callback
+ self.no_keep_alive = no_keep_alive
+ self.xheaders = xheaders
+ self.protocol = protocol
+ self.conn_params = HTTP1ConnectionParameters(
+ decompress=decompress_request,
+ chunk_size=chunk_size,
+ max_header_size=max_header_size,
+ header_timeout=idle_connection_timeout or 3600,
+ max_body_size=max_body_size,
+ body_timeout=body_timeout,
+ no_keep_alive=no_keep_alive)
+ TCPServer.__init__(self, io_loop=io_loop, ssl_options=ssl_options,
+ max_buffer_size=max_buffer_size,
+ read_chunk_size=chunk_size)
+ self._connections = set()
+ self.trusted_downstream = trusted_downstream
+
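+ # Illustrative sketch, not part of Tornado: enabling proxy headers for
+ # a server running behind one known reverse proxy (placeholder address),
+ # combining the xheaders and trusted_downstream options documented in
+ # the class docstring:
+ #
+ #     server = HTTPServer(app, xheaders=True,
+ #                         trusted_downstream=["10.0.0.1"])
+ #     server.listen(8888)
+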
+ @classmethod
+ def configurable_base(cls):
+ return HTTPServer
+
+ @classmethod
+ def configurable_default(cls):
+ return HTTPServer
+
+ @gen.coroutine
+ def close_all_connections(self):
+ while self._connections:
+ # Peek at an arbitrary element of the set
+ conn = next(iter(self._connections))
+ yield conn.close()
+
+ def handle_stream(self, stream, address):
+ context = _HTTPRequestContext(stream, address,
+ self.protocol,
+ self.trusted_downstream)
+ conn = HTTP1ServerConnection(
+ stream, self.conn_params, context)
+ self._connections.add(conn)
+ conn.start_serving(self)
+
+ def start_request(self, server_conn, request_conn):
+ if isinstance(self.request_callback, httputil.HTTPServerConnectionDelegate):
+ delegate = self.request_callback.start_request(server_conn, request_conn)
+ else:
+ delegate = _CallableAdapter(self.request_callback, request_conn)
+
+ if self.xheaders:
+ delegate = _ProxyAdapter(delegate, request_conn)
+
+ return delegate
+
+ def on_close(self, server_conn):
+ self._connections.remove(server_conn)
+
+
+class _CallableAdapter(httputil.HTTPMessageDelegate):
+ def __init__(self, request_callback, request_conn):
+ self.connection = request_conn
+ self.request_callback = request_callback
+ self.request = None
+ self.delegate = None
+ self._chunks = []
+
+ def headers_received(self, start_line, headers):
+ self.request = httputil.HTTPServerRequest(
+ connection=self.connection, start_line=start_line,
+ headers=headers)
+
+ def data_received(self, chunk):
+ self._chunks.append(chunk)
+
+ def finish(self):
+ self.request.body = b''.join(self._chunks)
+ self.request._parse_body()
+ self.request_callback(self.request)
+
+ def on_connection_close(self):
+ self._chunks = None
+
+
+class _HTTPRequestContext(object):
+ def __init__(self, stream, address, protocol, trusted_downstream=None):
+ self.address = address
+ # Save the socket's address family now so we know how to
+ # interpret self.address even after the stream is closed
+ # and its socket attribute replaced with None.
+ if stream.socket is not None:
+ self.address_family = stream.socket.family
+ else:
+ self.address_family = None
+ # In HTTPServerRequest we want an IP, not a full socket address.
+ if (self.address_family in (socket.AF_INET, socket.AF_INET6) and
+ address is not None):
+ self.remote_ip = address[0]
+ else:
+ # Unix (or other) socket; fake the remote address.
+ self.remote_ip = '0.0.0.0'
+ if protocol:
+ self.protocol = protocol
+ elif isinstance(stream, iostream.SSLIOStream):
+ self.protocol = "https"
+ else:
+ self.protocol = "http"
+ self._orig_remote_ip = self.remote_ip
+ self._orig_protocol = self.protocol
+ self.trusted_downstream = set(trusted_downstream or [])
+
+ def __str__(self):
+ if self.address_family in (socket.AF_INET, socket.AF_INET6):
+ return self.remote_ip
+ elif isinstance(self.address, bytes):
+ # Python 3 with the -bb option warns about str(bytes),
+ # so convert it explicitly.
+ # Unix socket addresses are str on mac but bytes on linux.
+ return native_str(self.address)
+ else:
+ return str(self.address)
+
+ def _apply_xheaders(self, headers):
+ """Rewrite the ``remote_ip`` and ``protocol`` fields."""
+ # Squid uses X-Forwarded-For, others use X-Real-Ip
+ ip = headers.get("X-Forwarded-For", self.remote_ip)
+ # Skip trusted downstream hosts in X-Forwarded-For list
+ for ip in (cand.strip() for cand in reversed(ip.split(','))):
+ if ip not in self.trusted_downstream:
+ break
+ ip = headers.get("X-Real-Ip", ip)
+ if netutil.is_valid_ip(ip):
+ self.remote_ip = ip
+ # AWS uses X-Forwarded-Proto
+ proto_header = headers.get(
+ "X-Scheme", headers.get("X-Forwarded-Proto",
+ self.protocol))
+ if proto_header in ("http", "https"):
+ self.protocol = proto_header
+
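+ # Worked example (illustrative, placeholder addresses): given
+ # trusted_downstream={"10.0.0.1"} and the header
+ # "X-Forwarded-For: 203.0.113.5, 10.0.0.1", the loop above walks the
+ # list right to left, skips the trusted proxy 10.0.0.1, and sets
+ # remote_ip to 203.0.113.5.
+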
+ def _unapply_xheaders(self):
+ """Undo changes from `_apply_xheaders`.
+
+ Xheaders are per-request so they should not leak to the next
+ request on the same connection.
+ """
+ self.remote_ip = self._orig_remote_ip
+ self.protocol = self._orig_protocol
+
+
+class _ProxyAdapter(httputil.HTTPMessageDelegate):
+ def __init__(self, delegate, request_conn):
+ self.connection = request_conn
+ self.delegate = delegate
+
+ def headers_received(self, start_line, headers):
+ self.connection.context._apply_xheaders(headers)
+ return self.delegate.headers_received(start_line, headers)
+
+ def data_received(self, chunk):
+ return self.delegate.data_received(chunk)
+
+ def finish(self):
+ self.delegate.finish()
+ self._cleanup()
+
+ def on_connection_close(self):
+ self.delegate.on_connection_close()
+ self._cleanup()
+
+ def _cleanup(self):
+ self.connection.context._unapply_xheaders()
+
+
+HTTPRequest = httputil.HTTPServerRequest
diff --git a/contrib/python/tornado/tornado-4/tornado/httputil.py b/contrib/python/tornado/tornado-4/tornado/httputil.py
index 9654b5ab4b..8be39ebf2b 100644
--- a/contrib/python/tornado/tornado-4/tornado/httputil.py
+++ b/contrib/python/tornado/tornado-4/tornado/httputil.py
@@ -1,1026 +1,1026 @@
-#!/usr/bin/env python
-#
-# Copyright 2009 Facebook
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""HTTP utility code shared by clients and servers.
-
-This module also defines the `HTTPServerRequest` class which is exposed
-via `tornado.web.RequestHandler.request`.
-"""
-
-from __future__ import absolute_import, division, print_function
-
-import calendar
-import collections
-import copy
-import datetime
-import email.utils
-import numbers
-import re
-import time
-
+#!/usr/bin/env python
+#
+# Copyright 2009 Facebook
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""HTTP utility code shared by clients and servers.
+
+This module also defines the `HTTPServerRequest` class which is exposed
+via `tornado.web.RequestHandler.request`.
+"""
+
+from __future__ import absolute_import, division, print_function
+
+import calendar
+import collections
+import copy
+import datetime
+import email.utils
+import numbers
+import re
+import time
+
try:
from collections.abc import MutableMapping
except ImportError:
from collections import MutableMapping
-from tornado.escape import native_str, parse_qs_bytes, utf8
-from tornado.log import gen_log
-from tornado.util import ObjectDict, PY3
-
-if PY3:
- import http.cookies as Cookie
- from http.client import responses
- from urllib.parse import urlencode, urlparse, urlunparse, parse_qsl
-else:
- import Cookie
- from httplib import responses
- from urllib import urlencode
- from urlparse import urlparse, urlunparse, parse_qsl
-
-
-# responses is unused in this file, but we re-export it to other files.
-# Reference it so pyflakes doesn't complain.
-responses
-
-try:
- from ssl import SSLError
-except ImportError:
- # ssl is unavailable on app engine.
- class _SSLError(Exception):
- pass
- # Hack around a mypy limitation. We can't simply put "type: ignore"
- # on the class definition itself; must go through an assignment.
- SSLError = _SSLError # type: ignore
-
-try:
- import typing
-except ImportError:
- pass
-
-
-# RFC 7230 section 3.5: a recipient MAY recognize a single LF as a line
-# terminator and ignore any preceding CR.
-_CRLF_RE = re.compile(r'\r?\n')
-
-
-class _NormalizedHeaderCache(dict):
- """Dynamic cached mapping of header names to Http-Header-Case.
-
- Implemented as a dict subclass so that cache hits are as fast as a
- normal dict lookup, without the overhead of a python function
- call.
-
- >>> normalized_headers = _NormalizedHeaderCache(10)
- >>> normalized_headers["coNtent-TYPE"]
- 'Content-Type'
- """
- def __init__(self, size):
- super(_NormalizedHeaderCache, self).__init__()
- self.size = size
- self.queue = collections.deque()
-
- def __missing__(self, key):
- normalized = "-".join([w.capitalize() for w in key.split("-")])
- self[key] = normalized
- self.queue.append(key)
- if len(self.queue) > self.size:
- # Limit the size of the cache. LRU would be better, but this
- # simpler approach should be fine. In Python 2.7+ we could
- # use OrderedDict (or in 3.2+, @functools.lru_cache).
- old_key = self.queue.popleft()
- del self[old_key]
- return normalized
-
-
-_normalized_headers = _NormalizedHeaderCache(1000)
-
-
+from tornado.escape import native_str, parse_qs_bytes, utf8
+from tornado.log import gen_log
+from tornado.util import ObjectDict, PY3
+
+if PY3:
+ import http.cookies as Cookie
+ from http.client import responses
+ from urllib.parse import urlencode, urlparse, urlunparse, parse_qsl
+else:
+ import Cookie
+ from httplib import responses
+ from urllib import urlencode
+ from urlparse import urlparse, urlunparse, parse_qsl
+
+
+# responses is unused in this file, but we re-export it to other files.
+# Reference it so pyflakes doesn't complain.
+responses
+
+try:
+ from ssl import SSLError
+except ImportError:
+ # ssl is unavailable on app engine.
+ class _SSLError(Exception):
+ pass
+ # Hack around a mypy limitation. We can't simply put "type: ignore"
+ # on the class definition itself; must go through an assignment.
+ SSLError = _SSLError # type: ignore
+
+try:
+ import typing
+except ImportError:
+ pass
+
+
+# RFC 7230 section 3.5: a recipient MAY recognize a single LF as a line
+# terminator and ignore any preceding CR.
+_CRLF_RE = re.compile(r'\r?\n')
+
+
+class _NormalizedHeaderCache(dict):
+ """Dynamic cached mapping of header names to Http-Header-Case.
+
+ Implemented as a dict subclass so that cache hits are as fast as a
+ normal dict lookup, without the overhead of a python function
+ call.
+
+ >>> normalized_headers = _NormalizedHeaderCache(10)
+ >>> normalized_headers["coNtent-TYPE"]
+ 'Content-Type'
+ """
+ def __init__(self, size):
+ super(_NormalizedHeaderCache, self).__init__()
+ self.size = size
+ self.queue = collections.deque()
+
+ def __missing__(self, key):
+ normalized = "-".join([w.capitalize() for w in key.split("-")])
+ self[key] = normalized
+ self.queue.append(key)
+ if len(self.queue) > self.size:
+ # Limit the size of the cache. LRU would be better, but this
+ # simpler approach should be fine. In Python 2.7+ we could
+ # use OrderedDict (or in 3.2+, @functools.lru_cache).
+ old_key = self.queue.popleft()
+ del self[old_key]
+ return normalized
+
+
+_normalized_headers = _NormalizedHeaderCache(1000)
+
+
class HTTPHeaders(MutableMapping):
- """A dictionary that maintains ``Http-Header-Case`` for all keys.
-
- Supports multiple values per key via a pair of new methods,
- `add()` and `get_list()`. The regular dictionary interface
- returns a single value per key, with multiple values joined by a
- comma.
-
- >>> h = HTTPHeaders({"content-type": "text/html"})
- >>> list(h.keys())
- ['Content-Type']
- >>> h["Content-Type"]
- 'text/html'
-
- >>> h.add("Set-Cookie", "A=B")
- >>> h.add("Set-Cookie", "C=D")
- >>> h["set-cookie"]
- 'A=B,C=D'
- >>> h.get_list("set-cookie")
- ['A=B', 'C=D']
-
- >>> for (k,v) in sorted(h.get_all()):
- ... print('%s: %s' % (k,v))
- ...
- Content-Type: text/html
- Set-Cookie: A=B
- Set-Cookie: C=D
- """
- def __init__(self, *args, **kwargs):
- self._dict = {} # type: typing.Dict[str, str]
- self._as_list = {} # type: typing.Dict[str, typing.List[str]]
- self._last_key = None
- if (len(args) == 1 and len(kwargs) == 0 and
- isinstance(args[0], HTTPHeaders)):
- # Copy constructor
- for k, v in args[0].get_all():
- self.add(k, v)
- else:
- # Dict-style initialization
- self.update(*args, **kwargs)
-
- # new public methods
-
- def add(self, name, value):
- # type: (str, str) -> None
- """Adds a new value for the given key."""
- norm_name = _normalized_headers[name]
- self._last_key = norm_name
- if norm_name in self:
- self._dict[norm_name] = (native_str(self[norm_name]) + ',' +
- native_str(value))
- self._as_list[norm_name].append(value)
- else:
- self[norm_name] = value
-
- def get_list(self, name):
- """Returns all values for the given header as a list."""
- norm_name = _normalized_headers[name]
- return self._as_list.get(norm_name, [])
-
- def get_all(self):
- # type: () -> typing.Iterable[typing.Tuple[str, str]]
- """Returns an iterable of all (name, value) pairs.
-
- If a header has multiple values, multiple pairs will be
- returned with the same name.
- """
- for name, values in self._as_list.items():
- for value in values:
- yield (name, value)
-
- def parse_line(self, line):
- """Updates the dictionary with a single header line.
-
- >>> h = HTTPHeaders()
- >>> h.parse_line("Content-Type: text/html")
- >>> h.get('content-type')
- 'text/html'
- """
- if line[0].isspace():
- # continuation of a multi-line header
- new_part = ' ' + line.lstrip()
- self._as_list[self._last_key][-1] += new_part
- self._dict[self._last_key] += new_part
- else:
- name, value = line.split(":", 1)
- self.add(name, value.strip())
-
- @classmethod
- def parse(cls, headers):
- """Returns a dictionary from HTTP header text.
-
- >>> h = HTTPHeaders.parse("Content-Type: text/html\\r\\nContent-Length: 42\\r\\n")
- >>> sorted(h.items())
- [('Content-Length', '42'), ('Content-Type', 'text/html')]
- """
- h = cls()
- for line in _CRLF_RE.split(headers):
- if line:
- h.parse_line(line)
- return h
-
- # MutableMapping abstract method implementations.
-
- def __setitem__(self, name, value):
- norm_name = _normalized_headers[name]
- self._dict[norm_name] = value
- self._as_list[norm_name] = [value]
-
- def __getitem__(self, name):
- # type: (str) -> str
- return self._dict[_normalized_headers[name]]
-
- def __delitem__(self, name):
- norm_name = _normalized_headers[name]
- del self._dict[norm_name]
- del self._as_list[norm_name]
-
- def __len__(self):
- return len(self._dict)
-
- def __iter__(self):
- return iter(self._dict)
-
- def copy(self):
- # defined in dict but not in MutableMapping.
- return HTTPHeaders(self)
-
- # Use our overridden copy method for the copy.copy module.
- # This makes shallow copies one level deeper, but preserves
- # the appearance that HTTPHeaders is a single container.
- __copy__ = copy
-
- def __str__(self):
- lines = []
- for name, value in self.get_all():
- lines.append("%s: %s\n" % (name, value))
- return "".join(lines)
-
- __unicode__ = __str__
-
-
-class HTTPServerRequest(object):
- """A single HTTP request.
-
- All attributes are type `str` unless otherwise noted.
-
- .. attribute:: method
-
- HTTP request method, e.g. "GET" or "POST"
-
- .. attribute:: uri
-
- The requested uri.
-
- .. attribute:: path
-
- The path portion of `uri`
-
- .. attribute:: query
-
- The query portion of `uri`
-
- .. attribute:: version
-
- HTTP version specified in request, e.g. "HTTP/1.1"
-
- .. attribute:: headers
-
- `.HTTPHeaders` dictionary-like object for request headers. Acts like
- a case-insensitive dictionary with additional methods for repeated
- headers.
-
- .. attribute:: body
-
- Request body, if present, as a byte string.
-
- .. attribute:: remote_ip
-
- Client's IP address as a string. If ``HTTPServer.xheaders`` is set,
- will pass along the real IP address provided by a load balancer
- in the ``X-Real-Ip`` or ``X-Forwarded-For`` header.
-
- .. versionchanged:: 3.1
- The list format of ``X-Forwarded-For`` is now supported.
-
- .. attribute:: protocol
-
- The protocol used, either "http" or "https". If ``HTTPServer.xheaders``
- is set, will pass along the protocol used by a load balancer if
- reported via an ``X-Scheme`` header.
-
- .. attribute:: host
-
- The requested hostname, usually taken from the ``Host`` header.
-
- .. attribute:: arguments
-
- GET/POST arguments are available in the arguments property, which
- maps argument names to lists of values (to support multiple values
- for individual names). Names are of type `str`, while arguments
- are byte strings. Note that this is different from
- `.RequestHandler.get_argument`, which returns argument values as
- unicode strings.
-
- .. attribute:: query_arguments
-
- Same format as ``arguments``, but contains only arguments extracted
- from the query string.
-
- .. versionadded:: 3.2
-
- .. attribute:: body_arguments
-
- Same format as ``arguments``, but contains only arguments extracted
- from the request body.
-
- .. versionadded:: 3.2
-
- .. attribute:: files
-
- File uploads are available in the files property, which maps file
- names to lists of `.HTTPFile`.
-
- .. attribute:: connection
-
- An HTTP request is attached to a single HTTP connection, which can
- be accessed through the "connection" attribute. Since connections
- are typically kept open in HTTP/1.1, multiple requests can be handled
- sequentially on a single connection.
-
- .. versionchanged:: 4.0
- Moved from ``tornado.httpserver.HTTPRequest``.
- """
- def __init__(self, method=None, uri=None, version="HTTP/1.0", headers=None,
- body=None, host=None, files=None, connection=None,
- start_line=None, server_connection=None):
- if start_line is not None:
- method, uri, version = start_line
- self.method = method
- self.uri = uri
- self.version = version
- self.headers = headers or HTTPHeaders()
- self.body = body or b""
-
- # set remote IP and protocol
- context = getattr(connection, 'context', None)
- self.remote_ip = getattr(context, 'remote_ip', None)
- self.protocol = getattr(context, 'protocol', "http")
-
- self.host = host or self.headers.get("Host") or "127.0.0.1"
- self.host_name = split_host_and_port(self.host.lower())[0]
- self.files = files or {}
- self.connection = connection
- self.server_connection = server_connection
- self._start_time = time.time()
- self._finish_time = None
-
- self.path, sep, self.query = uri.partition('?')
- self.arguments = parse_qs_bytes(self.query, keep_blank_values=True)
- self.query_arguments = copy.deepcopy(self.arguments)
- self.body_arguments = {}
-
- def supports_http_1_1(self):
- """Returns True if this request supports HTTP/1.1 semantics.
-
- .. deprecated:: 4.0
- Applications are less likely to need this information with the
- introduction of `.HTTPConnection`. If you still need it, access
- the ``version`` attribute directly.
- """
- return self.version == "HTTP/1.1"
-
- @property
- def cookies(self):
- """A dictionary of Cookie.Morsel objects."""
- if not hasattr(self, "_cookies"):
- self._cookies = Cookie.SimpleCookie()
- if "Cookie" in self.headers:
- try:
- parsed = parse_cookie(self.headers["Cookie"])
- except Exception:
- pass
- else:
- for k, v in parsed.items():
- try:
- self._cookies[k] = v
- except Exception:
- # SimpleCookie imposes some restrictions on keys;
- # parse_cookie does not. Discard any cookies
- # with disallowed keys.
- pass
- return self._cookies
-
- def write(self, chunk, callback=None):
- """Writes the given chunk to the response stream.
-
- .. deprecated:: 4.0
- Use ``request.connection`` and the `.HTTPConnection` methods
- to write the response.
- """
- assert isinstance(chunk, bytes)
- assert self.version.startswith("HTTP/1."), \
- "deprecated interface only supported in HTTP/1.x"
- self.connection.write(chunk, callback=callback)
-
- def finish(self):
- """Finishes this HTTP request on the open connection.
-
- .. deprecated:: 4.0
- Use ``request.connection`` and the `.HTTPConnection` methods
- to write the response.
- """
- self.connection.finish()
- self._finish_time = time.time()
-
- def full_url(self):
- """Reconstructs the full URL for this request."""
- return self.protocol + "://" + self.host + self.uri
-
- def request_time(self):
- """Returns the amount of time it took for this request to execute."""
- if self._finish_time is None:
- return time.time() - self._start_time
- else:
- return self._finish_time - self._start_time
-
- def get_ssl_certificate(self, binary_form=False):
- """Returns the client's SSL certificate, if any.
-
- To use client certificates, the HTTPServer's
- `ssl.SSLContext.verify_mode` field must be set, e.g.::
-
- ssl_ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
- ssl_ctx.load_cert_chain("foo.crt", "foo.key")
- ssl_ctx.load_verify_locations("cacerts.pem")
- ssl_ctx.verify_mode = ssl.CERT_REQUIRED
- server = HTTPServer(app, ssl_options=ssl_ctx)
-
- By default, the return value is a dictionary (or None, if no
- client certificate is present). If ``binary_form`` is true, a
- DER-encoded form of the certificate is returned instead. See
- SSLSocket.getpeercert() in the standard library for more
- details.
- http://docs.python.org/library/ssl.html#sslsocket-objects
- """
- try:
- return self.connection.stream.socket.getpeercert(
- binary_form=binary_form)
- except SSLError:
- return None
-
- def _parse_body(self):
- parse_body_arguments(
- self.headers.get("Content-Type", ""), self.body,
- self.body_arguments, self.files,
- self.headers)
-
- for k, v in self.body_arguments.items():
- self.arguments.setdefault(k, []).extend(v)
-
- def __repr__(self):
- attrs = ("protocol", "host", "method", "uri", "version", "remote_ip")
- args = ", ".join(["%s=%r" % (n, getattr(self, n)) for n in attrs])
- return "%s(%s, headers=%s)" % (
- self.__class__.__name__, args, dict(self.headers))
-
-
-class HTTPInputError(Exception):
- """Exception class for malformed HTTP requests or responses
- from remote sources.
-
- .. versionadded:: 4.0
- """
- pass
-
-
-class HTTPOutputError(Exception):
- """Exception class for errors in HTTP output.
-
- .. versionadded:: 4.0
- """
- pass
-
-
-class HTTPServerConnectionDelegate(object):
- """Implement this interface to handle requests from `.HTTPServer`.
-
- .. versionadded:: 4.0
- """
- def start_request(self, server_conn, request_conn):
- """This method is called by the server when a new request has started.
-
- :arg server_conn: is an opaque object representing the long-lived
- (e.g. tcp-level) connection.
- :arg request_conn: is a `.HTTPConnection` object for a single
- request/response exchange.
-
- This method should return a `.HTTPMessageDelegate`.
- """
- raise NotImplementedError()
-
- def on_close(self, server_conn):
- """This method is called when a connection has been closed.
-
- :arg server_conn: is a server connection that has previously been
- passed to ``start_request``.
- """
- pass
-
-
-class HTTPMessageDelegate(object):
- """Implement this interface to handle an HTTP request or response.
-
- .. versionadded:: 4.0
- """
- def headers_received(self, start_line, headers):
- """Called when the HTTP headers have been received and parsed.
-
- :arg start_line: a `.RequestStartLine` or `.ResponseStartLine`
- depending on whether this is a client or server message.
- :arg headers: a `.HTTPHeaders` instance.
-
- Some `.HTTPConnection` methods can only be called during
- ``headers_received``.
-
- May return a `.Future`; if it does the body will not be read
- until it is done.
- """
- pass
-
- def data_received(self, chunk):
- """Called when a chunk of data has been received.
-
- May return a `.Future` for flow control.
- """
- pass
-
- def finish(self):
- """Called after the last chunk of data has been received."""
- pass
-
- def on_connection_close(self):
- """Called if the connection is closed without finishing the request.
-
- If ``headers_received`` is called, either ``finish`` or
- ``on_connection_close`` will be called, but not both.
- """
- pass
-
-
-class HTTPConnection(object):
- """Applications use this interface to write their responses.
-
- .. versionadded:: 4.0
- """
- def write_headers(self, start_line, headers, chunk=None, callback=None):
- """Write an HTTP header block.
-
- :arg start_line: a `.RequestStartLine` or `.ResponseStartLine`.
- :arg headers: a `.HTTPHeaders` instance.
- :arg chunk: the first (optional) chunk of data. This is an optimization
- so that small responses can be written in the same call as their
- headers.
- :arg callback: a callback to be run when the write is complete.
-
- The ``version`` field of ``start_line`` is ignored.
-
- Returns a `.Future` if no callback is given.
- """
- raise NotImplementedError()
-
- def write(self, chunk, callback=None):
- """Writes a chunk of body data.
-
- The callback will be run when the write is complete. If no callback
- is given, returns a Future.
- """
- raise NotImplementedError()
-
- def finish(self):
- """Indicates that the last body data has been written.
- """
- raise NotImplementedError()
-
-
-def url_concat(url, args):
- """Concatenate url and arguments regardless of whether
- url has existing query parameters.
-
- ``args`` may be either a dictionary or a list of key-value pairs
- (the latter allows for multiple values with the same key).
-
- >>> url_concat("http://example.com/foo", dict(c="d"))
- 'http://example.com/foo?c=d'
- >>> url_concat("http://example.com/foo?a=b", dict(c="d"))
- 'http://example.com/foo?a=b&c=d'
- >>> url_concat("http://example.com/foo?a=b", [("c", "d"), ("c", "d2")])
- 'http://example.com/foo?a=b&c=d&c=d2'
- """
- if args is None:
- return url
- parsed_url = urlparse(url)
- if isinstance(args, dict):
- parsed_query = parse_qsl(parsed_url.query, keep_blank_values=True)
- parsed_query.extend(args.items())
- elif isinstance(args, list) or isinstance(args, tuple):
- parsed_query = parse_qsl(parsed_url.query, keep_blank_values=True)
- parsed_query.extend(args)
- else:
- err = "'args' parameter should be dict, list or tuple. Not {0}".format(
- type(args))
- raise TypeError(err)
- final_query = urlencode(parsed_query)
- url = urlunparse((
- parsed_url[0],
- parsed_url[1],
- parsed_url[2],
- parsed_url[3],
- final_query,
- parsed_url[5]))
- return url
-
-
-class HTTPFile(ObjectDict):
- """Represents a file uploaded via a form.
-
- For backwards compatibility, its instance attributes are also
- accessible as dictionary keys.
-
- * ``filename``
- * ``body``
- * ``content_type``
- """
- pass
-
-
-def _parse_request_range(range_header):
- """Parses a Range header.
-
- Returns either ``None`` or tuple ``(start, end)``.
- Note that while the HTTP headers use inclusive byte positions,
- this method returns indexes suitable for use in slices.
-
- >>> start, end = _parse_request_range("bytes=1-2")
- >>> start, end
- (1, 3)
- >>> [0, 1, 2, 3, 4][start:end]
- [1, 2]
- >>> _parse_request_range("bytes=6-")
- (6, None)
- >>> _parse_request_range("bytes=-6")
- (-6, None)
- >>> _parse_request_range("bytes=-0")
- (None, 0)
- >>> _parse_request_range("bytes=")
- (None, None)
- >>> _parse_request_range("foo=42")
- >>> _parse_request_range("bytes=1-2,6-10")
-
- Note: only supports one range (e.g., ``bytes=1-2,6-10`` is not allowed).
-
- See [0] for the details of the range header.
-
- [0]: http://greenbytes.de/tech/webdav/draft-ietf-httpbis-p5-range-latest.html#byte.ranges
- """
- unit, _, value = range_header.partition("=")
- unit, value = unit.strip(), value.strip()
- if unit != "bytes":
- return None
- start_b, _, end_b = value.partition("-")
- try:
- start = _int_or_none(start_b)
- end = _int_or_none(end_b)
- except ValueError:
- return None
- if end is not None:
- if start is None:
- if end != 0:
- start = -end
- end = None
- else:
- end += 1
- return (start, end)
-
-
-def _get_content_range(start, end, total):
- """Returns a suitable Content-Range header:
-
- >>> print(_get_content_range(None, 1, 4))
- bytes 0-0/4
- >>> print(_get_content_range(1, 3, 4))
- bytes 1-2/4
- >>> print(_get_content_range(None, None, 4))
- bytes 0-3/4
- """
- start = start or 0
- end = (end or total) - 1
- return "bytes %s-%s/%s" % (start, end, total)
-
-
-def _int_or_none(val):
- val = val.strip()
- if val == "":
- return None
- return int(val)
-
-
-def parse_body_arguments(content_type, body, arguments, files, headers=None):
- """Parses a form request body.
-
- Supports ``application/x-www-form-urlencoded`` and
- ``multipart/form-data``. The ``content_type`` parameter should be
- a string and ``body`` should be a byte string. The ``arguments``
- and ``files`` parameters are dictionaries that will be updated
- with the parsed contents.
- """
- if headers and 'Content-Encoding' in headers:
- gen_log.warning("Unsupported Content-Encoding: %s",
- headers['Content-Encoding'])
- return
- if content_type.startswith("application/x-www-form-urlencoded"):
- try:
- uri_arguments = parse_qs_bytes(native_str(body), keep_blank_values=True)
- except Exception as e:
- gen_log.warning('Invalid x-www-form-urlencoded body: %s', e)
- uri_arguments = {}
- for name, values in uri_arguments.items():
- if values:
- arguments.setdefault(name, []).extend(values)
- elif content_type.startswith("multipart/form-data"):
- try:
- fields = content_type.split(";")
- for field in fields:
- k, sep, v = field.strip().partition("=")
- if k == "boundary" and v:
- parse_multipart_form_data(utf8(v), body, arguments, files)
- break
- else:
- raise ValueError("multipart boundary not found")
- except Exception as e:
- gen_log.warning("Invalid multipart/form-data: %s", e)
-
-
-def parse_multipart_form_data(boundary, data, arguments, files):
- """Parses a ``multipart/form-data`` body.
-
- The ``boundary`` and ``data`` parameters are both byte strings.
- The dictionaries given in the arguments and files parameters
- will be updated with the contents of the body.
- """
- # The standard allows for the boundary to be quoted in the header,
- # although it's rare (it happens at least for google app engine
- # xmpp). I think we're also supposed to handle backslash-escapes
- # here but I'll save that until we see a client that uses them
- # in the wild.
- if boundary.startswith(b'"') and boundary.endswith(b'"'):
- boundary = boundary[1:-1]
- final_boundary_index = data.rfind(b"--" + boundary + b"--")
- if final_boundary_index == -1:
- gen_log.warning("Invalid multipart/form-data: no final boundary")
- return
- parts = data[:final_boundary_index].split(b"--" + boundary + b"\r\n")
- for part in parts:
- if not part:
- continue
- eoh = part.find(b"\r\n\r\n")
- if eoh == -1:
- gen_log.warning("multipart/form-data missing headers")
- continue
- headers = HTTPHeaders.parse(part[:eoh].decode("utf-8"))
- disp_header = headers.get("Content-Disposition", "")
- disposition, disp_params = _parse_header(disp_header)
- if disposition != "form-data" or not part.endswith(b"\r\n"):
- gen_log.warning("Invalid multipart/form-data")
- continue
- value = part[eoh + 4:-2]
- if not disp_params.get("name"):
- gen_log.warning("multipart/form-data value missing name")
- continue
- name = disp_params["name"]
- if disp_params.get("filename"):
- ctype = headers.get("Content-Type", "application/unknown")
- files.setdefault(name, []).append(HTTPFile( # type: ignore
- filename=disp_params["filename"], body=value,
- content_type=ctype))
- else:
- arguments.setdefault(name, []).append(value)
-
-
-def format_timestamp(ts):
- """Formats a timestamp in the format used by HTTP.
-
- The argument may be a numeric timestamp as returned by `time.time`,
- a time tuple as returned by `time.gmtime`, or a `datetime.datetime`
- object.
-
- >>> format_timestamp(1359312200)
- 'Sun, 27 Jan 2013 18:43:20 GMT'
- """
- if isinstance(ts, numbers.Real):
- pass
- elif isinstance(ts, (tuple, time.struct_time)):
- ts = calendar.timegm(ts)
- elif isinstance(ts, datetime.datetime):
- ts = calendar.timegm(ts.utctimetuple())
- else:
- raise TypeError("unknown timestamp type: %r" % ts)
- return email.utils.formatdate(ts, usegmt=True)
-
-
-RequestStartLine = collections.namedtuple(
- 'RequestStartLine', ['method', 'path', 'version'])
-
-
-def parse_request_start_line(line):
- """Returns a (method, path, version) tuple for an HTTP 1.x request line.
-
- The response is a `collections.namedtuple`.
-
- >>> parse_request_start_line("GET /foo HTTP/1.1")
- RequestStartLine(method='GET', path='/foo', version='HTTP/1.1')
- """
- try:
- method, path, version = line.split(" ")
- except ValueError:
- raise HTTPInputError("Malformed HTTP request line")
- if not re.match(r"^HTTP/1\.[0-9]$", version):
- raise HTTPInputError(
- "Malformed HTTP version in HTTP Request-Line: %r" % version)
- return RequestStartLine(method, path, version)
-
-
-ResponseStartLine = collections.namedtuple(
- 'ResponseStartLine', ['version', 'code', 'reason'])
-
-
-def parse_response_start_line(line):
- """Returns a (version, code, reason) tuple for an HTTP 1.x response line.
-
- The response is a `collections.namedtuple`.
-
- >>> parse_response_start_line("HTTP/1.1 200 OK")
- ResponseStartLine(version='HTTP/1.1', code=200, reason='OK')
- """
- line = native_str(line)
- match = re.match(r"(HTTP/1\.[0-9]) ([0-9]+) ([^\r]*)", line)
- if not match:
- raise HTTPInputError("Error parsing response start line")
- return ResponseStartLine(match.group(1), int(match.group(2)),
- match.group(3))
-
-# _parseparam and _parse_header are copied and modified from python2.7's cgi.py
-# The original 2.7 version of this code did not correctly support some
-# combinations of semicolons and double quotes.
-# It has also been modified to support valueless parameters as seen in
-# websocket extension negotiations.
-
-
-def _parseparam(s):
- while s[:1] == ';':
- s = s[1:]
- end = s.find(';')
- while end > 0 and (s.count('"', 0, end) - s.count('\\"', 0, end)) % 2:
- end = s.find(';', end + 1)
- if end < 0:
- end = len(s)
- f = s[:end]
- yield f.strip()
- s = s[end:]
-
-
-def _parse_header(line):
- """Parse a Content-type like header.
-
- Return the main content-type and a dictionary of options.
-
- """
- parts = _parseparam(';' + line)
- key = next(parts)
- pdict = {}
- for p in parts:
- i = p.find('=')
- if i >= 0:
- name = p[:i].strip().lower()
- value = p[i + 1:].strip()
- if len(value) >= 2 and value[0] == value[-1] == '"':
- value = value[1:-1]
- value = value.replace('\\\\', '\\').replace('\\"', '"')
- pdict[name] = value
- else:
- pdict[p] = None
- return key, pdict
-
-
-def _encode_header(key, pdict):
- """Inverse of _parse_header.
-
- >>> _encode_header('permessage-deflate',
- ... {'client_max_window_bits': 15, 'client_no_context_takeover': None})
- 'permessage-deflate; client_max_window_bits=15; client_no_context_takeover'
- """
- if not pdict:
- return key
- out = [key]
- # Sort the parameters just to make it easy to test.
- for k, v in sorted(pdict.items()):
- if v is None:
- out.append(k)
- else:
- # TODO: quote if necessary.
- out.append('%s=%s' % (k, v))
- return '; '.join(out)
-
-
-def doctests():
- import doctest
- return doctest.DocTestSuite()
-
-
-def split_host_and_port(netloc):
- """Returns ``(host, port)`` tuple from ``netloc``.
-
- Returned ``port`` will be ``None`` if not present.
-
- .. versionadded:: 4.1
- """
- match = re.match(r'^(.+):(\d+)$', netloc)
- if match:
- host = match.group(1)
- port = int(match.group(2))
- else:
- host = netloc
- port = None
- return (host, port)
-
-
-_OctalPatt = re.compile(r"\\[0-3][0-7][0-7]")
-_QuotePatt = re.compile(r"[\\].")
-_nulljoin = ''.join
-
-
-def _unquote_cookie(str):
- """Handle double quotes and escaping in cookie values.
-
- This method is copied verbatim from the Python 3.5 standard
- library (http.cookies._unquote) so we don't have to depend on
- non-public interfaces.
- """
- # If there aren't any doublequotes,
- # then there can't be any special characters. See RFC 2109.
- if str is None or len(str) < 2:
- return str
- if str[0] != '"' or str[-1] != '"':
- return str
-
- # We have to assume that we must decode this string.
- # Down to work.
-
- # Remove the "s
- str = str[1:-1]
-
- # Check for special sequences. Examples:
- # \012 --> \n
- # \" --> "
- #
- i = 0
- n = len(str)
- res = []
- while 0 <= i < n:
- o_match = _OctalPatt.search(str, i)
- q_match = _QuotePatt.search(str, i)
- if not o_match and not q_match: # Neither matched
- res.append(str[i:])
- break
- # else:
- j = k = -1
- if o_match:
- j = o_match.start(0)
- if q_match:
- k = q_match.start(0)
- if q_match and (not o_match or k < j): # QuotePatt matched
- res.append(str[i:k])
- res.append(str[k + 1])
- i = k + 2
- else: # OctalPatt matched
- res.append(str[i:j])
- res.append(chr(int(str[j + 1:j + 4], 8)))
- i = j + 4
- return _nulljoin(res)
-
-
-def parse_cookie(cookie):
- """Parse a ``Cookie`` HTTP header into a dict of name/value pairs.
-
- This function attempts to mimic browser cookie parsing behavior;
- it specifically does not follow any of the cookie-related RFCs
- (because browsers don't either).
-
- The algorithm used is identical to that used by Django version 1.9.10.
-
- .. versionadded:: 4.4.2
- """
- cookiedict = {}
- for chunk in cookie.split(str(';')):
- if str('=') in chunk:
- key, val = chunk.split(str('='), 1)
- else:
- # Assume an empty name per
- # https://bugzilla.mozilla.org/show_bug.cgi?id=169091
- key, val = str(''), chunk
- key, val = key.strip(), val.strip()
- if key or val:
- # unquote using Python's algorithm.
- cookiedict[key] = _unquote_cookie(val)
- return cookiedict
+ """A dictionary that maintains ``Http-Header-Case`` for all keys.
+
+ Supports multiple values per key via a pair of new methods,
+ `add()` and `get_list()`. The regular dictionary interface
+ returns a single value per key, with multiple values joined by a
+ comma.
+
+ >>> h = HTTPHeaders({"content-type": "text/html"})
+ >>> list(h.keys())
+ ['Content-Type']
+ >>> h["Content-Type"]
+ 'text/html'
+
+ >>> h.add("Set-Cookie", "A=B")
+ >>> h.add("Set-Cookie", "C=D")
+ >>> h["set-cookie"]
+ 'A=B,C=D'
+ >>> h.get_list("set-cookie")
+ ['A=B', 'C=D']
+
+ >>> for (k,v) in sorted(h.get_all()):
+ ... print('%s: %s' % (k,v))
+ ...
+ Content-Type: text/html
+ Set-Cookie: A=B
+ Set-Cookie: C=D
+ """
+ def __init__(self, *args, **kwargs):
+ self._dict = {} # type: typing.Dict[str, str]
+ self._as_list = {} # type: typing.Dict[str, typing.List[str]]
+ self._last_key = None
+ if (len(args) == 1 and len(kwargs) == 0 and
+ isinstance(args[0], HTTPHeaders)):
+ # Copy constructor
+ for k, v in args[0].get_all():
+ self.add(k, v)
+ else:
+ # Dict-style initialization
+ self.update(*args, **kwargs)
+
+ # new public methods
+
+ def add(self, name, value):
+ # type: (str, str) -> None
+ """Adds a new value for the given key."""
+ norm_name = _normalized_headers[name]
+ self._last_key = norm_name
+ if norm_name in self:
+ self._dict[norm_name] = (native_str(self[norm_name]) + ',' +
+ native_str(value))
+ self._as_list[norm_name].append(value)
+ else:
+ self[norm_name] = value
+
+ def get_list(self, name):
+ """Returns all values for the given header as a list."""
+ norm_name = _normalized_headers[name]
+ return self._as_list.get(norm_name, [])
+
+ def get_all(self):
+ # type: () -> typing.Iterable[typing.Tuple[str, str]]
+ """Returns an iterable of all (name, value) pairs.
+
+ If a header has multiple values, multiple pairs will be
+ returned with the same name.
+ """
+ for name, values in self._as_list.items():
+ for value in values:
+ yield (name, value)
+
+ def parse_line(self, line):
+ """Updates the dictionary with a single header line.
+
+ >>> h = HTTPHeaders()
+ >>> h.parse_line("Content-Type: text/html")
+ >>> h.get('content-type')
+ 'text/html'
+ """
+ if line[0].isspace():
+ # continuation of a multi-line header
+ new_part = ' ' + line.lstrip()
+ self._as_list[self._last_key][-1] += new_part
+ self._dict[self._last_key] += new_part
+ else:
+ name, value = line.split(":", 1)
+ self.add(name, value.strip())
+
+ @classmethod
+ def parse(cls, headers):
+ """Returns a dictionary from HTTP header text.
+
+ >>> h = HTTPHeaders.parse("Content-Type: text/html\\r\\nContent-Length: 42\\r\\n")
+ >>> sorted(h.items())
+ [('Content-Length', '42'), ('Content-Type', 'text/html')]
+ """
+ h = cls()
+ for line in _CRLF_RE.split(headers):
+ if line:
+ h.parse_line(line)
+ return h
+
+ # MutableMapping abstract method implementations.
+
+ def __setitem__(self, name, value):
+ norm_name = _normalized_headers[name]
+ self._dict[norm_name] = value
+ self._as_list[norm_name] = [value]
+
+ def __getitem__(self, name):
+ # type: (str) -> str
+ return self._dict[_normalized_headers[name]]
+
+ def __delitem__(self, name):
+ norm_name = _normalized_headers[name]
+ del self._dict[norm_name]
+ del self._as_list[norm_name]
+
+ def __len__(self):
+ return len(self._dict)
+
+ def __iter__(self):
+ return iter(self._dict)
+
+ def copy(self):
+ # defined in dict but not in MutableMapping.
+ return HTTPHeaders(self)
+
+ # Use our overridden copy method for the copy.copy module.
+ # This makes shallow copies one level deeper, but preserves
+ # the appearance that HTTPHeaders is a single container.
+ __copy__ = copy
+
+ def __str__(self):
+ lines = []
+ for name, value in self.get_all():
+ lines.append("%s: %s\n" % (name, value))
+ return "".join(lines)
+
+ __unicode__ = __str__
+
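As a quick illustration of the multi-value behaviour documented above, here is a minimal, runnable sketch (assuming ``tornado.httputil`` is importable; the header values are arbitrary):

    from tornado.httputil import HTTPHeaders

    h = HTTPHeaders.parse("Set-Cookie: A=B\r\nSet-Cookie: C=D\r\nHost: example.com\r\n")
    assert h["set-cookie"] == "A=B,C=D"                # joined view via __getitem__
    assert h.get_list("Set-Cookie") == ["A=B", "C=D"]  # per-value view
    print(str(h))  # serializes one "Name: value" line per stored value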
+
+class HTTPServerRequest(object):
+ """A single HTTP request.
+
+ All attributes are type `str` unless otherwise noted.
+
+ .. attribute:: method
+
+ HTTP request method, e.g. "GET" or "POST"
+
+ .. attribute:: uri
+
+ The requested uri.
+
+ .. attribute:: path
+
+ The path portion of `uri`
+
+ .. attribute:: query
+
+ The query portion of `uri`
+
+ .. attribute:: version
+
+ HTTP version specified in request, e.g. "HTTP/1.1"
+
+ .. attribute:: headers
+
+ `.HTTPHeaders` dictionary-like object for request headers. Acts like
+ a case-insensitive dictionary with additional methods for repeated
+ headers.
+
+ .. attribute:: body
+
+ Request body, if present, as a byte string.
+
+ .. attribute:: remote_ip
+
+ Client's IP address as a string. If ``HTTPServer.xheaders`` is set,
+ will pass along the real IP address provided by a load balancer
+ in the ``X-Real-Ip`` or ``X-Forwarded-For`` header.
+
+ .. versionchanged:: 3.1
+ The list format of ``X-Forwarded-For`` is now supported.
+
+ .. attribute:: protocol
+
+ The protocol used, either "http" or "https". If ``HTTPServer.xheaders``
+ is set, will pass along the protocol used by a load balancer if
+ reported via an ``X-Scheme`` header.
+
+ .. attribute:: host
+
+ The requested hostname, usually taken from the ``Host`` header.
+
+ .. attribute:: arguments
+
+ GET/POST arguments are available in the arguments property, which
+ maps arguments names to lists of values (to support multiple values
+ for individual names). Names are of type `str`, while arguments
+ are byte strings. Note that this is different from
+ `.RequestHandler.get_argument`, which returns argument values as
+ unicode strings.
+
+ .. attribute:: query_arguments
+
+ Same format as ``arguments``, but contains only arguments extracted
+ from the query string.
+
+ .. versionadded:: 3.2
+
+ .. attribute:: body_arguments
+
+ Same format as ``arguments``, but contains only arguments extracted
+ from the request body.
+
+ .. versionadded:: 3.2
+
+ .. attribute:: files
+
+ File uploads are available in the files property, which maps file
+ names to lists of `.HTTPFile`.
+
+ .. attribute:: connection
+
+ An HTTP request is attached to a single HTTP connection, which can
+ be accessed through the "connection" attribute. Since connections
+ are typically kept open in HTTP/1.1, multiple requests can be handled
+ sequentially on a single connection.
+
+ .. versionchanged:: 4.0
+ Moved from ``tornado.httpserver.HTTPRequest``.
+ """
+ def __init__(self, method=None, uri=None, version="HTTP/1.0", headers=None,
+ body=None, host=None, files=None, connection=None,
+ start_line=None, server_connection=None):
+ if start_line is not None:
+ method, uri, version = start_line
+ self.method = method
+ self.uri = uri
+ self.version = version
+ self.headers = headers or HTTPHeaders()
+ self.body = body or b""
+
+ # set remote IP and protocol
+ context = getattr(connection, 'context', None)
+ self.remote_ip = getattr(context, 'remote_ip', None)
+ self.protocol = getattr(context, 'protocol', "http")
+
+ self.host = host or self.headers.get("Host") or "127.0.0.1"
+ self.host_name = split_host_and_port(self.host.lower())[0]
+ self.files = files or {}
+ self.connection = connection
+ self.server_connection = server_connection
+ self._start_time = time.time()
+ self._finish_time = None
+
+ self.path, sep, self.query = uri.partition('?')
+ self.arguments = parse_qs_bytes(self.query, keep_blank_values=True)
+ self.query_arguments = copy.deepcopy(self.arguments)
+ self.body_arguments = {}
+
+ def supports_http_1_1(self):
+ """Returns True if this request supports HTTP/1.1 semantics.
+
+ .. deprecated:: 4.0
+ Applications are less likely to need this information with the
+ introduction of `.HTTPConnection`. If you still need it, access
+ the ``version`` attribute directly.
+ """
+ return self.version == "HTTP/1.1"
+
+ @property
+ def cookies(self):
+ """A dictionary of Cookie.Morsel objects."""
+ if not hasattr(self, "_cookies"):
+ self._cookies = Cookie.SimpleCookie()
+ if "Cookie" in self.headers:
+ try:
+ parsed = parse_cookie(self.headers["Cookie"])
+ except Exception:
+ pass
+ else:
+ for k, v in parsed.items():
+ try:
+ self._cookies[k] = v
+ except Exception:
+ # SimpleCookie imposes some restrictions on keys;
+ # parse_cookie does not. Discard any cookies
+ # with disallowed keys.
+ pass
+ return self._cookies
+
+ def write(self, chunk, callback=None):
+ """Writes the given chunk to the response stream.
+
+ .. deprecated:: 4.0
+ Use ``request.connection`` and the `.HTTPConnection` methods
+ to write the response.
+ """
+ assert isinstance(chunk, bytes)
+ assert self.version.startswith("HTTP/1."), \
+ "deprecated interface only supported in HTTP/1.x"
+ self.connection.write(chunk, callback=callback)
+
+ def finish(self):
+ """Finishes this HTTP request on the open connection.
+
+ .. deprecated:: 4.0
+ Use ``request.connection`` and the `.HTTPConnection` methods
+ to write the response.
+ """
+ self.connection.finish()
+ self._finish_time = time.time()
+
+ def full_url(self):
+ """Reconstructs the full URL for this request."""
+ return self.protocol + "://" + self.host + self.uri
+
+ def request_time(self):
+ """Returns the amount of time it took for this request to execute."""
+ if self._finish_time is None:
+ return time.time() - self._start_time
+ else:
+ return self._finish_time - self._start_time
+
+ def get_ssl_certificate(self, binary_form=False):
+ """Returns the client's SSL certificate, if any.
+
+ To use client certificates, the HTTPServer's
+ `ssl.SSLContext.verify_mode` field must be set, e.g.::
+
+ ssl_ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
+ ssl_ctx.load_cert_chain("foo.crt", "foo.key")
+ ssl_ctx.load_verify_locations("cacerts.pem")
+ ssl_ctx.verify_mode = ssl.CERT_REQUIRED
+ server = HTTPServer(app, ssl_options=ssl_ctx)
+
+ By default, the return value is a dictionary (or None, if no
+ client certificate is present). If ``binary_form`` is true, a
+ DER-encoded form of the certificate is returned instead. See
+ SSLSocket.getpeercert() in the standard library for more
+ details.
+ http://docs.python.org/library/ssl.html#sslsocket-objects
+ """
+ try:
+ return self.connection.stream.socket.getpeercert(
+ binary_form=binary_form)
+ except SSLError:
+ return None
+
+ def _parse_body(self):
+ parse_body_arguments(
+ self.headers.get("Content-Type", ""), self.body,
+ self.body_arguments, self.files,
+ self.headers)
+
+ for k, v in self.body_arguments.items():
+ self.arguments.setdefault(k, []).extend(v)
+
+ def __repr__(self):
+ attrs = ("protocol", "host", "method", "uri", "version", "remote_ip")
+ args = ", ".join(["%s=%r" % (n, getattr(self, n)) for n in attrs])
+ return "%s(%s, headers=%s)" % (
+ self.__class__.__name__, args, dict(self.headers))
+
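For reference, a minimal sketch of constructing a request object directly (normally the server does this for you; the host and query below are made up for illustration):

    from tornado.httputil import HTTPHeaders, HTTPServerRequest

    req = HTTPServerRequest(
        method="GET", uri="/search?q=tornado&q=web",
        headers=HTTPHeaders({"Host": "example.com"}))
    assert req.path == "/search"
    # Argument values are byte strings, one list entry per repeated name.
    assert req.query_arguments["q"] == [b"tornado", b"web"]
    assert req.full_url() == "http://example.com/search?q=tornado&q=web"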
+
+class HTTPInputError(Exception):
+ """Exception class for malformed HTTP requests or responses
+ from remote sources.
+
+ .. versionadded:: 4.0
+ """
+ pass
+
+
+class HTTPOutputError(Exception):
+ """Exception class for errors in HTTP output.
+
+ .. versionadded:: 4.0
+ """
+ pass
+
+
+class HTTPServerConnectionDelegate(object):
+ """Implement this interface to handle requests from `.HTTPServer`.
+
+ .. versionadded:: 4.0
+ """
+ def start_request(self, server_conn, request_conn):
+ """This method is called by the server when a new request has started.
+
+ :arg server_conn: is an opaque object representing the long-lived
+ (e.g. tcp-level) connection.
+ :arg request_conn: is a `.HTTPConnection` object for a single
+ request/response exchange.
+
+ This method should return a `.HTTPMessageDelegate`.
+ """
+ raise NotImplementedError()
+
+ def on_close(self, server_conn):
+ """This method is called when a connection has been closed.
+
+ :arg server_conn: is a server connection that has previously been
+ passed to ``start_request``.
+ """
+ pass
+
+
+class HTTPMessageDelegate(object):
+ """Implement this interface to handle an HTTP request or response.
+
+ .. versionadded:: 4.0
+ """
+ def headers_received(self, start_line, headers):
+ """Called when the HTTP headers have been received and parsed.
+
+ :arg start_line: a `.RequestStartLine` or `.ResponseStartLine`
+ depending on whether this is a client or server message.
+ :arg headers: a `.HTTPHeaders` instance.
+
+ Some `.HTTPConnection` methods can only be called during
+ ``headers_received``.
+
+ May return a `.Future`; if it does the body will not be read
+ until it is done.
+ """
+ pass
+
+ def data_received(self, chunk):
+ """Called when a chunk of data has been received.
+
+ May return a `.Future` for flow control.
+ """
+ pass
+
+ def finish(self):
+ """Called after the last chunk of data has been received."""
+ pass
+
+ def on_connection_close(self):
+ """Called if the connection is closed without finishing the request.
+
+ If ``headers_received`` is called, either ``finish`` or
+ ``on_connection_close`` will be called, but not both.
+ """
+ pass
+
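As a sketch of how this interface is typically implemented, a delegate that simply buffers the body (the class name is illustrative, not part of the module):

    from tornado import httputil

    class BufferingDelegate(httputil.HTTPMessageDelegate):
        def __init__(self):
            self.chunks = []

        def headers_received(self, start_line, headers):
            # start_line is a RequestStartLine on the server side,
            # a ResponseStartLine on the client side.
            self.start_line = start_line
            self.headers = headers

        def data_received(self, chunk):
            self.chunks.append(chunk)  # could return a Future for flow control

        def finish(self):
            self.body = b"".join(self.chunks)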
+
+class HTTPConnection(object):
+ """Applications use this interface to write their responses.
+
+ .. versionadded:: 4.0
+ """
+ def write_headers(self, start_line, headers, chunk=None, callback=None):
+ """Write an HTTP header block.
+
+ :arg start_line: a `.RequestStartLine` or `.ResponseStartLine`.
+ :arg headers: a `.HTTPHeaders` instance.
+ :arg chunk: the first (optional) chunk of data. This is an optimization
+ so that small responses can be written in the same call as their
+ headers.
+ :arg callback: a callback to be run when the write is complete.
+
+ The ``version`` field of ``start_line`` is ignored.
+
+ Returns a `.Future` if no callback is given.
+ """
+ raise NotImplementedError()
+
+ def write(self, chunk, callback=None):
+ """Writes a chunk of body data.
+
+ The callback will be run when the write is complete. If no callback
+ is given, returns a Future.
+ """
+ raise NotImplementedError()
+
+ def finish(self):
+ """Indicates that the last body data has been written.
+ """
+ raise NotImplementedError()
+
+
+def url_concat(url, args):
+ """Concatenate url and arguments regardless of whether
+ url has existing query parameters.
+
+ ``args`` may be either a dictionary or a list of key-value pairs
+ (the latter allows for multiple values with the same key).
+
+ >>> url_concat("http://example.com/foo", dict(c="d"))
+ 'http://example.com/foo?c=d'
+ >>> url_concat("http://example.com/foo?a=b", dict(c="d"))
+ 'http://example.com/foo?a=b&c=d'
+ >>> url_concat("http://example.com/foo?a=b", [("c", "d"), ("c", "d2")])
+ 'http://example.com/foo?a=b&c=d&c=d2'
+ """
+ if args is None:
+ return url
+ parsed_url = urlparse(url)
+ if isinstance(args, dict):
+ parsed_query = parse_qsl(parsed_url.query, keep_blank_values=True)
+ parsed_query.extend(args.items())
+ elif isinstance(args, list) or isinstance(args, tuple):
+ parsed_query = parse_qsl(parsed_url.query, keep_blank_values=True)
+ parsed_query.extend(args)
+ else:
+ err = "'args' parameter should be dict, list or tuple. Not {0}".format(
+ type(args))
+ raise TypeError(err)
+ final_query = urlencode(parsed_query)
+ url = urlunparse((
+ parsed_url[0],
+ parsed_url[1],
+ parsed_url[2],
+ parsed_url[3],
+ final_query,
+ parsed_url[5]))
+ return url
+
+
+class HTTPFile(ObjectDict):
+ """Represents a file uploaded via a form.
+
+ For backwards compatibility, its instance attributes are also
+ accessible as dictionary keys.
+
+ * ``filename``
+ * ``body``
+ * ``content_type``
+ """
+ pass
+
+
+def _parse_request_range(range_header):
+ """Parses a Range header.
+
+ Returns either ``None`` or tuple ``(start, end)``.
+ Note that while the HTTP headers use inclusive byte positions,
+ this method returns indexes suitable for use in slices.
+
+ >>> start, end = _parse_request_range("bytes=1-2")
+ >>> start, end
+ (1, 3)
+ >>> [0, 1, 2, 3, 4][start:end]
+ [1, 2]
+ >>> _parse_request_range("bytes=6-")
+ (6, None)
+ >>> _parse_request_range("bytes=-6")
+ (-6, None)
+ >>> _parse_request_range("bytes=-0")
+ (None, 0)
+ >>> _parse_request_range("bytes=")
+ (None, None)
+ >>> _parse_request_range("foo=42")
+ >>> _parse_request_range("bytes=1-2,6-10")
+
+ Note: only supports one range (e.g., ``bytes=1-2,6-10`` is not allowed).
+
+ See [0] for the details of the range header.
+
+ [0]: http://greenbytes.de/tech/webdav/draft-ietf-httpbis-p5-range-latest.html#byte.ranges
+ """
+ unit, _, value = range_header.partition("=")
+ unit, value = unit.strip(), value.strip()
+ if unit != "bytes":
+ return None
+ start_b, _, end_b = value.partition("-")
+ try:
+ start = _int_or_none(start_b)
+ end = _int_or_none(end_b)
+ except ValueError:
+ return None
+ if end is not None:
+ if start is None:
+ if end != 0:
+ start = -end
+ end = None
+ else:
+ end += 1
+ return (start, end)
+
+
+def _get_content_range(start, end, total):
+ """Returns a suitable Content-Range header:
+
+ >>> print(_get_content_range(None, 1, 4))
+ bytes 0-0/4
+ >>> print(_get_content_range(1, 3, 4))
+ bytes 1-2/4
+ >>> print(_get_content_range(None, None, 4))
+ bytes 0-3/4
+ """
+ start = start or 0
+ end = (end or total) - 1
+ return "bytes %s-%s/%s" % (start, end, total)
+
+
+def _int_or_none(val):
+ val = val.strip()
+ if val == "":
+ return None
+ return int(val)
+
+
+def parse_body_arguments(content_type, body, arguments, files, headers=None):
+ """Parses a form request body.
+
+ Supports ``application/x-www-form-urlencoded`` and
+ ``multipart/form-data``. The ``content_type`` parameter should be
+ a string and ``body`` should be a byte string. The ``arguments``
+ and ``files`` parameters are dictionaries that will be updated
+ with the parsed contents.
+ """
+ if headers and 'Content-Encoding' in headers:
+ gen_log.warning("Unsupported Content-Encoding: %s",
+ headers['Content-Encoding'])
+ return
+ if content_type.startswith("application/x-www-form-urlencoded"):
+ try:
+ uri_arguments = parse_qs_bytes(native_str(body), keep_blank_values=True)
+ except Exception as e:
+ gen_log.warning('Invalid x-www-form-urlencoded body: %s', e)
+ uri_arguments = {}
+ for name, values in uri_arguments.items():
+ if values:
+ arguments.setdefault(name, []).extend(values)
+ elif content_type.startswith("multipart/form-data"):
+ try:
+ fields = content_type.split(";")
+ for field in fields:
+ k, sep, v = field.strip().partition("=")
+ if k == "boundary" and v:
+ parse_multipart_form_data(utf8(v), body, arguments, files)
+ break
+ else:
+ raise ValueError("multipart boundary not found")
+ except Exception as e:
+ gen_log.warning("Invalid multipart/form-data: %s", e)
+
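A small sketch of the urlencoded path; the ``arguments`` and ``files`` dictionaries are updated in place:

    from tornado.httputil import parse_body_arguments

    args, files = {}, {}
    parse_body_arguments(
        "application/x-www-form-urlencoded", b"a=1&a=2&b=3", args, files)
    assert args == {"a": [b"1", b"2"], "b": [b"3"]}
    assert files == {}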
+
+def parse_multipart_form_data(boundary, data, arguments, files):
+ """Parses a ``multipart/form-data`` body.
+
+ The ``boundary`` and ``data`` parameters are both byte strings.
+ The dictionaries given in the arguments and files parameters
+ will be updated with the contents of the body.
+ """
+ # The standard allows for the boundary to be quoted in the header,
+ # although it's rare (it happens at least for google app engine
+ # xmpp). I think we're also supposed to handle backslash-escapes
+ # here but I'll save that until we see a client that uses them
+ # in the wild.
+ if boundary.startswith(b'"') and boundary.endswith(b'"'):
+ boundary = boundary[1:-1]
+ final_boundary_index = data.rfind(b"--" + boundary + b"--")
+ if final_boundary_index == -1:
+ gen_log.warning("Invalid multipart/form-data: no final boundary")
+ return
+ parts = data[:final_boundary_index].split(b"--" + boundary + b"\r\n")
+ for part in parts:
+ if not part:
+ continue
+ eoh = part.find(b"\r\n\r\n")
+ if eoh == -1:
+ gen_log.warning("multipart/form-data missing headers")
+ continue
+ headers = HTTPHeaders.parse(part[:eoh].decode("utf-8"))
+ disp_header = headers.get("Content-Disposition", "")
+ disposition, disp_params = _parse_header(disp_header)
+ if disposition != "form-data" or not part.endswith(b"\r\n"):
+ gen_log.warning("Invalid multipart/form-data")
+ continue
+ value = part[eoh + 4:-2]
+ if not disp_params.get("name"):
+ gen_log.warning("multipart/form-data value missing name")
+ continue
+ name = disp_params["name"]
+ if disp_params.get("filename"):
+ ctype = headers.get("Content-Type", "application/unknown")
+ files.setdefault(name, []).append(HTTPFile( # type: ignore
+ filename=disp_params["filename"], body=value,
+ content_type=ctype))
+ else:
+ arguments.setdefault(name, []).append(value)
+
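And a matching sketch for the multipart path, using a hand-built body (the boundary and field name are arbitrary):

    from tornado.httputil import parse_body_arguments

    body = (b"--xyz\r\n"
            b'Content-Disposition: form-data; name="title"\r\n'
            b"\r\n"
            b"hello\r\n"
            b"--xyz--\r\n")
    args, files = {}, {}
    parse_body_arguments("multipart/form-data; boundary=xyz", body, args, files)
    assert args == {"title": [b"hello"]}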
+
+def format_timestamp(ts):
+ """Formats a timestamp in the format used by HTTP.
+
+ The argument may be a numeric timestamp as returned by `time.time`,
+ a time tuple as returned by `time.gmtime`, or a `datetime.datetime`
+ object.
+
+ >>> format_timestamp(1359312200)
+ 'Sun, 27 Jan 2013 18:43:20 GMT'
+ """
+ if isinstance(ts, numbers.Real):
+ pass
+ elif isinstance(ts, (tuple, time.struct_time)):
+ ts = calendar.timegm(ts)
+ elif isinstance(ts, datetime.datetime):
+ ts = calendar.timegm(ts.utctimetuple())
+ else:
+ raise TypeError("unknown timestamp type: %r" % ts)
+ return email.utils.formatdate(ts, usegmt=True)
+
+
+RequestStartLine = collections.namedtuple(
+ 'RequestStartLine', ['method', 'path', 'version'])
+
+
+def parse_request_start_line(line):
+ """Returns a (method, path, version) tuple for an HTTP 1.x request line.
+
+ The response is a `collections.namedtuple`.
+
+ >>> parse_request_start_line("GET /foo HTTP/1.1")
+ RequestStartLine(method='GET', path='/foo', version='HTTP/1.1')
+ """
+ try:
+ method, path, version = line.split(" ")
+ except ValueError:
+ raise HTTPInputError("Malformed HTTP request line")
+ if not re.match(r"^HTTP/1\.[0-9]$", version):
+ raise HTTPInputError(
+ "Malformed HTTP version in HTTP Request-Line: %r" % version)
+ return RequestStartLine(method, path, version)
+
+
+ResponseStartLine = collections.namedtuple(
+ 'ResponseStartLine', ['version', 'code', 'reason'])
+
+
+def parse_response_start_line(line):
+ """Returns a (version, code, reason) tuple for an HTTP 1.x response line.
+
+ The response is a `collections.namedtuple`.
+
+ >>> parse_response_start_line("HTTP/1.1 200 OK")
+ ResponseStartLine(version='HTTP/1.1', code=200, reason='OK')
+ """
+ line = native_str(line)
+ match = re.match(r"(HTTP/1\.[0-9]) ([0-9]+) ([^\r]*)", line)
+ if not match:
+ raise HTTPInputError("Error parsing response start line")
+ return ResponseStartLine(match.group(1), int(match.group(2)),
+ match.group(3))
+
+# _parseparam and _parse_header are copied and modified from python2.7's cgi.py
+# The original 2.7 version of this code did not correctly support some
+# combinations of semicolons and double quotes.
+# It has also been modified to support valueless parameters as seen in
+# websocket extension negotiations.
+
+
+def _parseparam(s):
+ while s[:1] == ';':
+ s = s[1:]
+ end = s.find(';')
+ while end > 0 and (s.count('"', 0, end) - s.count('\\"', 0, end)) % 2:
+ end = s.find(';', end + 1)
+ if end < 0:
+ end = len(s)
+ f = s[:end]
+ yield f.strip()
+ s = s[end:]
+
+
+def _parse_header(line):
+ """Parse a Content-type like header.
+
+ Return the main content-type and a dictionary of options.
+
+ """
+ parts = _parseparam(';' + line)
+ key = next(parts)
+ pdict = {}
+ for p in parts:
+ i = p.find('=')
+ if i >= 0:
+ name = p[:i].strip().lower()
+ value = p[i + 1:].strip()
+ if len(value) >= 2 and value[0] == value[-1] == '"':
+ value = value[1:-1]
+ value = value.replace('\\\\', '\\').replace('\\"', '"')
+ pdict[name] = value
+ else:
+ pdict[p] = None
+ return key, pdict
+
+
+def _encode_header(key, pdict):
+ """Inverse of _parse_header.
+
+ >>> _encode_header('permessage-deflate',
+ ... {'client_max_window_bits': 15, 'client_no_context_takeover': None})
+ 'permessage-deflate; client_max_window_bits=15; client_no_context_takeover'
+ """
+ if not pdict:
+ return key
+ out = [key]
+ # Sort the parameters just to make it easy to test.
+ for k, v in sorted(pdict.items()):
+ if v is None:
+ out.append(k)
+ else:
+ # TODO: quote if necessary.
+ out.append('%s=%s' % (k, v))
+ return '; '.join(out)
+
+
+def doctests():
+ import doctest
+ return doctest.DocTestSuite()
+
+
+def split_host_and_port(netloc):
+ """Returns ``(host, port)`` tuple from ``netloc``.
+
+ Returned ``port`` will be ``None`` if not present.
+
+ .. versionadded:: 4.1
+ """
+ match = re.match(r'^(.+):(\d+)$', netloc)
+ if match:
+ host = match.group(1)
+ port = int(match.group(2))
+ else:
+ host = netloc
+ port = None
+ return (host, port)
+
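For example (a sketch; the regex treats a trailing ``:digits`` as the port, and anything else is returned whole):

    from tornado.httputil import split_host_and_port

    assert split_host_and_port("example.com:8080") == ("example.com", 8080)
    assert split_host_and_port("example.com") == ("example.com", None)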
+
+_OctalPatt = re.compile(r"\\[0-3][0-7][0-7]")
+_QuotePatt = re.compile(r"[\\].")
+_nulljoin = ''.join
+
+
+def _unquote_cookie(str):
+ """Handle double quotes and escaping in cookie values.
+
+ This method is copied verbatim from the Python 3.5 standard
+ library (http.cookies._unquote) so we don't have to depend on
+ non-public interfaces.
+ """
+ # If there aren't any doublequotes,
+ # then there can't be any special characters. See RFC 2109.
+ if str is None or len(str) < 2:
+ return str
+ if str[0] != '"' or str[-1] != '"':
+ return str
+
+ # We have to assume that we must decode this string.
+ # Down to work.
+
+ # Remove the "s
+ str = str[1:-1]
+
+ # Check for special sequences. Examples:
+ # \012 --> \n
+ # \" --> "
+ #
+ i = 0
+ n = len(str)
+ res = []
+ while 0 <= i < n:
+ o_match = _OctalPatt.search(str, i)
+ q_match = _QuotePatt.search(str, i)
+ if not o_match and not q_match: # Neither matched
+ res.append(str[i:])
+ break
+ # else:
+ j = k = -1
+ if o_match:
+ j = o_match.start(0)
+ if q_match:
+ k = q_match.start(0)
+ if q_match and (not o_match or k < j): # QuotePatt matched
+ res.append(str[i:k])
+ res.append(str[k + 1])
+ i = k + 2
+ else: # OctalPatt matched
+ res.append(str[i:j])
+ res.append(chr(int(str[j + 1:j + 4], 8)))
+ i = j + 4
+ return _nulljoin(res)
+
+
+def parse_cookie(cookie):
+ """Parse a ``Cookie`` HTTP header into a dict of name/value pairs.
+
+ This function attempts to mimic browser cookie parsing behavior;
+ it specifically does not follow any of the cookie-related RFCs
+ (because browsers don't either).
+
+ The algorithm used is identical to that used by Django version 1.9.10.
+
+ .. versionadded:: 4.4.2
+ """
+ cookiedict = {}
+ for chunk in cookie.split(str(';')):
+ if str('=') in chunk:
+ key, val = chunk.split(str('='), 1)
+ else:
+ # Assume an empty name per
+ # https://bugzilla.mozilla.org/show_bug.cgi?id=169091
+ key, val = str(''), chunk
+ key, val = key.strip(), val.strip()
+ if key or val:
+ # unquote using Python's algorithm.
+ cookiedict[key] = _unquote_cookie(val)
+ return cookiedict
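A short sketch of the parser, including a quoted value that exercises ``_unquote_cookie`` and the empty-name case noted above:

    from tornado.httputil import parse_cookie

    c = parse_cookie('a=b; c="d\\"e"; =bare')
    assert c["a"] == "b"
    assert c["c"] == 'd"e'   # backslash escape inside double quotes
    assert c[""] == "bare"   # nameless cookie keeps its value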
diff --git a/contrib/python/tornado/tornado-4/tornado/ioloop.py b/contrib/python/tornado/tornado-4/tornado/ioloop.py
index ad35787fca..3dd129bb74 100644
--- a/contrib/python/tornado/tornado-4/tornado/ioloop.py
+++ b/contrib/python/tornado/tornado-4/tornado/ioloop.py
@@ -1,1041 +1,1041 @@
-#!/usr/bin/env python
-#
-# Copyright 2009 Facebook
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""An I/O event loop for non-blocking sockets.
-
-Typical applications will use a single `IOLoop` object, in the
-`IOLoop.instance` singleton. The `IOLoop.start` method should usually
-be called at the end of the ``main()`` function. Atypical applications may
-use more than one `IOLoop`, such as one `IOLoop` per thread, or per `unittest`
-case.
-
-In addition to I/O events, the `IOLoop` can also schedule time-based events.
-`IOLoop.add_timeout` is a non-blocking alternative to `time.sleep`.
-"""
-
-from __future__ import absolute_import, division, print_function
-
-import collections
-import datetime
-import errno
-import functools
-import heapq
-import itertools
-import logging
-import numbers
-import os
-import select
-import sys
-import threading
-import time
-import traceback
-import math
-
-from tornado.concurrent import TracebackFuture, is_future
-from tornado.log import app_log, gen_log
-from tornado.platform.auto import set_close_exec, Waker
-from tornado import stack_context
-from tornado.util import PY3, Configurable, errno_from_exception, timedelta_to_seconds
-
-try:
- import signal
-except ImportError:
- signal = None
-
-
-if PY3:
- import _thread as thread
-else:
- import thread
-
-
-_POLL_TIMEOUT = 3600.0
-
-
-class TimeoutError(Exception):
- pass
-
-
-class IOLoop(Configurable):
- """A level-triggered I/O loop.
-
- We use ``epoll`` (Linux) or ``kqueue`` (BSD and Mac OS X) if they
- are available, or else we fall back on select(). If you are
- implementing a system that needs to handle thousands of
- simultaneous connections, you should use a system that supports
- either ``epoll`` or ``kqueue``.
-
- Example usage for a simple TCP server:
-
- .. testcode::
-
- import errno
- import functools
- import tornado.ioloop
- import socket
-
- def connection_ready(sock, fd, events):
- while True:
- try:
- connection, address = sock.accept()
- except socket.error as e:
- if e.args[0] not in (errno.EWOULDBLOCK, errno.EAGAIN):
- raise
- return
- connection.setblocking(0)
- handle_connection(connection, address)
-
- if __name__ == '__main__':
- sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
- sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
- sock.setblocking(0)
- sock.bind(("", port))
- sock.listen(128)
-
- io_loop = tornado.ioloop.IOLoop.current()
- callback = functools.partial(connection_ready, sock)
- io_loop.add_handler(sock.fileno(), callback, io_loop.READ)
- io_loop.start()
-
- .. testoutput::
- :hide:
-
- By default, a newly-constructed `IOLoop` becomes the thread's current
- `IOLoop`, unless there already is a current `IOLoop`. This behavior
- can be controlled with the ``make_current`` argument to the `IOLoop`
- constructor: if ``make_current=True``, the new `IOLoop` will always
- try to become current and it raises an error if there is already a
- current instance. If ``make_current=False``, the new `IOLoop` will
- not try to become current.
-
- .. versionchanged:: 4.2
- Added the ``make_current`` keyword argument to the `IOLoop`
- constructor.
- """
- # Constants from the epoll module
- _EPOLLIN = 0x001
- _EPOLLPRI = 0x002
- _EPOLLOUT = 0x004
- _EPOLLERR = 0x008
- _EPOLLHUP = 0x010
- _EPOLLRDHUP = 0x2000
- _EPOLLONESHOT = (1 << 30)
- _EPOLLET = (1 << 31)
-
- # Our events map exactly to the epoll events
- NONE = 0
- READ = _EPOLLIN
- WRITE = _EPOLLOUT
- ERROR = _EPOLLERR | _EPOLLHUP
-
- # Global lock for creating global IOLoop instance
- _instance_lock = threading.Lock()
-
- _current = threading.local()
-
- @staticmethod
- def instance():
- """Returns a global `IOLoop` instance.
-
- Most applications have a single, global `IOLoop` running on the
- main thread. Use this method to get this instance from
- another thread. In most other cases, it is better to use `current()`
- to get the current thread's `IOLoop`.
- """
- if not hasattr(IOLoop, "_instance"):
- with IOLoop._instance_lock:
- if not hasattr(IOLoop, "_instance"):
- # New instance after double check
- IOLoop._instance = IOLoop()
- return IOLoop._instance
-
- @staticmethod
- def initialized():
- """Returns true if the singleton instance has been created."""
- return hasattr(IOLoop, "_instance")
-
- def install(self):
- """Installs this `IOLoop` object as the singleton instance.
-
- This is normally not necessary as `instance()` will create
- an `IOLoop` on demand, but you may want to call `install` to use
- a custom subclass of `IOLoop`.
-
- When using an `IOLoop` subclass, `install` must be called prior
- to creating any objects that implicitly create their own
- `IOLoop` (e.g., :class:`tornado.httpclient.AsyncHTTPClient`).
- """
- assert not IOLoop.initialized()
- IOLoop._instance = self
-
- @staticmethod
- def clear_instance():
- """Clear the global `IOLoop` instance.
-
- .. versionadded:: 4.0
- """
- if hasattr(IOLoop, "_instance"):
- del IOLoop._instance
-
- @staticmethod
- def current(instance=True):
- """Returns the current thread's `IOLoop`.
-
- If an `IOLoop` is currently running or has been marked as
- current by `make_current`, returns that instance. If there is
- no current `IOLoop`, returns `IOLoop.instance()` (i.e. the
- main thread's `IOLoop`, creating one if necessary) if ``instance``
- is true.
-
- In general you should use `IOLoop.current` as the default when
- constructing an asynchronous object, and use `IOLoop.instance`
- when you mean to communicate to the main thread from a different
- one.
-
- .. versionchanged:: 4.1
- Added ``instance`` argument to control the fallback to
- `IOLoop.instance()`.
- """
- current = getattr(IOLoop._current, "instance", None)
- if current is None and instance:
- return IOLoop.instance()
- return current
-
- def make_current(self):
- """Makes this the `IOLoop` for the current thread.
-
- An `IOLoop` automatically becomes current for its thread
- when it is started, but it is sometimes useful to call
- `make_current` explicitly before starting the `IOLoop`,
- so that code run at startup time can find the right
- instance.
-
- .. versionchanged:: 4.1
- An `IOLoop` created while there is no current `IOLoop`
- will automatically become current.
- """
- IOLoop._current.instance = self
-
- @staticmethod
- def clear_current():
- IOLoop._current.instance = None
-
- @classmethod
- def configurable_base(cls):
- return IOLoop
-
- @classmethod
- def configurable_default(cls):
- if hasattr(select, "epoll"):
- from tornado.platform.epoll import EPollIOLoop
- return EPollIOLoop
- if hasattr(select, "kqueue"):
- # Python 2.6+ on BSD or Mac
- from tornado.platform.kqueue import KQueueIOLoop
- return KQueueIOLoop
- from tornado.platform.select import SelectIOLoop
- return SelectIOLoop
-
- def initialize(self, make_current=None):
- if make_current is None:
- if IOLoop.current(instance=False) is None:
- self.make_current()
- elif make_current:
- if IOLoop.current(instance=False) is not None:
- raise RuntimeError("current IOLoop already exists")
- self.make_current()
-
- def close(self, all_fds=False):
- """Closes the `IOLoop`, freeing any resources used.
-
- If ``all_fds`` is true, all file descriptors registered on the
- IOLoop will be closed (not just the ones created by the
- `IOLoop` itself).
-
- Many applications will only use a single `IOLoop` that runs for the
- entire lifetime of the process. In that case closing the `IOLoop`
- is not necessary since everything will be cleaned up when the
- process exits. `IOLoop.close` is provided mainly for scenarios
- such as unit tests, which create and destroy a large number of
- ``IOLoops``.
-
- An `IOLoop` must be completely stopped before it can be closed. This
- means that `IOLoop.stop()` must be called *and* `IOLoop.start()` must
- be allowed to return before attempting to call `IOLoop.close()`.
- Therefore the call to `close` will usually appear just after
- the call to `start` rather than near the call to `stop`.
-
- .. versionchanged:: 3.1
- If the `IOLoop` implementation supports non-integer objects
- for "file descriptors", those objects will have their
- ``close`` method called when ``all_fds`` is true.
- """
- raise NotImplementedError()
-
- def add_handler(self, fd, handler, events):
- """Registers the given handler to receive the given events for ``fd``.
-
- The ``fd`` argument may either be an integer file descriptor or
- a file-like object with a ``fileno()`` method (and optionally a
- ``close()`` method, which may be called when the `IOLoop` is shut
- down).
-
- The ``events`` argument is a bitwise or of the constants
- ``IOLoop.READ``, ``IOLoop.WRITE``, and ``IOLoop.ERROR``.
-
- When an event occurs, ``handler(fd, events)`` will be run.
-
- .. versionchanged:: 4.0
- Added the ability to pass file-like objects in addition to
- raw file descriptors.
- """
- raise NotImplementedError()
-
- def update_handler(self, fd, events):
- """Changes the events we listen for ``fd``.
-
- .. versionchanged:: 4.0
- Added the ability to pass file-like objects in addition to
- raw file descriptors.
- """
- raise NotImplementedError()
-
- def remove_handler(self, fd):
- """Stop listening for events on ``fd``.
-
- .. versionchanged:: 4.0
- Added the ability to pass file-like objects in addition to
- raw file descriptors.
- """
- raise NotImplementedError()
-
- def set_blocking_signal_threshold(self, seconds, action):
- """Sends a signal if the `IOLoop` is blocked for more than
- ``seconds`` seconds.
-
- Pass ``seconds=None`` to disable. Requires Python 2.6+ on a
- Unix-like platform.
-
- The action parameter is a Python signal handler. Read the
- documentation for the `signal` module for more information.
- If ``action`` is None, the process will be killed if it is
- blocked for too long.
- """
- raise NotImplementedError()
-
- def set_blocking_log_threshold(self, seconds):
- """Logs a stack trace if the `IOLoop` is blocked for more than
- ``seconds`` seconds.
-
- Equivalent to ``set_blocking_signal_threshold(seconds,
- self.log_stack)``
- """
- self.set_blocking_signal_threshold(seconds, self.log_stack)
-
- def log_stack(self, signal, frame):
- """Signal handler to log the stack trace of the current thread.
-
- For use with `set_blocking_signal_threshold`.
- """
- gen_log.warning('IOLoop blocked for %f seconds in\n%s',
- self._blocking_signal_threshold,
- ''.join(traceback.format_stack(frame)))
-
- def start(self):
- """Starts the I/O loop.
-
- The loop will run until one of the callbacks calls `stop()`, which
- will make the loop stop after the current event iteration completes.
- """
- raise NotImplementedError()
-
- def _setup_logging(self):
- """The IOLoop catches and logs exceptions, so it's
- important that log output be visible. However, python's
- default behavior for non-root loggers (prior to python
- 3.2) is to print an unhelpful "no handlers could be
- found" message rather than the actual log entry, so we
- must explicitly configure logging if we've made it this
- far without anything.
-
- This method should be called from start() in subclasses.
- """
- if not any([logging.getLogger().handlers,
- logging.getLogger('tornado').handlers,
- logging.getLogger('tornado.application').handlers]):
- logging.basicConfig()
-
- def stop(self):
- """Stop the I/O loop.
-
- If the event loop is not currently running, the next call to `start()`
- will return immediately.
-
- To use asynchronous methods from otherwise-synchronous code (such as
- unit tests), you can start and stop the event loop like this::
-
- ioloop = IOLoop()
- async_method(ioloop=ioloop, callback=ioloop.stop)
- ioloop.start()
-
- ``ioloop.start()`` will return after ``async_method`` has run
- its callback, whether that callback was invoked before or
- after ``ioloop.start``.
-
- Note that even after `stop` has been called, the `IOLoop` is not
- completely stopped until `IOLoop.start` has also returned.
- Some work that was scheduled before the call to `stop` may still
- be run before the `IOLoop` shuts down.
- """
- raise NotImplementedError()
-
- def run_sync(self, func, timeout=None):
- """Starts the `IOLoop`, runs the given function, and stops the loop.
-
- The function must return either a yieldable object or
- ``None``. If the function returns a yieldable object, the
- `IOLoop` will run until the yieldable is resolved (and
- `run_sync()` will return the yieldable's result). If it raises
- an exception, the `IOLoop` will stop and the exception will be
- re-raised to the caller.
-
- The keyword-only argument ``timeout`` may be used to set
- a maximum duration for the function. If the timeout expires,
- a `TimeoutError` is raised.
-
- This method is useful in conjunction with `tornado.gen.coroutine`
- to allow asynchronous calls in a ``main()`` function::
-
- @gen.coroutine
- def main():
- # do stuff...
-
- if __name__ == '__main__':
- IOLoop.current().run_sync(main)
-
- .. versionchanged:: 4.3
- Returning a non-``None``, non-yieldable value is now an error.
- """
- future_cell = [None]
-
- def run():
- try:
- result = func()
- if result is not None:
- from tornado.gen import convert_yielded
- result = convert_yielded(result)
- except Exception:
- future_cell[0] = TracebackFuture()
- future_cell[0].set_exc_info(sys.exc_info())
- else:
- if is_future(result):
- future_cell[0] = result
- else:
- future_cell[0] = TracebackFuture()
- future_cell[0].set_result(result)
- self.add_future(future_cell[0], lambda future: self.stop())
- self.add_callback(run)
- if timeout is not None:
- timeout_handle = self.add_timeout(self.time() + timeout, self.stop)
- self.start()
- if timeout is not None:
- self.remove_timeout(timeout_handle)
- if not future_cell[0].done():
- raise TimeoutError('Operation timed out after %s seconds' % timeout)
- return future_cell[0].result()
-
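The docstring's pattern with an explicit timeout, as a compact runnable sketch (Tornado 4.x ``gen`` style; the coroutine body is arbitrary):

    from tornado import gen
    from tornado.ioloop import IOLoop

    @gen.coroutine
    def compute():
        yield gen.sleep(0.1)   # stand-in for real async work
        raise gen.Return(42)

    assert IOLoop.current().run_sync(compute, timeout=5) == 42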
- def time(self):
- """Returns the current time according to the `IOLoop`'s clock.
-
- The return value is a floating-point number relative to an
- unspecified time in the past.
-
- By default, the `IOLoop`'s time function is `time.time`. However,
- it may be configured to use e.g. `time.monotonic` instead.
- Calls to `add_timeout` that pass a number instead of a
- `datetime.timedelta` should use this function to compute the
- appropriate time, so they can work no matter what time function
- is chosen.
- """
- return time.time()
-
- def add_timeout(self, deadline, callback, *args, **kwargs):
- """Runs the ``callback`` at the time ``deadline`` from the I/O loop.
-
- Returns an opaque handle that may be passed to
- `remove_timeout` to cancel.
-
- ``deadline`` may be a number denoting a time (on the same
- scale as `IOLoop.time`, normally `time.time`), or a
- `datetime.timedelta` object for a deadline relative to the
- current time. Since Tornado 4.0, `call_later` is a more
- convenient alternative for the relative case since it does not
- require a timedelta object.
-
- Note that it is not safe to call `add_timeout` from other threads.
- Instead, you must use `add_callback` to transfer control to the
- `IOLoop`'s thread, and then call `add_timeout` from there.
-
- Subclasses of IOLoop must implement either `add_timeout` or
- `call_at`; the default implementations of each will call
- the other. `call_at` is usually easier to implement, but
- subclasses that wish to maintain compatibility with Tornado
- versions prior to 4.0 must use `add_timeout` instead.
-
- .. versionchanged:: 4.0
- Now passes through ``*args`` and ``**kwargs`` to the callback.
- """
- if isinstance(deadline, numbers.Real):
- return self.call_at(deadline, callback, *args, **kwargs)
- elif isinstance(deadline, datetime.timedelta):
- return self.call_at(self.time() + timedelta_to_seconds(deadline),
- callback, *args, **kwargs)
- else:
- raise TypeError("Unsupported deadline %r" % deadline)
-
- def call_later(self, delay, callback, *args, **kwargs):
- """Runs the ``callback`` after ``delay`` seconds have passed.
-
- Returns an opaque handle that may be passed to `remove_timeout`
- to cancel. Note that unlike the `asyncio` method of the same
- name, the returned object does not have a ``cancel()`` method.
-
- See `add_timeout` for comments on thread-safety and subclassing.
-
- .. versionadded:: 4.0
- """
- return self.call_at(self.time() + delay, callback, *args, **kwargs)
-
- def call_at(self, when, callback, *args, **kwargs):
- """Runs the ``callback`` at the absolute time designated by ``when``.
-
- ``when`` must be a number using the same reference point as
- `IOLoop.time`.
-
- Returns an opaque handle that may be passed to `remove_timeout`
- to cancel. Note that unlike the `asyncio` method of the same
- name, the returned object does not have a ``cancel()`` method.
-
- See `add_timeout` for comments on thread-safety and subclassing.
-
- .. versionadded:: 4.0
- """
- return self.add_timeout(when, callback, *args, **kwargs)
-
- def remove_timeout(self, timeout):
- """Cancels a pending timeout.
-
- The argument is a handle as returned by `add_timeout`. It is
- safe to call `remove_timeout` even if the callback has already
- been run.
- """
- raise NotImplementedError()
-
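Putting `call_later` and `remove_timeout` together, a minimal sketch (the callback name is illustrative):

    from tornado.ioloop import IOLoop

    io_loop = IOLoop.current()

    def on_timeout():
        print("fired")
        io_loop.stop()

    handle = io_loop.call_later(10.0, on_timeout)  # schedule far in the future
    io_loop.remove_timeout(handle)                 # cancel; safe even if already run
    io_loop.call_later(0.1, on_timeout)            # reschedule sooner
    io_loop.start()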
- def add_callback(self, callback, *args, **kwargs):
- """Calls the given callback on the next I/O loop iteration.
-
- It is safe to call this method from any thread at any time,
- except from a signal handler. Note that this is the **only**
- method in `IOLoop` that makes this thread-safety guarantee; all
- other interaction with the `IOLoop` must be done from that
- `IOLoop`'s thread. `add_callback()` may be used to transfer
- control from other threads to the `IOLoop`'s thread.
-
- To add a callback from a signal handler, see
- `add_callback_from_signal`.
- """
- raise NotImplementedError()
-
- def add_callback_from_signal(self, callback, *args, **kwargs):
- """Calls the given callback on the next I/O loop iteration.
-
- Safe for use from a Python signal handler; should not be used
- otherwise.
-
- Callbacks added with this method will be run without any
- `.stack_context`, to avoid picking up the context of the function
- that was interrupted by the signal.
- """
- raise NotImplementedError()
-
- def spawn_callback(self, callback, *args, **kwargs):
- """Calls the given callback on the next IOLoop iteration.
-
- Unlike all other callback-related methods on IOLoop,
- ``spawn_callback`` does not associate the callback with its caller's
- ``stack_context``, so it is suitable for fire-and-forget callbacks
- that should not interfere with the caller.
-
- .. versionadded:: 4.0
- """
- with stack_context.NullContext():
- self.add_callback(callback, *args, **kwargs)
-
- def add_future(self, future, callback):
- """Schedules a callback on the ``IOLoop`` when the given
- `.Future` is finished.
-
- The callback is invoked with one argument, the
- `.Future`.
- """
- assert is_future(future)
- callback = stack_context.wrap(callback)
- future.add_done_callback(
- lambda future: self.add_callback(callback, future))
-
- def _run_callback(self, callback):
- """Runs a callback with error handling.
-
- For use in subclasses.
- """
- try:
- ret = callback()
- if ret is not None:
- from tornado import gen
- # Functions that return Futures typically swallow all
- # exceptions and store them in the Future. If a Future
- # makes it out to the IOLoop, ensure its exception (if any)
- # gets logged too.
- try:
- ret = gen.convert_yielded(ret)
- except gen.BadYieldError:
- # It's not unusual for add_callback to be used with
- # methods returning a non-None and non-yieldable
- # result, which should just be ignored.
- pass
- else:
- self.add_future(ret, self._discard_future_result)
- except Exception:
- self.handle_callback_exception(callback)
-
- def _discard_future_result(self, future):
- """Avoid unhandled-exception warnings from spawned coroutines."""
- future.result()
-
- def handle_callback_exception(self, callback):
- """This method is called whenever a callback run by the `IOLoop`
- throws an exception.
-
- By default simply logs the exception as an error. Subclasses
- may override this method to customize reporting of exceptions.
-
- The exception itself is not passed explicitly, but is available
- in `sys.exc_info`.
- """
- app_log.error("Exception in callback %r", callback, exc_info=True)
-
- def split_fd(self, fd):
- """Returns an (fd, obj) pair from an ``fd`` parameter.
-
- We accept both raw file descriptors and file-like objects as
- input to `add_handler` and related methods. When a file-like
- object is passed, we must retain the object itself so we can
- close it correctly when the `IOLoop` shuts down, but the
- poller interfaces favor file descriptors (they will accept
- file-like objects and call ``fileno()`` for you, but they
- always return the descriptor itself).
-
- This method is provided for use by `IOLoop` subclasses and should
- not generally be used by application code.
-
- .. versionadded:: 4.0
- """
- try:
- return fd.fileno(), fd
- except AttributeError:
- return fd, fd
-
- def close_fd(self, fd):
- """Utility method to close an ``fd``.
-
- If ``fd`` is a file-like object, we close it directly; otherwise
- we use `os.close`.
-
- This method is provided for use by `IOLoop` subclasses (in
- implementations of ``IOLoop.close(all_fds=True)``) and should
- not generally be used by application code.
-
- .. versionadded:: 4.0
- """
- try:
- try:
- fd.close()
- except AttributeError:
- os.close(fd)
- except OSError:
- pass
-
-
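Tying the above together, a sketch that hands work to the loop's thread via `add_callback` (the one thread-safe entry point) and then shuts down:

    import threading
    from tornado.ioloop import IOLoop

    io_loop = IOLoop.current()

    def work():
        print("running on the IOLoop thread")
        io_loop.stop()

    # add_callback may be called from any thread; it wakes the loop if idle.
    threading.Thread(target=lambda: io_loop.add_callback(work)).start()
    io_loop.start()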
-class PollIOLoop(IOLoop):
- """Base class for IOLoops built around a select-like function.
-
- For concrete implementations, see `tornado.platform.epoll.EPollIOLoop`
- (Linux), `tornado.platform.kqueue.KQueueIOLoop` (BSD and Mac), or
- `tornado.platform.select.SelectIOLoop` (all platforms).
- """
- def initialize(self, impl, time_func=None, **kwargs):
- super(PollIOLoop, self).initialize(**kwargs)
- self._impl = impl
- if hasattr(self._impl, 'fileno'):
- set_close_exec(self._impl.fileno())
- self.time_func = time_func or time.time
- self._handlers = {}
- self._events = {}
- self._callbacks = collections.deque()
- self._timeouts = []
- self._cancellations = 0
- self._running = False
- self._stopped = False
- self._closing = False
- self._thread_ident = None
- self._blocking_signal_threshold = None
- self._timeout_counter = itertools.count()
-
- # Create a pipe that we send bogus data to when we want to wake
- # the I/O loop when it is idle
- self._waker = Waker()
- self.add_handler(self._waker.fileno(),
- lambda fd, events: self._waker.consume(),
- self.READ)
-
- def close(self, all_fds=False):
- self._closing = True
- self.remove_handler(self._waker.fileno())
- if all_fds:
- for fd, handler in list(self._handlers.values()):
- self.close_fd(fd)
- self._waker.close()
- self._impl.close()
- self._callbacks = None
- self._timeouts = None
-
- def add_handler(self, fd, handler, events):
- fd, obj = self.split_fd(fd)
- self._handlers[fd] = (obj, stack_context.wrap(handler))
- self._impl.register(fd, events | self.ERROR)
-
- def update_handler(self, fd, events):
- fd, obj = self.split_fd(fd)
- self._impl.modify(fd, events | self.ERROR)
-
- def remove_handler(self, fd):
- fd, obj = self.split_fd(fd)
- self._handlers.pop(fd, None)
- self._events.pop(fd, None)
- try:
- self._impl.unregister(fd)
- except Exception:
- gen_log.debug("Error deleting fd from IOLoop", exc_info=True)
-
- def set_blocking_signal_threshold(self, seconds, action):
- if not hasattr(signal, "setitimer"):
- gen_log.error("set_blocking_signal_threshold requires a signal module "
- "with the setitimer method")
- return
- self._blocking_signal_threshold = seconds
- if seconds is not None:
- signal.signal(signal.SIGALRM,
- action if action is not None else signal.SIG_DFL)
-
- def start(self):
- if self._running:
- raise RuntimeError("IOLoop is already running")
- self._setup_logging()
- if self._stopped:
- self._stopped = False
- return
- old_current = getattr(IOLoop._current, "instance", None)
- IOLoop._current.instance = self
- self._thread_ident = thread.get_ident()
- self._running = True
-
- # signal.set_wakeup_fd closes a race condition in event loops:
- # a signal may arrive at the beginning of select/poll/etc
- # before it goes into its interruptible sleep, so the signal
- # will be consumed without waking the select. The solution is
- # for the (C, synchronous) signal handler to write to a pipe,
- # which will then be seen by select.
- #
- # In python's signal handling semantics, this only matters on the
- # main thread (fortunately, set_wakeup_fd only works on the main
- # thread and will raise a ValueError otherwise).
- #
- # If someone has already set a wakeup fd, we don't want to
- # disturb it. This is an issue for twisted, which does its
- # SIGCHLD processing in response to its own wakeup fd being
- # written to. As long as the wakeup fd is registered on the IOLoop,
- # the loop will still wake up and everything should work.
- old_wakeup_fd = None
- if hasattr(signal, 'set_wakeup_fd') and os.name == 'posix':
- # requires python 2.6+, unix. set_wakeup_fd exists but crashes
- # the python process on windows.
- try:
- old_wakeup_fd = signal.set_wakeup_fd(self._waker.write_fileno())
- if old_wakeup_fd != -1:
- # Already set, restore previous value. This is a little racy,
- # but there's no clean get_wakeup_fd and in real use the
- # IOLoop is just started once at the beginning.
- signal.set_wakeup_fd(old_wakeup_fd)
- old_wakeup_fd = None
- except ValueError:
- # Non-main thread, or the previous value of wakeup_fd
- # is no longer valid.
- old_wakeup_fd = None
-
- try:
- while True:
- # Prevent IO event starvation by delaying new callbacks
- # to the next iteration of the event loop.
- ncallbacks = len(self._callbacks)
-
- # Add any timeouts that have come due to the callback list.
- # Do not run anything until we have determined which ones
- # are ready, so timeouts that call add_timeout cannot
- # schedule anything in this iteration.
- due_timeouts = []
- if self._timeouts:
- now = self.time()
- while self._timeouts:
- if self._timeouts[0].callback is None:
- # The timeout was cancelled. Note that the
- # cancellation check is repeated below for timeouts
- # that are cancelled by another timeout or callback.
- heapq.heappop(self._timeouts)
- self._cancellations -= 1
- elif self._timeouts[0].deadline <= now:
- due_timeouts.append(heapq.heappop(self._timeouts))
- else:
- break
- if (self._cancellations > 512 and
- self._cancellations > (len(self._timeouts) >> 1)):
- # Clean up the timeout queue when it gets large and it's
- # more than half cancellations.
- self._cancellations = 0
- self._timeouts = [x for x in self._timeouts
- if x.callback is not None]
- heapq.heapify(self._timeouts)
-
- for i in range(ncallbacks):
- self._run_callback(self._callbacks.popleft())
- for timeout in due_timeouts:
- if timeout.callback is not None:
- self._run_callback(timeout.callback)
- # Closures may be holding on to a lot of memory, so allow
- # them to be freed before we go into our poll wait.
- due_timeouts = timeout = None
-
- if self._callbacks:
- # If any callbacks or timeouts called add_callback,
- # we don't want to wait in poll() before we run them.
- poll_timeout = 0.0
- elif self._timeouts:
- # If there are any timeouts, schedule the first one.
- # Use self.time() instead of 'now' to account for time
- # spent running callbacks.
- poll_timeout = self._timeouts[0].deadline - self.time()
- poll_timeout = max(0, min(poll_timeout, _POLL_TIMEOUT))
- else:
- # No timeouts and no callbacks, so use the default.
- poll_timeout = _POLL_TIMEOUT
-
- if not self._running:
- break
-
- if self._blocking_signal_threshold is not None:
- # clear alarm so it doesn't fire while poll is waiting for
- # events.
- signal.setitimer(signal.ITIMER_REAL, 0, 0)
-
- try:
- event_pairs = self._impl.poll(poll_timeout)
- except Exception as e:
- # Depending on python version and IOLoop implementation,
- # different exception types may be thrown and there are
- # two ways EINTR might be signaled:
- # * e.errno == errno.EINTR
- # * e.args is like (errno.EINTR, 'Interrupted system call')
- if errno_from_exception(e) == errno.EINTR:
- continue
- else:
- raise
-
- if self._blocking_signal_threshold is not None:
- signal.setitimer(signal.ITIMER_REAL,
- self._blocking_signal_threshold, 0)
-
- # Pop one fd at a time from the set of pending fds and run
- # its handler. Since that handler may perform actions on
- # other file descriptors, there may be reentrant calls to
- # this IOLoop that modify self._events
- self._events.update(event_pairs)
- while self._events:
- fd, events = self._events.popitem()
- try:
- fd_obj, handler_func = self._handlers[fd]
- handler_func(fd_obj, events)
- except (OSError, IOError) as e:
- if errno_from_exception(e) == errno.EPIPE:
- # Happens when the client closes the connection
- pass
- else:
- self.handle_callback_exception(self._handlers.get(fd))
- except Exception:
- self.handle_callback_exception(self._handlers.get(fd))
- fd_obj = handler_func = None
-
- finally:
- # reset the stopped flag so another start/stop pair can be issued
- self._stopped = False
- if self._blocking_signal_threshold is not None:
- signal.setitimer(signal.ITIMER_REAL, 0, 0)
- IOLoop._current.instance = old_current
- if old_wakeup_fd is not None:
- signal.set_wakeup_fd(old_wakeup_fd)
-
- def stop(self):
- self._running = False
- self._stopped = True
- self._waker.wake()
-
- def time(self):
- return self.time_func()
-
- def call_at(self, deadline, callback, *args, **kwargs):
- timeout = _Timeout(
- deadline,
- functools.partial(stack_context.wrap(callback), *args, **kwargs),
- self)
- heapq.heappush(self._timeouts, timeout)
- return timeout
-
- def remove_timeout(self, timeout):
- # Removing from a heap is complicated, so just leave the defunct
- # timeout object in the queue (see discussion in
- # http://docs.python.org/library/heapq.html).
- # If this turns out to be a problem, we could add a garbage
- # collection pass whenever there are too many dead timeouts.
- timeout.callback = None
- self._cancellations += 1
-
- def add_callback(self, callback, *args, **kwargs):
- if self._closing:
- return
- # Blindly insert into self._callbacks. This is safe even
- # from signal handlers because deque.append is atomic.
- self._callbacks.append(functools.partial(
- stack_context.wrap(callback), *args, **kwargs))
- if thread.get_ident() != self._thread_ident:
- # This will write one byte but Waker.consume() reads many
- # at once, so it's ok to write even when not strictly
- # necessary.
- self._waker.wake()
- else:
- # If we're on the IOLoop's thread, we don't need to wake anyone.
- pass
-
- def add_callback_from_signal(self, callback, *args, **kwargs):
- with stack_context.NullContext():
- self.add_callback(callback, *args, **kwargs)
-
-
-class _Timeout(object):
- """An IOLoop timeout, a UNIX timestamp and a callback"""
-
- # Reduce memory overhead when there are lots of pending callbacks
- __slots__ = ['deadline', 'callback', 'tdeadline']
-
- def __init__(self, deadline, callback, io_loop):
- if not isinstance(deadline, numbers.Real):
- raise TypeError("Unsupported deadline %r" % deadline)
- self.deadline = deadline
- self.callback = callback
- self.tdeadline = (deadline, next(io_loop._timeout_counter))
-
- # Comparison methods to sort by deadline, with object id as a tiebreaker
- # to guarantee a consistent ordering. The heapq module uses __le__
- # in python2.5, and __lt__ in 2.6+ (sort() and most other comparisons
- # use __lt__).
- def __lt__(self, other):
- return self.tdeadline < other.tdeadline
-
- def __le__(self, other):
- return self.tdeadline <= other.tdeadline
-
-
-class PeriodicCallback(object):
- """Schedules the given callback to be called periodically.
-
- The callback is called every ``callback_time`` milliseconds.
- Note that the timeout is given in milliseconds, while most other
- time-related functions in Tornado use seconds.
-
- If the callback runs for longer than ``callback_time`` milliseconds,
- subsequent invocations will be skipped to get back on schedule.
-
- `start` must be called after the `PeriodicCallback` is created.
-
- .. versionchanged:: 4.1
- The ``io_loop`` argument is deprecated.
- """
- def __init__(self, callback, callback_time, io_loop=None):
- self.callback = callback
- if callback_time <= 0:
- raise ValueError("Periodic callback must have a positive callback_time")
- self.callback_time = callback_time
- self.io_loop = io_loop or IOLoop.current()
- self._running = False
- self._timeout = None
-
- def start(self):
- """Starts the timer."""
- self._running = True
- self._next_timeout = self.io_loop.time()
- self._schedule_next()
-
- def stop(self):
- """Stops the timer."""
- self._running = False
- if self._timeout is not None:
- self.io_loop.remove_timeout(self._timeout)
- self._timeout = None
-
- def is_running(self):
- """Return True if this `.PeriodicCallback` has been started.
-
- .. versionadded:: 4.1
- """
- return self._running
-
- def _run(self):
- if not self._running:
- return
- try:
- return self.callback()
- except Exception:
- self.io_loop.handle_callback_exception(self.callback)
- finally:
- self._schedule_next()
-
- def _schedule_next(self):
- if self._running:
- current_time = self.io_loop.time()
-
- if self._next_timeout <= current_time:
- callback_time_sec = self.callback_time / 1000.0
- self._next_timeout += (math.floor((current_time - self._next_timeout) /
- callback_time_sec) + 1) * callback_time_sec
-
- self._timeout = self.io_loop.add_timeout(self._next_timeout, self._run)
+#!/usr/bin/env python
+#
+# Copyright 2009 Facebook
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""An I/O event loop for non-blocking sockets.
+
+Typical applications will use a single `IOLoop` object, in the
+`IOLoop.instance` singleton. The `IOLoop.start` method should usually
+be called at the end of the ``main()`` function. Atypical applications may
+use more than one `IOLoop`, such as one `IOLoop` per thread, or per `unittest`
+case.
+
+In addition to I/O events, the `IOLoop` can also schedule time-based events.
+`IOLoop.add_timeout` is a non-blocking alternative to `time.sleep`.
+"""
+
+from __future__ import absolute_import, division, print_function
+
+import collections
+import datetime
+import errno
+import functools
+import heapq
+import itertools
+import logging
+import numbers
+import os
+import select
+import sys
+import threading
+import time
+import traceback
+import math
+
+from tornado.concurrent import TracebackFuture, is_future
+from tornado.log import app_log, gen_log
+from tornado.platform.auto import set_close_exec, Waker
+from tornado import stack_context
+from tornado.util import PY3, Configurable, errno_from_exception, timedelta_to_seconds
+
+try:
+ import signal
+except ImportError:
+ signal = None
+
+
+if PY3:
+ import _thread as thread
+else:
+ import thread
+
+
+_POLL_TIMEOUT = 3600.0
+
+
+class TimeoutError(Exception):
+ pass
+
+
+class IOLoop(Configurable):
+ """A level-triggered I/O loop.
+
+ We use ``epoll`` (Linux) or ``kqueue`` (BSD and Mac OS X) if they
+ are available, or else we fall back on select(). If you are
+ implementing a system that needs to handle thousands of
+ simultaneous connections, you should use a system that supports
+ either ``epoll`` or ``kqueue``.
+
+ Example usage for a simple TCP server:
+
+ .. testcode::
+
+ import errno
+ import functools
+ import tornado.ioloop
+ import socket
+
+ def connection_ready(sock, fd, events):
+ while True:
+ try:
+ connection, address = sock.accept()
+ except socket.error as e:
+ if e.args[0] not in (errno.EWOULDBLOCK, errno.EAGAIN):
+ raise
+ return
+ connection.setblocking(0)
+ handle_connection(connection, address)
+
+ if __name__ == '__main__':
+ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
+ sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+ sock.setblocking(0)
+ sock.bind(("", port))
+ sock.listen(128)
+
+ io_loop = tornado.ioloop.IOLoop.current()
+ callback = functools.partial(connection_ready, sock)
+ io_loop.add_handler(sock.fileno(), callback, io_loop.READ)
+ io_loop.start()
+
+ .. testoutput::
+ :hide:
+
+ By default, a newly-constructed `IOLoop` becomes the thread's current
+ `IOLoop`, unless there already is a current `IOLoop`. This behavior
+ can be controlled with the ``make_current`` argument to the `IOLoop`
+ constructor: if ``make_current=True``, the new `IOLoop` will always
+ try to become current and it raises an error if there is already a
+ current instance. If ``make_current=False``, the new `IOLoop` will
+ not try to become current.
+
+ .. versionchanged:: 4.2
+ Added the ``make_current`` keyword argument to the `IOLoop`
+ constructor.
+ """
+ # Constants from the epoll module
+ _EPOLLIN = 0x001
+ _EPOLLPRI = 0x002
+ _EPOLLOUT = 0x004
+ _EPOLLERR = 0x008
+ _EPOLLHUP = 0x010
+ _EPOLLRDHUP = 0x2000
+ _EPOLLONESHOT = (1 << 30)
+ _EPOLLET = (1 << 31)
+
+ # Our events map exactly to the epoll events
+ NONE = 0
+ READ = _EPOLLIN
+ WRITE = _EPOLLOUT
+ ERROR = _EPOLLERR | _EPOLLHUP
+
+ # Global lock for creating global IOLoop instance
+ _instance_lock = threading.Lock()
+
+ _current = threading.local()
+
+ @staticmethod
+ def instance():
+ """Returns a global `IOLoop` instance.
+
+ Most applications have a single, global `IOLoop` running on the
+ main thread. Use this method to get this instance from
+ another thread. In most other cases, it is better to use `current()`
+ to get the current thread's `IOLoop`.
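+
+        A minimal sketch (illustrative) of scheduling work on the main
+        thread's loop from another thread::
+
+            IOLoop.instance().add_callback(
+                lambda: print("runs on the main thread's loop"))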
+ """
+ if not hasattr(IOLoop, "_instance"):
+ with IOLoop._instance_lock:
+ if not hasattr(IOLoop, "_instance"):
+ # New instance after double check
+ IOLoop._instance = IOLoop()
+ return IOLoop._instance
+
+ @staticmethod
+ def initialized():
+ """Returns true if the singleton instance has been created."""
+ return hasattr(IOLoop, "_instance")
+
+ def install(self):
+ """Installs this `IOLoop` object as the singleton instance.
+
+ This is normally not necessary as `instance()` will create
+ an `IOLoop` on demand, but you may want to call `install` to use
+ a custom subclass of `IOLoop`.
+
+ When using an `IOLoop` subclass, `install` must be called prior
+ to creating any objects that implicitly create their own
+ `IOLoop` (e.g., :class:`tornado.httpclient.AsyncHTTPClient`).
+ """
+ assert not IOLoop.initialized()
+ IOLoop._instance = self
+
+ @staticmethod
+ def clear_instance():
+ """Clear the global `IOLoop` instance.
+
+ .. versionadded:: 4.0
+ """
+ if hasattr(IOLoop, "_instance"):
+ del IOLoop._instance
+
+ @staticmethod
+ def current(instance=True):
+ """Returns the current thread's `IOLoop`.
+
+ If an `IOLoop` is currently running or has been marked as
+ current by `make_current`, returns that instance. If there is
+ no current `IOLoop`, returns `IOLoop.instance()` (i.e. the
+ main thread's `IOLoop`, creating one if necessary) if ``instance``
+ is true.
+
+ In general you should use `IOLoop.current` as the default when
+ constructing an asynchronous object, and use `IOLoop.instance`
+ when you mean to communicate to the main thread from a different
+ one.
+
+ .. versionchanged:: 4.1
+ Added ``instance`` argument to control the fallback to
+ `IOLoop.instance()`.
+ """
+ current = getattr(IOLoop._current, "instance", None)
+ if current is None and instance:
+ return IOLoop.instance()
+ return current
+
+ def make_current(self):
+ """Makes this the `IOLoop` for the current thread.
+
+ An `IOLoop` automatically becomes current for its thread
+ when it is started, but it is sometimes useful to call
+ `make_current` explicitly before starting the `IOLoop`,
+ so that code run at startup time can find the right
+ instance.
+
+ .. versionchanged:: 4.1
+ An `IOLoop` created while there is no current `IOLoop`
+ will automatically become current.
+ """
+ IOLoop._current.instance = self
+
+ @staticmethod
+ def clear_current():
+ IOLoop._current.instance = None
+
+ @classmethod
+ def configurable_base(cls):
+ return IOLoop
+
+ @classmethod
+ def configurable_default(cls):
+ if hasattr(select, "epoll"):
+ from tornado.platform.epoll import EPollIOLoop
+ return EPollIOLoop
+ if hasattr(select, "kqueue"):
+ # Python 2.6+ on BSD or Mac
+ from tornado.platform.kqueue import KQueueIOLoop
+ return KQueueIOLoop
+ from tornado.platform.select import SelectIOLoop
+ return SelectIOLoop
+
+ def initialize(self, make_current=None):
+ if make_current is None:
+ if IOLoop.current(instance=False) is None:
+ self.make_current()
+ elif make_current:
+ if IOLoop.current(instance=False) is not None:
+ raise RuntimeError("current IOLoop already exists")
+ self.make_current()
+
+ def close(self, all_fds=False):
+ """Closes the `IOLoop`, freeing any resources used.
+
+ If ``all_fds`` is true, all file descriptors registered on the
+ IOLoop will be closed (not just the ones created by the
+ `IOLoop` itself).
+
+ Many applications will only use a single `IOLoop` that runs for the
+ entire lifetime of the process. In that case closing the `IOLoop`
+ is not necessary since everything will be cleaned up when the
+ process exits. `IOLoop.close` is provided mainly for scenarios
+ such as unit tests, which create and destroy a large number of
+ ``IOLoops``.
+
+ An `IOLoop` must be completely stopped before it can be closed. This
+ means that `IOLoop.stop()` must be called *and* `IOLoop.start()` must
+ be allowed to return before attempting to call `IOLoop.close()`.
+ Therefore the call to `close` will usually appear just after
+ the call to `start` rather than near the call to `stop`.
+
+ .. versionchanged:: 3.1
+ If the `IOLoop` implementation supports non-integer objects
+ for "file descriptors", those objects will have their
+           ``close`` method called when ``all_fds`` is true.
+ """
+ raise NotImplementedError()
+
+ def add_handler(self, fd, handler, events):
+ """Registers the given handler to receive the given events for ``fd``.
+
+ The ``fd`` argument may either be an integer file descriptor or
+ a file-like object with a ``fileno()`` method (and optionally a
+ ``close()`` method, which may be called when the `IOLoop` is shut
+ down).
+
+ The ``events`` argument is a bitwise or of the constants
+ ``IOLoop.READ``, ``IOLoop.WRITE``, and ``IOLoop.ERROR``.
+
+ When an event occurs, ``handler(fd, events)`` will be run.
+
+ .. versionchanged:: 4.0
+ Added the ability to pass file-like objects in addition to
+ raw file descriptors.
+ """
+ raise NotImplementedError()
+
+ def update_handler(self, fd, events):
+ """Changes the events we listen for ``fd``.
+
+ .. versionchanged:: 4.0
+ Added the ability to pass file-like objects in addition to
+ raw file descriptors.
+ """
+ raise NotImplementedError()
+
+ def remove_handler(self, fd):
+ """Stop listening for events on ``fd``.
+
+ .. versionchanged:: 4.0
+ Added the ability to pass file-like objects in addition to
+ raw file descriptors.
+ """
+ raise NotImplementedError()
+
+ def set_blocking_signal_threshold(self, seconds, action):
+ """Sends a signal if the `IOLoop` is blocked for more than
+        ``seconds`` seconds.
+
+        Pass ``seconds=None`` to disable. Requires Python 2.6 or later on a
+        Unix-like platform.
+
+ The action parameter is a Python signal handler. Read the
+ documentation for the `signal` module for more information.
+ If ``action`` is None, the process will be killed if it is
+ blocked for too long.
+ """
+ raise NotImplementedError()
+
+ def set_blocking_log_threshold(self, seconds):
+ """Logs a stack trace if the `IOLoop` is blocked for more than
+        ``seconds`` seconds.
+
+ Equivalent to ``set_blocking_signal_threshold(seconds,
+ self.log_stack)``
+ """
+ self.set_blocking_signal_threshold(seconds, self.log_stack)
+
+ def log_stack(self, signal, frame):
+ """Signal handler to log the stack trace of the current thread.
+
+ For use with `set_blocking_signal_threshold`.
+ """
+ gen_log.warning('IOLoop blocked for %f seconds in\n%s',
+ self._blocking_signal_threshold,
+ ''.join(traceback.format_stack(frame)))
+
+ def start(self):
+ """Starts the I/O loop.
+
+ The loop will run until one of the callbacks calls `stop()`, which
+ will make the loop stop after the current event iteration completes.
+ """
+ raise NotImplementedError()
+
+ def _setup_logging(self):
+ """The IOLoop catches and logs exceptions, so it's
+ important that log output be visible. However, python's
+ default behavior for non-root loggers (prior to python
+ 3.2) is to print an unhelpful "no handlers could be
+ found" message rather than the actual log entry, so we
+ must explicitly configure logging if we've made it this
+ far without anything.
+
+ This method should be called from start() in subclasses.
+ """
+ if not any([logging.getLogger().handlers,
+ logging.getLogger('tornado').handlers,
+ logging.getLogger('tornado.application').handlers]):
+ logging.basicConfig()
+
+ def stop(self):
+ """Stop the I/O loop.
+
+ If the event loop is not currently running, the next call to `start()`
+ will return immediately.
+
+ To use asynchronous methods from otherwise-synchronous code (such as
+ unit tests), you can start and stop the event loop like this::
+
+ ioloop = IOLoop()
+ async_method(ioloop=ioloop, callback=ioloop.stop)
+ ioloop.start()
+
+ ``ioloop.start()`` will return after ``async_method`` has run
+ its callback, whether that callback was invoked before or
+ after ``ioloop.start``.
+
+ Note that even after `stop` has been called, the `IOLoop` is not
+ completely stopped until `IOLoop.start` has also returned.
+ Some work that was scheduled before the call to `stop` may still
+ be run before the `IOLoop` shuts down.
+ """
+ raise NotImplementedError()
+
+ def run_sync(self, func, timeout=None):
+ """Starts the `IOLoop`, runs the given function, and stops the loop.
+
+ The function must return either a yieldable object or
+ ``None``. If the function returns a yieldable object, the
+ `IOLoop` will run until the yieldable is resolved (and
+ `run_sync()` will return the yieldable's result). If it raises
+ an exception, the `IOLoop` will stop and the exception will be
+ re-raised to the caller.
+
+ The keyword-only argument ``timeout`` may be used to set
+ a maximum duration for the function. If the timeout expires,
+ a `TimeoutError` is raised.
+
+ This method is useful in conjunction with `tornado.gen.coroutine`
+ to allow asynchronous calls in a ``main()`` function::
+
+ @gen.coroutine
+ def main():
+ # do stuff...
+
+ if __name__ == '__main__':
+ IOLoop.current().run_sync(main)
+
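+        As a sketch, the keyword-only ``timeout`` turns a hung ``main()``
+        into a `TimeoutError` after five seconds::
+
+            IOLoop.current().run_sync(main, timeout=5)
+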
+ .. versionchanged:: 4.3
+ Returning a non-``None``, non-yieldable value is now an error.
+ """
+ future_cell = [None]
+
+ def run():
+ try:
+ result = func()
+ if result is not None:
+ from tornado.gen import convert_yielded
+ result = convert_yielded(result)
+ except Exception:
+ future_cell[0] = TracebackFuture()
+ future_cell[0].set_exc_info(sys.exc_info())
+ else:
+ if is_future(result):
+ future_cell[0] = result
+ else:
+ future_cell[0] = TracebackFuture()
+ future_cell[0].set_result(result)
+ self.add_future(future_cell[0], lambda future: self.stop())
+ self.add_callback(run)
+ if timeout is not None:
+ timeout_handle = self.add_timeout(self.time() + timeout, self.stop)
+ self.start()
+ if timeout is not None:
+ self.remove_timeout(timeout_handle)
+ if not future_cell[0].done():
+ raise TimeoutError('Operation timed out after %s seconds' % timeout)
+ return future_cell[0].result()
+
+ def time(self):
+ """Returns the current time according to the `IOLoop`'s clock.
+
+ The return value is a floating-point number relative to an
+ unspecified time in the past.
+
+ By default, the `IOLoop`'s time function is `time.time`. However,
+ it may be configured to use e.g. `time.monotonic` instead.
+ Calls to `add_timeout` that pass a number instead of a
+ `datetime.timedelta` should use this function to compute the
+ appropriate time, so they can work no matter what time function
+ is chosen.
+ """
+ return time.time()
+
+ def add_timeout(self, deadline, callback, *args, **kwargs):
+ """Runs the ``callback`` at the time ``deadline`` from the I/O loop.
+
+ Returns an opaque handle that may be passed to
+ `remove_timeout` to cancel.
+
+ ``deadline`` may be a number denoting a time (on the same
+ scale as `IOLoop.time`, normally `time.time`), or a
+ `datetime.timedelta` object for a deadline relative to the
+ current time. Since Tornado 4.0, `call_later` is a more
+ convenient alternative for the relative case since it does not
+ require a timedelta object.
+
+ Note that it is not safe to call `add_timeout` from other threads.
+ Instead, you must use `add_callback` to transfer control to the
+ `IOLoop`'s thread, and then call `add_timeout` from there.
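+
+        Both deadline forms, as a sketch (``callback`` is assumed to be
+        defined elsewhere)::
+
+            import datetime
+            loop = IOLoop.current()
+            loop.add_timeout(loop.time() + 2, callback)
+            loop.add_timeout(datetime.timedelta(seconds=2), callback)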
+
+ Subclasses of IOLoop must implement either `add_timeout` or
+ `call_at`; the default implementations of each will call
+ the other. `call_at` is usually easier to implement, but
+ subclasses that wish to maintain compatibility with Tornado
+ versions prior to 4.0 must use `add_timeout` instead.
+
+ .. versionchanged:: 4.0
+ Now passes through ``*args`` and ``**kwargs`` to the callback.
+ """
+ if isinstance(deadline, numbers.Real):
+ return self.call_at(deadline, callback, *args, **kwargs)
+ elif isinstance(deadline, datetime.timedelta):
+ return self.call_at(self.time() + timedelta_to_seconds(deadline),
+ callback, *args, **kwargs)
+ else:
+ raise TypeError("Unsupported deadline %r" % deadline)
+
+ def call_later(self, delay, callback, *args, **kwargs):
+ """Runs the ``callback`` after ``delay`` seconds have passed.
+
+ Returns an opaque handle that may be passed to `remove_timeout`
+ to cancel. Note that unlike the `asyncio` method of the same
+ name, the returned object does not have a ``cancel()`` method.
+
+ See `add_timeout` for comments on thread-safety and subclassing.
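+
+        For example (sketch; ``callback`` is assumed to be defined)::
+
+            handle = IOLoop.current().call_later(0.5, callback)
+            IOLoop.current().remove_timeout(handle)  # cancel if pending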
+
+ .. versionadded:: 4.0
+ """
+ return self.call_at(self.time() + delay, callback, *args, **kwargs)
+
+ def call_at(self, when, callback, *args, **kwargs):
+ """Runs the ``callback`` at the absolute time designated by ``when``.
+
+ ``when`` must be a number using the same reference point as
+ `IOLoop.time`.
+
+ Returns an opaque handle that may be passed to `remove_timeout`
+ to cancel. Note that unlike the `asyncio` method of the same
+ name, the returned object does not have a ``cancel()`` method.
+
+ See `add_timeout` for comments on thread-safety and subclassing.
+
+ .. versionadded:: 4.0
+ """
+ return self.add_timeout(when, callback, *args, **kwargs)
+
+ def remove_timeout(self, timeout):
+ """Cancels a pending timeout.
+
+ The argument is a handle as returned by `add_timeout`. It is
+ safe to call `remove_timeout` even if the callback has already
+ been run.
+ """
+ raise NotImplementedError()
+
+ def add_callback(self, callback, *args, **kwargs):
+ """Calls the given callback on the next I/O loop iteration.
+
+ It is safe to call this method from any thread at any time,
+ except from a signal handler. Note that this is the **only**
+ method in `IOLoop` that makes this thread-safety guarantee; all
+ other interaction with the `IOLoop` must be done from that
+ `IOLoop`'s thread. `add_callback()` may be used to transfer
+ control from other threads to the `IOLoop`'s thread.
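+
+        A worker-thread sketch (``compute`` and ``handle_result`` are
+        hypothetical)::
+
+            import threading
+
+            def worker(io_loop):
+                result = compute()  # blocking work off the IOLoop thread
+                io_loop.add_callback(handle_result, result)
+
+            threading.Thread(target=worker,
+                             args=(IOLoop.current(),)).start()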
+
+ To add a callback from a signal handler, see
+ `add_callback_from_signal`.
+ """
+ raise NotImplementedError()
+
+ def add_callback_from_signal(self, callback, *args, **kwargs):
+ """Calls the given callback on the next I/O loop iteration.
+
+ Safe for use from a Python signal handler; should not be used
+ otherwise.
+
+ Callbacks added with this method will be run without any
+ `.stack_context`, to avoid picking up the context of the function
+ that was interrupted by the signal.
+ """
+ raise NotImplementedError()
+
+ def spawn_callback(self, callback, *args, **kwargs):
+ """Calls the given callback on the next IOLoop iteration.
+
+ Unlike all other callback-related methods on IOLoop,
+ ``spawn_callback`` does not associate the callback with its caller's
+ ``stack_context``, so it is suitable for fire-and-forget callbacks
+ that should not interfere with the caller.
+
+ .. versionadded:: 4.0
+ """
+ with stack_context.NullContext():
+ self.add_callback(callback, *args, **kwargs)
+
+ def add_future(self, future, callback):
+ """Schedules a callback on the ``IOLoop`` when the given
+ `.Future` is finished.
+
+ The callback is invoked with one argument, the
+ `.Future`.
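+
+        For example (sketch; ``fetch_async`` is hypothetical and returns
+        a `.Future`)::
+
+            future = fetch_async()
+            IOLoop.current().add_future(
+                future, lambda f: print(f.result()))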
+ """
+ assert is_future(future)
+ callback = stack_context.wrap(callback)
+ future.add_done_callback(
+ lambda future: self.add_callback(callback, future))
+
+ def _run_callback(self, callback):
+ """Runs a callback with error handling.
+
+ For use in subclasses.
+ """
+ try:
+ ret = callback()
+ if ret is not None:
+ from tornado import gen
+ # Functions that return Futures typically swallow all
+ # exceptions and store them in the Future. If a Future
+ # makes it out to the IOLoop, ensure its exception (if any)
+ # gets logged too.
+ try:
+ ret = gen.convert_yielded(ret)
+ except gen.BadYieldError:
+ # It's not unusual for add_callback to be used with
+ # methods returning a non-None and non-yieldable
+ # result, which should just be ignored.
+ pass
+ else:
+ self.add_future(ret, self._discard_future_result)
+ except Exception:
+ self.handle_callback_exception(callback)
+
+ def _discard_future_result(self, future):
+ """Avoid unhandled-exception warnings from spawned coroutines."""
+ future.result()
+
+ def handle_callback_exception(self, callback):
+ """This method is called whenever a callback run by the `IOLoop`
+ throws an exception.
+
+ By default simply logs the exception as an error. Subclasses
+ may override this method to customize reporting of exceptions.
+
+ The exception itself is not passed explicitly, but is available
+ in `sys.exc_info`.
+ """
+ app_log.error("Exception in callback %r", callback, exc_info=True)
+
+ def split_fd(self, fd):
+ """Returns an (fd, obj) pair from an ``fd`` parameter.
+
+ We accept both raw file descriptors and file-like objects as
+ input to `add_handler` and related methods. When a file-like
+ object is passed, we must retain the object itself so we can
+ close it correctly when the `IOLoop` shuts down, but the
+ poller interfaces favor file descriptors (they will accept
+ file-like objects and call ``fileno()`` for you, but they
+ always return the descriptor itself).
+
+ This method is provided for use by `IOLoop` subclasses and should
+ not generally be used by application code.
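+
+        For example (sketch; ``loop`` is an `IOLoop` and ``sock`` a
+        socket object)::
+
+            loop.split_fd(sock)  # -> (sock.fileno(), sock)
+            loop.split_fd(7)     # -> (7, 7)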
+
+ .. versionadded:: 4.0
+ """
+ try:
+ return fd.fileno(), fd
+ except AttributeError:
+ return fd, fd
+
+ def close_fd(self, fd):
+ """Utility method to close an ``fd``.
+
+ If ``fd`` is a file-like object, we close it directly; otherwise
+ we use `os.close`.
+
+ This method is provided for use by `IOLoop` subclasses (in
+        implementations of ``IOLoop.close(all_fds=True)``) and should
+ not generally be used by application code.
+
+ .. versionadded:: 4.0
+ """
+ try:
+ try:
+ fd.close()
+ except AttributeError:
+ os.close(fd)
+ except OSError:
+ pass
+
+
+class PollIOLoop(IOLoop):
+ """Base class for IOLoops built around a select-like function.
+
+ For concrete implementations, see `tornado.platform.epoll.EPollIOLoop`
+ (Linux), `tornado.platform.kqueue.KQueueIOLoop` (BSD and Mac), or
+ `tornado.platform.select.SelectIOLoop` (all platforms).
+ """
+ def initialize(self, impl, time_func=None, **kwargs):
+ super(PollIOLoop, self).initialize(**kwargs)
+ self._impl = impl
+ if hasattr(self._impl, 'fileno'):
+ set_close_exec(self._impl.fileno())
+ self.time_func = time_func or time.time
+ self._handlers = {}
+ self._events = {}
+ self._callbacks = collections.deque()
+ self._timeouts = []
+ self._cancellations = 0
+ self._running = False
+ self._stopped = False
+ self._closing = False
+ self._thread_ident = None
+ self._blocking_signal_threshold = None
+ self._timeout_counter = itertools.count()
+
+ # Create a pipe that we send bogus data to when we want to wake
+ # the I/O loop when it is idle
+ self._waker = Waker()
+ self.add_handler(self._waker.fileno(),
+ lambda fd, events: self._waker.consume(),
+ self.READ)
+
+ def close(self, all_fds=False):
+ self._closing = True
+ self.remove_handler(self._waker.fileno())
+ if all_fds:
+ for fd, handler in list(self._handlers.values()):
+ self.close_fd(fd)
+ self._waker.close()
+ self._impl.close()
+ self._callbacks = None
+ self._timeouts = None
+
+ def add_handler(self, fd, handler, events):
+ fd, obj = self.split_fd(fd)
+ self._handlers[fd] = (obj, stack_context.wrap(handler))
+ self._impl.register(fd, events | self.ERROR)
+
+ def update_handler(self, fd, events):
+ fd, obj = self.split_fd(fd)
+ self._impl.modify(fd, events | self.ERROR)
+
+ def remove_handler(self, fd):
+ fd, obj = self.split_fd(fd)
+ self._handlers.pop(fd, None)
+ self._events.pop(fd, None)
+ try:
+ self._impl.unregister(fd)
+ except Exception:
+ gen_log.debug("Error deleting fd from IOLoop", exc_info=True)
+
+ def set_blocking_signal_threshold(self, seconds, action):
+ if not hasattr(signal, "setitimer"):
+ gen_log.error("set_blocking_signal_threshold requires a signal module "
+ "with the setitimer method")
+ return
+ self._blocking_signal_threshold = seconds
+ if seconds is not None:
+ signal.signal(signal.SIGALRM,
+ action if action is not None else signal.SIG_DFL)
+
+ def start(self):
+ if self._running:
+ raise RuntimeError("IOLoop is already running")
+ self._setup_logging()
+ if self._stopped:
+ self._stopped = False
+ return
+ old_current = getattr(IOLoop._current, "instance", None)
+ IOLoop._current.instance = self
+ self._thread_ident = thread.get_ident()
+ self._running = True
+
+ # signal.set_wakeup_fd closes a race condition in event loops:
+ # a signal may arrive at the beginning of select/poll/etc
+ # before it goes into its interruptible sleep, so the signal
+ # will be consumed without waking the select. The solution is
+ # for the (C, synchronous) signal handler to write to a pipe,
+ # which will then be seen by select.
+ #
+ # In python's signal handling semantics, this only matters on the
+ # main thread (fortunately, set_wakeup_fd only works on the main
+ # thread and will raise a ValueError otherwise).
+ #
+ # If someone has already set a wakeup fd, we don't want to
+ # disturb it. This is an issue for twisted, which does its
+ # SIGCHLD processing in response to its own wakeup fd being
+ # written to. As long as the wakeup fd is registered on the IOLoop,
+ # the loop will still wake up and everything should work.
+ old_wakeup_fd = None
+ if hasattr(signal, 'set_wakeup_fd') and os.name == 'posix':
+ # requires python 2.6+, unix. set_wakeup_fd exists but crashes
+ # the python process on windows.
+ try:
+ old_wakeup_fd = signal.set_wakeup_fd(self._waker.write_fileno())
+ if old_wakeup_fd != -1:
+ # Already set, restore previous value. This is a little racy,
+ # but there's no clean get_wakeup_fd and in real use the
+ # IOLoop is just started once at the beginning.
+ signal.set_wakeup_fd(old_wakeup_fd)
+ old_wakeup_fd = None
+ except ValueError:
+ # Non-main thread, or the previous value of wakeup_fd
+ # is no longer valid.
+ old_wakeup_fd = None
+
+ try:
+ while True:
+ # Prevent IO event starvation by delaying new callbacks
+ # to the next iteration of the event loop.
+ ncallbacks = len(self._callbacks)
+
+ # Add any timeouts that have come due to the callback list.
+ # Do not run anything until we have determined which ones
+ # are ready, so timeouts that call add_timeout cannot
+ # schedule anything in this iteration.
+ due_timeouts = []
+ if self._timeouts:
+ now = self.time()
+ while self._timeouts:
+ if self._timeouts[0].callback is None:
+ # The timeout was cancelled. Note that the
+ # cancellation check is repeated below for timeouts
+ # that are cancelled by another timeout or callback.
+ heapq.heappop(self._timeouts)
+ self._cancellations -= 1
+ elif self._timeouts[0].deadline <= now:
+ due_timeouts.append(heapq.heappop(self._timeouts))
+ else:
+ break
+ if (self._cancellations > 512 and
+ self._cancellations > (len(self._timeouts) >> 1)):
+ # Clean up the timeout queue when it gets large and it's
+ # more than half cancellations.
+ self._cancellations = 0
+ self._timeouts = [x for x in self._timeouts
+ if x.callback is not None]
+ heapq.heapify(self._timeouts)
+
+ for i in range(ncallbacks):
+ self._run_callback(self._callbacks.popleft())
+ for timeout in due_timeouts:
+ if timeout.callback is not None:
+ self._run_callback(timeout.callback)
+ # Closures may be holding on to a lot of memory, so allow
+ # them to be freed before we go into our poll wait.
+ due_timeouts = timeout = None
+
+ if self._callbacks:
+ # If any callbacks or timeouts called add_callback,
+ # we don't want to wait in poll() before we run them.
+ poll_timeout = 0.0
+ elif self._timeouts:
+ # If there are any timeouts, schedule the first one.
+ # Use self.time() instead of 'now' to account for time
+ # spent running callbacks.
+ poll_timeout = self._timeouts[0].deadline - self.time()
+ poll_timeout = max(0, min(poll_timeout, _POLL_TIMEOUT))
+ else:
+ # No timeouts and no callbacks, so use the default.
+ poll_timeout = _POLL_TIMEOUT
+
+ if not self._running:
+ break
+
+ if self._blocking_signal_threshold is not None:
+ # clear alarm so it doesn't fire while poll is waiting for
+ # events.
+ signal.setitimer(signal.ITIMER_REAL, 0, 0)
+
+ try:
+ event_pairs = self._impl.poll(poll_timeout)
+ except Exception as e:
+ # Depending on python version and IOLoop implementation,
+ # different exception types may be thrown and there are
+ # two ways EINTR might be signaled:
+ # * e.errno == errno.EINTR
+ # * e.args is like (errno.EINTR, 'Interrupted system call')
+ if errno_from_exception(e) == errno.EINTR:
+ continue
+ else:
+ raise
+
+ if self._blocking_signal_threshold is not None:
+ signal.setitimer(signal.ITIMER_REAL,
+ self._blocking_signal_threshold, 0)
+
+ # Pop one fd at a time from the set of pending fds and run
+ # its handler. Since that handler may perform actions on
+ # other file descriptors, there may be reentrant calls to
+ # this IOLoop that modify self._events
+ self._events.update(event_pairs)
+ while self._events:
+ fd, events = self._events.popitem()
+ try:
+ fd_obj, handler_func = self._handlers[fd]
+ handler_func(fd_obj, events)
+ except (OSError, IOError) as e:
+ if errno_from_exception(e) == errno.EPIPE:
+ # Happens when the client closes the connection
+ pass
+ else:
+ self.handle_callback_exception(self._handlers.get(fd))
+ except Exception:
+ self.handle_callback_exception(self._handlers.get(fd))
+ fd_obj = handler_func = None
+
+ finally:
+ # reset the stopped flag so another start/stop pair can be issued
+ self._stopped = False
+ if self._blocking_signal_threshold is not None:
+ signal.setitimer(signal.ITIMER_REAL, 0, 0)
+ IOLoop._current.instance = old_current
+ if old_wakeup_fd is not None:
+ signal.set_wakeup_fd(old_wakeup_fd)
+
+ def stop(self):
+ self._running = False
+ self._stopped = True
+ self._waker.wake()
+
+ def time(self):
+ return self.time_func()
+
+ def call_at(self, deadline, callback, *args, **kwargs):
+ timeout = _Timeout(
+ deadline,
+ functools.partial(stack_context.wrap(callback), *args, **kwargs),
+ self)
+ heapq.heappush(self._timeouts, timeout)
+ return timeout
+
+ def remove_timeout(self, timeout):
+ # Removing from a heap is complicated, so just leave the defunct
+ # timeout object in the queue (see discussion in
+ # http://docs.python.org/library/heapq.html).
+ # If this turns out to be a problem, we could add a garbage
+ # collection pass whenever there are too many dead timeouts.
+ timeout.callback = None
+ self._cancellations += 1
+
+ def add_callback(self, callback, *args, **kwargs):
+ if self._closing:
+ return
+ # Blindly insert into self._callbacks. This is safe even
+ # from signal handlers because deque.append is atomic.
+ self._callbacks.append(functools.partial(
+ stack_context.wrap(callback), *args, **kwargs))
+ if thread.get_ident() != self._thread_ident:
+ # This will write one byte but Waker.consume() reads many
+ # at once, so it's ok to write even when not strictly
+ # necessary.
+ self._waker.wake()
+ else:
+ # If we're on the IOLoop's thread, we don't need to wake anyone.
+ pass
+
+ def add_callback_from_signal(self, callback, *args, **kwargs):
+ with stack_context.NullContext():
+ self.add_callback(callback, *args, **kwargs)
+
+
+class _Timeout(object):
+ """An IOLoop timeout, a UNIX timestamp and a callback"""
+
+ # Reduce memory overhead when there are lots of pending callbacks
+ __slots__ = ['deadline', 'callback', 'tdeadline']
+
+ def __init__(self, deadline, callback, io_loop):
+ if not isinstance(deadline, numbers.Real):
+ raise TypeError("Unsupported deadline %r" % deadline)
+ self.deadline = deadline
+ self.callback = callback
+ self.tdeadline = (deadline, next(io_loop._timeout_counter))
+
+ # Comparison methods to sort by deadline, with object id as a tiebreaker
+ # to guarantee a consistent ordering. The heapq module uses __le__
+ # in python2.5, and __lt__ in 2.6+ (sort() and most other comparisons
+ # use __lt__).
+ def __lt__(self, other):
+ return self.tdeadline < other.tdeadline
+
+ def __le__(self, other):
+ return self.tdeadline <= other.tdeadline
+
+
+class PeriodicCallback(object):
+ """Schedules the given callback to be called periodically.
+
+ The callback is called every ``callback_time`` milliseconds.
+ Note that the timeout is given in milliseconds, while most other
+ time-related functions in Tornado use seconds.
+
+ If the callback runs for longer than ``callback_time`` milliseconds,
+ subsequent invocations will be skipped to get back on schedule.
+
+ `start` must be called after the `PeriodicCallback` is created.
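+
+    Example (sketch; ``poll_something`` is hypothetical; note the
+    interval is in milliseconds)::
+
+        pc = PeriodicCallback(poll_something, 500)  # every 0.5 seconds
+        pc.start()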
+
+ .. versionchanged:: 4.1
+ The ``io_loop`` argument is deprecated.
+ """
+ def __init__(self, callback, callback_time, io_loop=None):
+ self.callback = callback
+ if callback_time <= 0:
+ raise ValueError("Periodic callback must have a positive callback_time")
+ self.callback_time = callback_time
+ self.io_loop = io_loop or IOLoop.current()
+ self._running = False
+ self._timeout = None
+
+ def start(self):
+ """Starts the timer."""
+ self._running = True
+ self._next_timeout = self.io_loop.time()
+ self._schedule_next()
+
+ def stop(self):
+ """Stops the timer."""
+ self._running = False
+ if self._timeout is not None:
+ self.io_loop.remove_timeout(self._timeout)
+ self._timeout = None
+
+ def is_running(self):
+ """Return True if this `.PeriodicCallback` has been started.
+
+ .. versionadded:: 4.1
+ """
+ return self._running
+
+ def _run(self):
+ if not self._running:
+ return
+ try:
+ return self.callback()
+ except Exception:
+ self.io_loop.handle_callback_exception(self.callback)
+ finally:
+ self._schedule_next()
+
+ def _schedule_next(self):
+ if self._running:
+ current_time = self.io_loop.time()
+
+ if self._next_timeout <= current_time:
+ callback_time_sec = self.callback_time / 1000.0
+ self._next_timeout += (math.floor((current_time - self._next_timeout) /
+ callback_time_sec) + 1) * callback_time_sec
+
+ self._timeout = self.io_loop.add_timeout(self._next_timeout, self._run)
diff --git a/contrib/python/tornado/tornado-4/tornado/iostream.py b/contrib/python/tornado/tornado-4/tornado/iostream.py
index 639ed5082b..359c831234 100644
--- a/contrib/python/tornado/tornado-4/tornado/iostream.py
+++ b/contrib/python/tornado/tornado-4/tornado/iostream.py
@@ -1,1568 +1,1568 @@
-#!/usr/bin/env python
-#
-# Copyright 2009 Facebook
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Utility classes to write to and read from non-blocking files and sockets.
-
-Contents:
-
-* `BaseIOStream`: Generic interface for reading and writing.
-* `IOStream`: Implementation of BaseIOStream using non-blocking sockets.
-* `SSLIOStream`: SSL-aware version of IOStream.
-* `PipeIOStream`: Pipe-based IOStream implementation.
-"""
-
-from __future__ import absolute_import, division, print_function
-
-import collections
-import errno
-import numbers
-import os
-import socket
-import sys
-import re
-
-from tornado.concurrent import TracebackFuture
-from tornado import ioloop
-from tornado.log import gen_log, app_log
-from tornado.netutil import ssl_wrap_socket, ssl_match_hostname, SSLCertificateError, _client_ssl_defaults, _server_ssl_defaults
-from tornado import stack_context
-from tornado.util import errno_from_exception
-
-try:
- from tornado.platform.posix import _set_nonblocking
-except ImportError:
- _set_nonblocking = None
-
-try:
- import ssl
-except ImportError:
- # ssl is not available on Google App Engine
- ssl = None
-
-# These errnos indicate that a non-blocking operation must be retried
-# at a later time. On most platforms they're the same value, but on
-# some they differ.
-_ERRNO_WOULDBLOCK = (errno.EWOULDBLOCK, errno.EAGAIN)
-
-if hasattr(errno, "WSAEWOULDBLOCK"):
- _ERRNO_WOULDBLOCK += (errno.WSAEWOULDBLOCK,) # type: ignore
-
-# These errnos indicate that a connection has been abruptly terminated.
-# They should be caught and handled less noisily than other errors.
-_ERRNO_CONNRESET = (errno.ECONNRESET, errno.ECONNABORTED, errno.EPIPE,
- errno.ETIMEDOUT)
-
-if hasattr(errno, "WSAECONNRESET"):
- _ERRNO_CONNRESET += (errno.WSAECONNRESET, errno.WSAECONNABORTED, errno.WSAETIMEDOUT) # type: ignore
-
-if sys.platform == 'darwin':
- # OSX appears to have a race condition that causes send(2) to return
- # EPROTOTYPE if called while a socket is being torn down:
- # http://erickt.github.io/blog/2014/11/19/adventures-in-debugging-a-potential-osx-kernel-bug/
- # Since the socket is being closed anyway, treat this as an ECONNRESET
- # instead of an unexpected error.
- _ERRNO_CONNRESET += (errno.EPROTOTYPE,) # type: ignore
-
-# More non-portable errnos:
-_ERRNO_INPROGRESS = (errno.EINPROGRESS,)
-
-if hasattr(errno, "WSAEINPROGRESS"):
- _ERRNO_INPROGRESS += (errno.WSAEINPROGRESS,) # type: ignore
-
-_WINDOWS = sys.platform.startswith('win')
-
-
-class StreamClosedError(IOError):
- """Exception raised by `IOStream` methods when the stream is closed.
-
- Note that the close callback is scheduled to run *after* other
- callbacks on the stream (to allow for buffered data to be processed),
- so you may see this error before you see the close callback.
-
- The ``real_error`` attribute contains the underlying error that caused
- the stream to close (if any).
-
- .. versionchanged:: 4.3
- Added the ``real_error`` attribute.
- """
- def __init__(self, real_error=None):
- super(StreamClosedError, self).__init__('Stream is closed')
- self.real_error = real_error
-
-
-class UnsatisfiableReadError(Exception):
- """Exception raised when a read cannot be satisfied.
-
- Raised by ``read_until`` and ``read_until_regex`` with a ``max_bytes``
- argument.
- """
- pass
-
-
-class StreamBufferFullError(Exception):
- """Exception raised by `IOStream` methods when the buffer is full.
- """
-
-
-class BaseIOStream(object):
- """A utility class to write to and read from a non-blocking file or socket.
-
- We support a non-blocking ``write()`` and a family of ``read_*()`` methods.
- All of the methods take an optional ``callback`` argument and return a
- `.Future` only if no callback is given. When the operation completes,
- the callback will be run or the `.Future` will resolve with the data
- read (or ``None`` for ``write()``). All outstanding ``Futures`` will
- resolve with a `StreamClosedError` when the stream is closed; users
- of the callback interface will be notified via
- `.BaseIOStream.set_close_callback` instead.
-
- When a stream is closed due to an error, the IOStream's ``error``
- attribute contains the exception object.
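-
-    For example, the two call styles for `read_bytes`, as a sketch
-    (``on_data`` is hypothetical)::
-
-        stream.read_bytes(4, callback=on_data)  # callback style
-        future = stream.read_bytes(4)           # Future style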
-
- Subclasses must implement `fileno`, `close_fd`, `write_to_fd`,
- `read_from_fd`, and optionally `get_fd_error`.
- """
- def __init__(self, io_loop=None, max_buffer_size=None,
- read_chunk_size=None, max_write_buffer_size=None):
- """`BaseIOStream` constructor.
-
- :arg io_loop: The `.IOLoop` to use; defaults to `.IOLoop.current`.
- Deprecated since Tornado 4.1.
- :arg max_buffer_size: Maximum amount of incoming data to buffer;
- defaults to 100MB.
- :arg read_chunk_size: Amount of data to read at one time from the
- underlying transport; defaults to 64KB.
- :arg max_write_buffer_size: Amount of outgoing data to buffer;
- defaults to unlimited.
-
- .. versionchanged:: 4.0
- Add the ``max_write_buffer_size`` parameter. Changed default
- ``read_chunk_size`` to 64KB.
- """
- self.io_loop = io_loop or ioloop.IOLoop.current()
- self.max_buffer_size = max_buffer_size or 104857600
- # A chunk size that is too close to max_buffer_size can cause
- # spurious failures.
- self.read_chunk_size = min(read_chunk_size or 65536,
- self.max_buffer_size // 2)
- self.max_write_buffer_size = max_write_buffer_size
- self.error = None
- self._read_buffer = bytearray()
- self._read_buffer_pos = 0
- self._read_buffer_size = 0
- self._write_buffer = bytearray()
- self._write_buffer_pos = 0
- self._write_buffer_size = 0
- self._write_buffer_frozen = False
- self._total_write_index = 0
- self._total_write_done_index = 0
- self._pending_writes_while_frozen = []
- self._read_delimiter = None
- self._read_regex = None
- self._read_max_bytes = None
- self._read_bytes = None
- self._read_partial = False
- self._read_until_close = False
- self._read_callback = None
- self._read_future = None
- self._streaming_callback = None
- self._write_callback = None
- self._write_futures = collections.deque()
- self._close_callback = None
- self._connect_callback = None
- self._connect_future = None
- # _ssl_connect_future should be defined in SSLIOStream
- # but it's here so we can clean it up in maybe_run_close_callback.
- # TODO: refactor that so subclasses can add additional futures
- # to be cancelled.
- self._ssl_connect_future = None
- self._connecting = False
- self._state = None
- self._pending_callbacks = 0
- self._closed = False
-
- def fileno(self):
- """Returns the file descriptor for this stream."""
- raise NotImplementedError()
-
- def close_fd(self):
- """Closes the file underlying this stream.
-
- ``close_fd`` is called by `BaseIOStream` and should not be called
- elsewhere; other users should call `close` instead.
- """
- raise NotImplementedError()
-
- def write_to_fd(self, data):
- """Attempts to write ``data`` to the underlying file.
-
- Returns the number of bytes written.
- """
- raise NotImplementedError()
-
- def read_from_fd(self):
- """Attempts to read from the underlying file.
-
- Returns ``None`` if there was nothing to read (the socket
- returned `~errno.EWOULDBLOCK` or equivalent), otherwise
- returns the data. When possible, should return no more than
- ``self.read_chunk_size`` bytes at a time.
- """
- raise NotImplementedError()
-
- def get_fd_error(self):
- """Returns information about any error on the underlying file.
-
- This method is called after the `.IOLoop` has signaled an error on the
- file descriptor, and should return an Exception (such as `socket.error`
- with additional information, or None if no such information is
- available.
- """
- return None
-
- def read_until_regex(self, regex, callback=None, max_bytes=None):
- """Asynchronously read until we have matched the given regex.
-
- The result includes the data that matches the regex and anything
- that came before it. If a callback is given, it will be run
- with the data as an argument; if not, this method returns a
- `.Future`.
-
- If ``max_bytes`` is not None, the connection will be closed
- if more than ``max_bytes`` bytes have been read and the regex is
- not satisfied.
-
- .. versionchanged:: 4.0
- Added the ``max_bytes`` argument. The ``callback`` argument is
- now optional and a `.Future` will be returned if it is omitted.
- """
- future = self._set_read_callback(callback)
- self._read_regex = re.compile(regex)
- self._read_max_bytes = max_bytes
- try:
- self._try_inline_read()
- except UnsatisfiableReadError as e:
- # Handle this the same way as in _handle_events.
- gen_log.info("Unsatisfiable read, closing connection: %s" % e)
- self.close(exc_info=True)
- return future
- except:
- if future is not None:
- # Ensure that the future doesn't log an error because its
- # failure was never examined.
- future.add_done_callback(lambda f: f.exception())
- raise
- return future
-
- def read_until(self, delimiter, callback=None, max_bytes=None):
- """Asynchronously read until we have found the given delimiter.
-
- The result includes all the data read including the delimiter.
- If a callback is given, it will be run with the data as an argument;
- if not, this method returns a `.Future`.
-
- If ``max_bytes`` is not None, the connection will be closed
- if more than ``max_bytes`` bytes have been read and the delimiter
- is not found.
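-
-        For example, reading one CRLF-terminated line in a coroutine
-        (sketch; ``stream`` is a connected `IOStream`)::
-
-            line = yield stream.read_until(b"\r\n", max_bytes=4096)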
-
- .. versionchanged:: 4.0
- Added the ``max_bytes`` argument. The ``callback`` argument is
- now optional and a `.Future` will be returned if it is omitted.
- """
- future = self._set_read_callback(callback)
- self._read_delimiter = delimiter
- self._read_max_bytes = max_bytes
- try:
- self._try_inline_read()
- except UnsatisfiableReadError as e:
- # Handle this the same way as in _handle_events.
- gen_log.info("Unsatisfiable read, closing connection: %s" % e)
- self.close(exc_info=True)
- return future
- except:
- if future is not None:
- future.add_done_callback(lambda f: f.exception())
- raise
- return future
-
- def read_bytes(self, num_bytes, callback=None, streaming_callback=None,
- partial=False):
- """Asynchronously read a number of bytes.
-
- If a ``streaming_callback`` is given, it will be called with chunks
- of data as they become available, and the final result will be empty.
- Otherwise, the result is all the data that was read.
- If a callback is given, it will be run with the data as an argument;
- if not, this method returns a `.Future`.
-
- If ``partial`` is true, the callback is run as soon as we have
-        any bytes to return (but never more than ``num_bytes``).
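-
-        A flow-controlled read loop, as a sketch (``process`` is
-        hypothetical)::
-
-            while not stream.closed():
-                chunk = yield stream.read_bytes(4096, partial=True)
-                process(chunk)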
-
- .. versionchanged:: 4.0
- Added the ``partial`` argument. The callback argument is now
- optional and a `.Future` will be returned if it is omitted.
- """
- future = self._set_read_callback(callback)
- assert isinstance(num_bytes, numbers.Integral)
- self._read_bytes = num_bytes
- self._read_partial = partial
- self._streaming_callback = stack_context.wrap(streaming_callback)
- try:
- self._try_inline_read()
- except:
- if future is not None:
- future.add_done_callback(lambda f: f.exception())
- raise
- return future
-
- def read_until_close(self, callback=None, streaming_callback=None):
- """Asynchronously reads all data from the socket until it is closed.
-
- If a ``streaming_callback`` is given, it will be called with chunks
- of data as they become available, and the final result will be empty.
- Otherwise, the result is all the data that was read.
- If a callback is given, it will be run with the data as an argument;
- if not, this method returns a `.Future`.
-
- Note that if a ``streaming_callback`` is used, data will be
- read from the socket as quickly as it becomes available; there
- is no way to apply backpressure or cancel the reads. If flow
- control or cancellation are desired, use a loop with
- `read_bytes(partial=True) <.read_bytes>` instead.
-
- .. versionchanged:: 4.0
- The callback argument is now optional and a `.Future` will
- be returned if it is omitted.
-
- """
- future = self._set_read_callback(callback)
- self._streaming_callback = stack_context.wrap(streaming_callback)
- if self.closed():
- if self._streaming_callback is not None:
- self._run_read_callback(self._read_buffer_size, True)
- self._run_read_callback(self._read_buffer_size, False)
- return future
- self._read_until_close = True
- try:
- self._try_inline_read()
- except:
- if future is not None:
- future.add_done_callback(lambda f: f.exception())
- raise
- return future
-
- def write(self, data, callback=None):
- """Asynchronously write the given data to this stream.
-
- If ``callback`` is given, we call it when all of the buffered write
- data has been successfully written to the stream. If there was
- previously buffered write data and an old write callback, that
- callback is simply overwritten with this new callback.
-
- If no ``callback`` is given, this method returns a `.Future` that
- resolves (with a result of ``None``) when the write has been
- completed.
-
- The ``data`` argument may be of type `bytes` or `memoryview`.
-
- .. versionchanged:: 4.0
- Now returns a `.Future` if no callback is given.
-
- .. versionchanged:: 4.5
- Added support for `memoryview` arguments.
- """
- self._check_closed()
- if data:
- if (self.max_write_buffer_size is not None and
- self._write_buffer_size + len(data) > self.max_write_buffer_size):
- raise StreamBufferFullError("Reached maximum write buffer size")
- if self._write_buffer_frozen:
- self._pending_writes_while_frozen.append(data)
- else:
- self._write_buffer += data
- self._write_buffer_size += len(data)
- self._total_write_index += len(data)
- if callback is not None:
- self._write_callback = stack_context.wrap(callback)
- future = None
- else:
- future = TracebackFuture()
- future.add_done_callback(lambda f: f.exception())
- self._write_futures.append((self._total_write_index, future))
- if not self._connecting:
- self._handle_write()
- if self._write_buffer_size:
- self._add_io_state(self.io_loop.WRITE)
- self._maybe_add_error_listener()
- return future
-
- def set_close_callback(self, callback):
- """Call the given callback when the stream is closed.
-
- This is not necessary for applications that use the `.Future`
- interface; all outstanding ``Futures`` will resolve with a
- `StreamClosedError` when the stream is closed.
- """
- self._close_callback = stack_context.wrap(callback)
- self._maybe_add_error_listener()
-
- def close(self, exc_info=False):
- """Close this stream.
-
- If ``exc_info`` is true, set the ``error`` attribute to the current
- exception from `sys.exc_info` (or if ``exc_info`` is a tuple,
- use that instead of `sys.exc_info`).
- """
- if not self.closed():
- if exc_info:
- if not isinstance(exc_info, tuple):
- exc_info = sys.exc_info()
- if any(exc_info):
- self.error = exc_info[1]
- if self._read_until_close:
- if (self._streaming_callback is not None and
- self._read_buffer_size):
- self._run_read_callback(self._read_buffer_size, True)
- self._read_until_close = False
- self._run_read_callback(self._read_buffer_size, False)
- if self._state is not None:
- self.io_loop.remove_handler(self.fileno())
- self._state = None
- self.close_fd()
- self._closed = True
- self._maybe_run_close_callback()
-
- def _maybe_run_close_callback(self):
- # If there are pending callbacks, don't run the close callback
-        # until they're done (see _maybe_add_error_listener)
- if self.closed() and self._pending_callbacks == 0:
- futures = []
- if self._read_future is not None:
- futures.append(self._read_future)
- self._read_future = None
- futures += [future for _, future in self._write_futures]
- self._write_futures.clear()
- if self._connect_future is not None:
- futures.append(self._connect_future)
- self._connect_future = None
- if self._ssl_connect_future is not None:
- futures.append(self._ssl_connect_future)
- self._ssl_connect_future = None
- for future in futures:
- future.set_exception(StreamClosedError(real_error=self.error))
- if self._close_callback is not None:
- cb = self._close_callback
- self._close_callback = None
- self._run_callback(cb)
- # Delete any unfinished callbacks to break up reference cycles.
- self._read_callback = self._write_callback = None
- # Clear the buffers so they can be cleared immediately even
- # if the IOStream object is kept alive by a reference cycle.
- # TODO: Clear the read buffer too; it currently breaks some tests.
- self._write_buffer = None
- self._write_buffer_size = 0
-
- def reading(self):
- """Returns true if we are currently reading from the stream."""
- return self._read_callback is not None or self._read_future is not None
-
- def writing(self):
- """Returns true if we are currently writing to the stream."""
- return self._write_buffer_size > 0
-
- def closed(self):
- """Returns true if the stream has been closed."""
- return self._closed
-
- def set_nodelay(self, value):
- """Sets the no-delay flag for this stream.
-
- By default, data written to TCP streams may be held for a time
- to make the most efficient use of bandwidth (according to
- Nagle's algorithm). The no-delay flag requests that data be
- written as soon as possible, even if doing so would consume
- additional bandwidth.
-
- This flag is currently defined only for TCP-based ``IOStreams``.
-
- .. versionadded:: 3.1
- """
- pass
-
- def _handle_events(self, fd, events):
- if self.closed():
- gen_log.warning("Got events for closed stream %s", fd)
- return
- try:
- if self._connecting:
- # Most IOLoops will report a write failed connect
- # with the WRITE event, but SelectIOLoop reports a
- # READ as well so we must check for connecting before
- # either.
- self._handle_connect()
- if self.closed():
- return
- if events & self.io_loop.READ:
- self._handle_read()
- if self.closed():
- return
- if events & self.io_loop.WRITE:
- self._handle_write()
- if self.closed():
- return
- if events & self.io_loop.ERROR:
- self.error = self.get_fd_error()
- # We may have queued up a user callback in _handle_read or
- # _handle_write, so don't close the IOStream until those
- # callbacks have had a chance to run.
- self.io_loop.add_callback(self.close)
- return
- state = self.io_loop.ERROR
- if self.reading():
- state |= self.io_loop.READ
- if self.writing():
- state |= self.io_loop.WRITE
- if state == self.io_loop.ERROR and self._read_buffer_size == 0:
- # If the connection is idle, listen for reads too so
- # we can tell if the connection is closed. If there is
- # data in the read buffer we won't run the close callback
- # yet anyway, so we don't need to listen in this case.
- state |= self.io_loop.READ
- if state != self._state:
- assert self._state is not None, \
- "shouldn't happen: _handle_events without self._state"
- self._state = state
- self.io_loop.update_handler(self.fileno(), self._state)
- except UnsatisfiableReadError as e:
- gen_log.info("Unsatisfiable read, closing connection: %s" % e)
- self.close(exc_info=True)
- except Exception:
- gen_log.error("Uncaught exception, closing connection.",
- exc_info=True)
- self.close(exc_info=True)
- raise
-
- def _run_callback(self, callback, *args):
- def wrapper():
- self._pending_callbacks -= 1
- try:
- return callback(*args)
- except Exception:
- app_log.error("Uncaught exception, closing connection.",
- exc_info=True)
- # Close the socket on an uncaught exception from a user callback
- # (It would eventually get closed when the socket object is
- # gc'd, but we don't want to rely on gc happening before we
- # run out of file descriptors)
- self.close(exc_info=True)
- # Re-raise the exception so that IOLoop.handle_callback_exception
- # can see it and log the error
- raise
- finally:
- self._maybe_add_error_listener()
- # We schedule callbacks to be run on the next IOLoop iteration
- # rather than running them directly for several reasons:
- # * Prevents unbounded stack growth when a callback calls an
- # IOLoop operation that immediately runs another callback
- # * Provides a predictable execution context for e.g.
- # non-reentrant mutexes
- # * Ensures that the try/except in wrapper() is run outside
- # of the application's StackContexts
- with stack_context.NullContext():
- # stack_context was already captured in callback, we don't need to
- # capture it again for IOStream's wrapper. This is especially
- # important if the callback was pre-wrapped before entry to
- # IOStream (as in HTTPConnection._header_callback), as we could
- # capture and leak the wrong context here.
- self._pending_callbacks += 1
- self.io_loop.add_callback(wrapper)
-
- def _read_to_buffer_loop(self):
- # This method is called from _handle_read and _try_inline_read.
- try:
- if self._read_bytes is not None:
- target_bytes = self._read_bytes
- elif self._read_max_bytes is not None:
- target_bytes = self._read_max_bytes
- elif self.reading():
- # For read_until without max_bytes, or
- # read_until_close, read as much as we can before
- # scanning for the delimiter.
- target_bytes = None
- else:
- target_bytes = 0
- next_find_pos = 0
- # Pretend to have a pending callback so that an EOF in
- # _read_to_buffer doesn't trigger an immediate close
- # callback. At the end of this method we'll either
- # establish a real pending callback via
- # _read_from_buffer or run the close callback.
- #
- # We need two try statements here so that
- # pending_callbacks is decremented before the `except`
- # clause below (which calls `close` and does need to
- # trigger the callback)
- self._pending_callbacks += 1
- while not self.closed():
- # Read from the socket until we get EWOULDBLOCK or equivalent.
- # SSL sockets do some internal buffering, and if the data is
- # sitting in the SSL object's buffer select() and friends
- # can't see it; the only way to find out if it's there is to
- # try to read it.
- if self._read_to_buffer() == 0:
- break
-
- self._run_streaming_callback()
-
- # If we've read all the bytes we can use, break out of
- # this loop. We can't just call _read_from_buffer here
- # because of subtle interactions with the
- # pending_callback and error_listener mechanisms.
- #
- # If we've reached target_bytes, we know we're done.
- if (target_bytes is not None and
- self._read_buffer_size >= target_bytes):
- break
-
- # Otherwise, we need to call the more expensive find_read_pos.
- # It's inefficient to do this on every read, so instead
- # do it on the first read and whenever the read buffer
- # size has doubled.
- if self._read_buffer_size >= next_find_pos:
- pos = self._find_read_pos()
- if pos is not None:
- return pos
- next_find_pos = self._read_buffer_size * 2
- return self._find_read_pos()
- finally:
- self._pending_callbacks -= 1
-
- def _handle_read(self):
- try:
- pos = self._read_to_buffer_loop()
- except UnsatisfiableReadError:
- raise
- except Exception as e:
- gen_log.warning("error on read: %s" % e)
- self.close(exc_info=True)
- return
- if pos is not None:
- self._read_from_buffer(pos)
- return
- else:
- self._maybe_run_close_callback()
-
- def _set_read_callback(self, callback):
- assert self._read_callback is None, "Already reading"
- assert self._read_future is None, "Already reading"
- if callback is not None:
- self._read_callback = stack_context.wrap(callback)
- else:
- self._read_future = TracebackFuture()
- return self._read_future
-
- def _run_read_callback(self, size, streaming):
- if streaming:
- callback = self._streaming_callback
- else:
- callback = self._read_callback
- self._read_callback = self._streaming_callback = None
- if self._read_future is not None:
- assert callback is None
- future = self._read_future
- self._read_future = None
- future.set_result(self._consume(size))
- if callback is not None:
- assert (self._read_future is None) or streaming
- self._run_callback(callback, self._consume(size))
- else:
- # If we scheduled a callback, we will add the error listener
- # afterwards. If we didn't, we have to do it now.
- self._maybe_add_error_listener()
-
- def _try_inline_read(self):
- """Attempt to complete the current read operation from buffered data.
-
- If the read can be completed without blocking, schedules the
- read callback on the next IOLoop iteration; otherwise starts
- listening for reads on the socket.
- """
- # See if we've already got the data from a previous read
- self._run_streaming_callback()
- pos = self._find_read_pos()
- if pos is not None:
- self._read_from_buffer(pos)
- return
- self._check_closed()
- try:
- pos = self._read_to_buffer_loop()
- except Exception:
- # If there was an error in _read_to_buffer, we called close() already,
- # but couldn't run the close callback because of _pending_callbacks.
- # Before we escape from this function, run the close callback if
- # applicable.
- self._maybe_run_close_callback()
- raise
- if pos is not None:
- self._read_from_buffer(pos)
- return
- # We couldn't satisfy the read inline, so either close the stream
- # or listen for new data.
- if self.closed():
- self._maybe_run_close_callback()
- else:
- self._add_io_state(ioloop.IOLoop.READ)
-
- def _read_to_buffer(self):
- """Reads from the socket and appends the result to the read buffer.
-
- Returns the number of bytes read. Returns 0 if there is nothing
- to read (i.e. the read returns EWOULDBLOCK or equivalent). On
- error closes the socket and raises an exception.
- """
- while True:
- try:
- chunk = self.read_from_fd()
- except (socket.error, IOError, OSError) as e:
- if errno_from_exception(e) == errno.EINTR:
- continue
- # ssl.SSLError is a subclass of socket.error
- if self._is_connreset(e):
- # Treat ECONNRESET as a connection close rather than
- # an error to minimize log spam (the exception will
- # be available on self.error for apps that care).
- self.close(exc_info=True)
- return
- self.close(exc_info=True)
- raise
- break
- if chunk is None:
- return 0
- self._read_buffer += chunk
- self._read_buffer_size += len(chunk)
- if self._read_buffer_size > self.max_buffer_size:
- gen_log.error("Reached maximum read buffer size")
- self.close()
- raise StreamBufferFullError("Reached maximum read buffer size")
- return len(chunk)
-
- def _run_streaming_callback(self):
- if self._streaming_callback is not None and self._read_buffer_size:
- bytes_to_consume = self._read_buffer_size
- if self._read_bytes is not None:
- bytes_to_consume = min(self._read_bytes, bytes_to_consume)
- self._read_bytes -= bytes_to_consume
- self._run_read_callback(bytes_to_consume, True)
-
- def _read_from_buffer(self, pos):
- """Attempts to complete the currently-pending read from the buffer.
-
- The argument is either a position in the read buffer or None,
- as returned by _find_read_pos.
- """
- self._read_bytes = self._read_delimiter = self._read_regex = None
- self._read_partial = False
- self._run_read_callback(pos, False)
-
- def _find_read_pos(self):
- """Attempts to find a position in the read buffer that satisfies
- the currently-pending read.
-
- Returns a position in the buffer if the current read can be satisfied,
- or None if it cannot.
- """
- if (self._read_bytes is not None and
- (self._read_buffer_size >= self._read_bytes or
- (self._read_partial and self._read_buffer_size > 0))):
- num_bytes = min(self._read_bytes, self._read_buffer_size)
- return num_bytes
- elif self._read_delimiter is not None:
- # Multi-byte delimiters (e.g. '\r\n') may straddle two
- # chunks in the read buffer, so we can't easily find them
- # without collapsing the buffer. However, since protocols
- # using delimited reads (as opposed to reads of a known
- # length) tend to be "line" oriented, the delimiter is likely
- # to be in the first few chunks. Merge the buffer gradually
- # since large merges are relatively expensive and get undone in
- # _consume().
- if self._read_buffer:
- loc = self._read_buffer.find(self._read_delimiter,
- self._read_buffer_pos)
- if loc != -1:
- loc -= self._read_buffer_pos
- delimiter_len = len(self._read_delimiter)
- self._check_max_bytes(self._read_delimiter,
- loc + delimiter_len)
- return loc + delimiter_len
- self._check_max_bytes(self._read_delimiter,
- self._read_buffer_size)
- elif self._read_regex is not None:
- if self._read_buffer:
- m = self._read_regex.search(self._read_buffer,
- self._read_buffer_pos)
- if m is not None:
- loc = m.end() - self._read_buffer_pos
- self._check_max_bytes(self._read_regex, loc)
- return loc
- self._check_max_bytes(self._read_regex, self._read_buffer_size)
- return None
-
- def _check_max_bytes(self, delimiter, size):
- if (self._read_max_bytes is not None and
- size > self._read_max_bytes):
- raise UnsatisfiableReadError(
- "delimiter %r not found within %d bytes" % (
- delimiter, self._read_max_bytes))
-
- def _freeze_write_buffer(self, size):
- self._write_buffer_frozen = size
-
- def _unfreeze_write_buffer(self):
- self._write_buffer_frozen = False
- self._write_buffer += b''.join(self._pending_writes_while_frozen)
- self._write_buffer_size += sum(map(len, self._pending_writes_while_frozen))
- self._pending_writes_while_frozen[:] = []
-
- def _got_empty_write(self, size):
- """
- Called when a non-blocking write() failed to write anything.
- Can be overridden in subclasses.
- """
-
- def _handle_write(self):
- while self._write_buffer_size:
- assert self._write_buffer_size >= 0
- try:
- start = self._write_buffer_pos
- if self._write_buffer_frozen:
- size = self._write_buffer_frozen
- elif _WINDOWS:
- # On windows, socket.send blows up if given a
- # write buffer that's too large, instead of just
- # returning the number of bytes it was able to
- # process. Therefore we must not call socket.send
- # with more than 128KB at a time.
- size = 128 * 1024
- else:
- size = self._write_buffer_size
- num_bytes = self.write_to_fd(
- memoryview(self._write_buffer)[start:start + size])
- if num_bytes == 0:
- self._got_empty_write(size)
- break
- self._write_buffer_pos += num_bytes
- self._write_buffer_size -= num_bytes
- # Amortized O(1) shrink
- # (this heuristic is implemented natively in Python 3.4+
- # but is replicated here for Python 2)
- if self._write_buffer_pos > self._write_buffer_size:
- del self._write_buffer[:self._write_buffer_pos]
- self._write_buffer_pos = 0
- if self._write_buffer_frozen:
- self._unfreeze_write_buffer()
- self._total_write_done_index += num_bytes
- except (socket.error, IOError, OSError) as e:
- if e.args[0] in _ERRNO_WOULDBLOCK:
- self._got_empty_write(size)
- break
- else:
- if not self._is_connreset(e):
- # Broken pipe errors are usually caused by connection
- # reset, and it's better not to log EPIPE errors to
- # minimize log spam
- gen_log.warning("Write error on %s: %s",
- self.fileno(), e)
- self.close(exc_info=True)
- return
-
- while self._write_futures:
- index, future = self._write_futures[0]
- if index > self._total_write_done_index:
- break
- self._write_futures.popleft()
- future.set_result(None)
-
- if not self._write_buffer_size:
- if self._write_callback:
- callback = self._write_callback
- self._write_callback = None
- self._run_callback(callback)
-
- def _consume(self, loc):
- # Consume loc bytes from the read buffer and return them
- if loc == 0:
- return b""
- assert loc <= self._read_buffer_size
- # Slice the bytearray buffer into bytes, without intermediate copying
- b = (memoryview(self._read_buffer)
- [self._read_buffer_pos:self._read_buffer_pos + loc]
- ).tobytes()
- self._read_buffer_pos += loc
- self._read_buffer_size -= loc
- # Amortized O(1) shrink
- # (this heuristic is implemented natively in Python 3.4+
- # but is replicated here for Python 2)
- if self._read_buffer_pos > self._read_buffer_size:
- del self._read_buffer[:self._read_buffer_pos]
- self._read_buffer_pos = 0
- return b
-
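- # A worked example of the shrink heuristic above: starting from a fresh
- # 10-byte buffer, consuming 6 bytes leaves pos=6 > size=4, so the consumed
- # prefix is deleted and pos resets to 0; consuming only 4 bytes
- # (pos=4 <= size=6) leaves the buffer untouched. Compacting only when the
- # dead prefix exceeds the live data keeps deletions amortized O(1).
-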
- def _check_closed(self):
- if self.closed():
- raise StreamClosedError(real_error=self.error)
-
- def _maybe_add_error_listener(self):
- # This method is part of an optimization: to detect a connection that
- # is closed when we're not actively reading or writing, we must listen
- # for read events. However, it is inefficient to do this when the
- # connection is first established because we are going to read or write
- # immediately anyway. Instead, we insert checks at various times to
- # see if the connection is idle and add the read listener then.
- if self._pending_callbacks != 0:
- return
- if self._state is None or self._state == ioloop.IOLoop.ERROR:
- if self.closed():
- self._maybe_run_close_callback()
- elif (self._read_buffer_size == 0 and
- self._close_callback is not None):
- self._add_io_state(ioloop.IOLoop.READ)
-
- def _add_io_state(self, state):
- """Adds `state` (IOLoop.{READ,WRITE} flags) to our event handler.
-
- Implementation notes: Reads and writes have a fast path and a
- slow path. The fast path reads synchronously from socket
- buffers, while the slow path uses `_add_io_state` to schedule
- an IOLoop callback. Note that in both cases, the callback is
- run asynchronously with `_run_callback`.
-
- To detect closed connections, we must have called
- `_add_io_state` at some point, but we want to delay this as
- much as possible so we don't have to set an `IOLoop.ERROR`
- listener that will be overwritten by the next slow-path
- operation. As long as there are callbacks scheduled for
- fast-path ops, those callbacks may do more reads.
- If a sequence of fast-path ops does not end in a slow-path op
- (e.g. for an @asynchronous long-poll request), we must add
- the error handler. This is done in `_run_callback` and `write`
- (since the write callback is optional, we can have a
- fast-path write with no `_run_callback`).
- """
- if self.closed():
- # connection has been closed, so there can be no future events
- return
- if self._state is None:
- self._state = ioloop.IOLoop.ERROR | state
- with stack_context.NullContext():
- self.io_loop.add_handler(
- self.fileno(), self._handle_events, self._state)
- elif not self._state & state:
- self._state = self._state | state
- self.io_loop.update_handler(self.fileno(), self._state)
-
- def _is_connreset(self, exc):
- """Return true if exc is ECONNRESET or equivalent.
-
- May be overridden in subclasses.
- """
- return (isinstance(exc, (socket.error, IOError)) and
- errno_from_exception(exc) in _ERRNO_CONNRESET)
-
-
-class IOStream(BaseIOStream):
- r"""Socket-based `IOStream` implementation.
-
- This class supports the read and write methods from `BaseIOStream`
- plus a `connect` method.
-
- The ``socket`` parameter may either be connected or unconnected.
- For server operations the socket is the result of calling
- `socket.accept <socket.socket.accept>`. For client operations the
- socket is created with `socket.socket`, and may either be
- connected before passing it to the `IOStream` or connected with
- `IOStream.connect`.
-
- A very simple (and broken) HTTP client using this class:
-
- .. testcode::
-
- import tornado.ioloop
- import tornado.iostream
- import socket
-
- def send_request():
- stream.write(b"GET / HTTP/1.0\r\nHost: friendfeed.com\r\n\r\n")
- stream.read_until(b"\r\n\r\n", on_headers)
-
- def on_headers(data):
- headers = {}
- for line in data.split(b"\r\n"):
- parts = line.split(b":")
- if len(parts) == 2:
- headers[parts[0].strip()] = parts[1].strip()
- stream.read_bytes(int(headers[b"Content-Length"]), on_body)
-
- def on_body(data):
- print(data)
- stream.close()
- tornado.ioloop.IOLoop.current().stop()
-
- if __name__ == '__main__':
- s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
- stream = tornado.iostream.IOStream(s)
- stream.connect(("friendfeed.com", 80), send_request)
- tornado.ioloop.IOLoop.current().start()
-
- .. testoutput::
- :hide:
-
- """
- def __init__(self, socket, *args, **kwargs):
- self.socket = socket
- self.socket.setblocking(False)
- super(IOStream, self).__init__(*args, **kwargs)
-
- def fileno(self):
- return self.socket
-
- def close_fd(self):
- self.socket.close()
- self.socket = None
-
- def get_fd_error(self):
- errno = self.socket.getsockopt(socket.SOL_SOCKET,
- socket.SO_ERROR)
- return socket.error(errno, os.strerror(errno))
-
- def read_from_fd(self):
- try:
- chunk = self.socket.recv(self.read_chunk_size)
- except socket.error as e:
- if e.args[0] in _ERRNO_WOULDBLOCK:
- return None
- else:
- raise
- if not chunk:
- self.close()
- return None
- return chunk
-
- def write_to_fd(self, data):
- try:
- return self.socket.send(data)
- finally:
- # Avoid keeping a reference to data, which can be a memoryview.
- # See https://github.com/tornadoweb/tornado/pull/2008
- del data
-
- def connect(self, address, callback=None, server_hostname=None):
- """Connects the socket to a remote address without blocking.
-
- May only be called if the socket passed to the constructor was
- not previously connected. The address parameter is in the
- same format as for `socket.connect <socket.socket.connect>` for
- the type of socket passed to the IOStream constructor,
- e.g. an ``(ip, port)`` tuple. Hostnames are accepted here,
- but will be resolved synchronously and block the IOLoop.
- If you have a hostname instead of an IP address, the `.TCPClient`
- class is recommended instead of calling this method directly.
- `.TCPClient` will do asynchronous DNS resolution and handle
- both IPv4 and IPv6.
-
- If ``callback`` is specified, it will be called with no
- arguments when the connection is completed; if not this method
- returns a `.Future` (whose result after a successful
- connection will be the stream itself).
-
- In SSL mode, the ``server_hostname`` parameter will be used
- for certificate validation (unless disabled in the
- ``ssl_options``) and SNI (if supported; requires Python
- 2.7.9+).
-
- Note that it is safe to call `IOStream.write
- <BaseIOStream.write>` while the connection is pending, in
- which case the data will be written as soon as the connection
- is ready. Calling `IOStream` read methods before the socket is
- connected works on some platforms but is non-portable.
-
- .. versionchanged:: 4.0
- If no callback is given, returns a `.Future`.
-
- .. versionchanged:: 4.2
- SSL certificates are validated by default; pass
- ``ssl_options=dict(cert_reqs=ssl.CERT_NONE)`` or a
- suitably-configured `ssl.SSLContext` to the
- `SSLIOStream` constructor to disable.
- """
- self._connecting = True
- if callback is not None:
- self._connect_callback = stack_context.wrap(callback)
- future = None
- else:
- future = self._connect_future = TracebackFuture()
- try:
- self.socket.connect(address)
- except socket.error as e:
- # In non-blocking mode we expect connect() to raise an
- # exception with EINPROGRESS or EWOULDBLOCK.
- #
- # On freebsd, other errors such as ECONNREFUSED may be
- # returned immediately when attempting to connect to
- # localhost, so handle them the same way as an error
- # reported later in _handle_connect.
- if (errno_from_exception(e) not in _ERRNO_INPROGRESS and
- errno_from_exception(e) not in _ERRNO_WOULDBLOCK):
- if future is None:
- gen_log.warning("Connect error on fd %s: %s",
- self.socket.fileno(), e)
- self.close(exc_info=True)
- return future
- self._add_io_state(self.io_loop.WRITE)
- return future
-
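- # A minimal coroutine-style usage sketch; the host and request below are
- # placeholders, and (per the docstring above) `tornado.tcpclient.TCPClient`
- # is usually preferable because it resolves hostnames asynchronously:
- #
- #     from tornado import gen
- #
- #     @gen.coroutine
- #     def fetch_root():
- #         s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
- #         stream = IOStream(s)
- #         yield stream.connect(("example.com", 80))
- #         yield stream.write(b"GET / HTTP/1.0\r\nHost: example.com\r\n\r\n")
- #         data = yield stream.read_until_close()
- #         raise gen.Return(data)
-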
- def start_tls(self, server_side, ssl_options=None, server_hostname=None):
- """Convert this `IOStream` to an `SSLIOStream`.
-
- This enables protocols that begin in clear-text mode and
- switch to SSL after some initial negotiation (such as the
- ``STARTTLS`` extension to SMTP and IMAP).
-
- This method cannot be used if there are outstanding reads
- or writes on the stream, or if there is any data in the
- IOStream's buffer (data in the operating system's socket
- buffer is allowed). This means it must generally be used
- immediately after reading or writing the last clear-text
- data. It can also be used immediately after connecting,
- before any reads or writes.
-
- The ``ssl_options`` argument may be either an `ssl.SSLContext`
- object or a dictionary of keyword arguments for the
- `ssl.wrap_socket` function. The ``server_hostname`` argument
- will be used for certificate validation unless disabled
- in the ``ssl_options``.
-
- This method returns a `.Future` whose result is the new
- `SSLIOStream`. After this method has been called,
- any other operation on the original stream is undefined.
-
- If a close callback is defined on this stream, it will be
- transferred to the new stream.
-
- .. versionadded:: 4.0
-
- .. versionchanged:: 4.2
- SSL certificates are validated by default; pass
- ``ssl_options=dict(cert_reqs=ssl.CERT_NONE)`` or a
- suitably-configured `ssl.SSLContext` to disable.
- """
- if (self._read_callback or self._read_future or
- self._write_callback or self._write_futures or
- self._connect_callback or self._connect_future or
- self._pending_callbacks or self._closed or
- self._read_buffer or self._write_buffer):
- raise ValueError("IOStream is not idle; cannot convert to SSL")
- if ssl_options is None:
- if server_side:
- ssl_options = _server_ssl_defaults
- else:
- ssl_options = _client_ssl_defaults
-
- socket = self.socket
- self.io_loop.remove_handler(socket)
- self.socket = None
- socket = ssl_wrap_socket(socket, ssl_options,
- server_hostname=server_hostname,
- server_side=server_side,
- do_handshake_on_connect=False)
- orig_close_callback = self._close_callback
- self._close_callback = None
-
- future = TracebackFuture()
- ssl_stream = SSLIOStream(socket, ssl_options=ssl_options,
- io_loop=self.io_loop)
- # Wrap the original close callback so we can fail our Future as well.
- # If we had an "unwrap" counterpart to this method we would need
- # to restore the original callback after our Future resolves
- # so that repeated wrap/unwrap calls don't build up layers.
-
- def close_callback():
- if not future.done():
- # Note that unlike most Futures returned by IOStream,
- # this one passes the underlying error through directly
- # instead of wrapping everything in a StreamClosedError
- # with a real_error attribute. This is because once the
- # connection is established it's more helpful to raise
- # the SSLError directly than to hide it behind a
- # StreamClosedError (and the client is expecting SSL
- # issues rather than network issues since this method is
- # named start_tls).
- future.set_exception(ssl_stream.error or StreamClosedError())
- if orig_close_callback is not None:
- orig_close_callback()
- ssl_stream.set_close_callback(close_callback)
- ssl_stream._ssl_connect_callback = lambda: future.set_result(ssl_stream)
- ssl_stream.max_buffer_size = self.max_buffer_size
- ssl_stream.read_chunk_size = self.read_chunk_size
- return future
-
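- # A STARTTLS-style usage sketch; the protocol exchange and hostname are
- # illustrative, not taken from any particular server:
- #
- #     @gen.coroutine
- #     def upgrade(stream):
- #         yield stream.write(b"STARTTLS\r\n")
- #         yield stream.read_until(b"\r\n")  # server acknowledgement
- #         tls_stream = yield stream.start_tls(
- #             False, server_hostname="mail.example.com")
- #         raise gen.Return(tls_stream)
-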
- def _handle_connect(self):
- err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
- if err != 0:
- self.error = socket.error(err, os.strerror(err))
- # IOLoop implementations may vary: some of them return
- # an error state before the socket becomes writable, so
- # in that case a connection failure would be handled by the
- # error path in _handle_events instead of here.
- if self._connect_future is None:
- gen_log.warning("Connect error on fd %s: %s",
- self.socket.fileno(), errno.errorcode[err])
- self.close()
- return
- if self._connect_callback is not None:
- callback = self._connect_callback
- self._connect_callback = None
- self._run_callback(callback)
- if self._connect_future is not None:
- future = self._connect_future
- self._connect_future = None
- future.set_result(self)
- self._connecting = False
-
- def set_nodelay(self, value):
- if (self.socket is not None and
- self.socket.family in (socket.AF_INET, socket.AF_INET6)):
- try:
- self.socket.setsockopt(socket.IPPROTO_TCP,
- socket.TCP_NODELAY, 1 if value else 0)
- except socket.error as e:
- # Sometimes setsockopt will fail if the socket is closed
- # at the wrong time. This can happen with HTTPServer
- # resetting the value to false between requests.
- if e.errno != errno.EINVAL and not self._is_connreset(e):
- raise
-
-
-class SSLIOStream(IOStream):
- """A utility class to write to and read from a non-blocking SSL socket.
-
- If the socket passed to the constructor is already connected,
- it should be wrapped with::
-
- ssl.wrap_socket(sock, do_handshake_on_connect=False, **kwargs)
-
- before constructing the `SSLIOStream`. Unconnected sockets will be
- wrapped when `IOStream.connect` is finished.
- """
- def __init__(self, *args, **kwargs):
- """The ``ssl_options`` keyword argument may either be an
- `ssl.SSLContext` object or a dictionary of keyword arguments
- for `ssl.wrap_socket`.
- """
- self._ssl_options = kwargs.pop('ssl_options', _client_ssl_defaults)
- super(SSLIOStream, self).__init__(*args, **kwargs)
- self._ssl_accepting = True
- self._handshake_reading = False
- self._handshake_writing = False
- self._ssl_connect_callback = None
- self._server_hostname = None
-
- # If the socket is already connected, attempt to start the handshake.
- try:
- self.socket.getpeername()
- except socket.error:
- pass
- else:
- # Indirectly start the handshake, which will run on the next
- # IOLoop iteration and then the real IO state will be set in
- # _handle_events.
- self._add_io_state(self.io_loop.WRITE)
-
- def reading(self):
- return self._handshake_reading or super(SSLIOStream, self).reading()
-
- def writing(self):
- return self._handshake_writing or super(SSLIOStream, self).writing()
-
- def _got_empty_write(self, size):
- # With OpenSSL, if we couldn't write the entire buffer,
- # the very same string object must be used on the
- # next call to send. Therefore we suppress
- # merging the write buffer after an incomplete send.
- # A cleaner solution would be to set
- # SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER, but this is
- # not yet accessible from python
- # (http://bugs.python.org/issue8240)
- self._freeze_write_buffer(size)
-
- def _do_ssl_handshake(self):
- # Based on code from test_ssl.py in the python stdlib
- try:
- self._handshake_reading = False
- self._handshake_writing = False
- self.socket.do_handshake()
- except ssl.SSLError as err:
- if err.args[0] == ssl.SSL_ERROR_WANT_READ:
- self._handshake_reading = True
- return
- elif err.args[0] == ssl.SSL_ERROR_WANT_WRITE:
- self._handshake_writing = True
- return
- elif err.args[0] in (ssl.SSL_ERROR_EOF,
- ssl.SSL_ERROR_ZERO_RETURN):
- return self.close(exc_info=True)
- elif err.args[0] == ssl.SSL_ERROR_SSL:
- try:
- peer = self.socket.getpeername()
- except Exception:
- peer = '(not connected)'
- gen_log.warning("SSL Error on %s %s: %s",
- self.socket.fileno(), peer, err)
- return self.close(exc_info=True)
- raise
- except socket.error as err:
- # Some port scans (e.g. nmap in -sT mode) have been known
- # to cause do_handshake to raise EBADF and ENOTCONN, so make
- # those errors quiet as well.
- # https://groups.google.com/forum/?fromgroups#!topic/python-tornado/ApucKJat1_0
- if (self._is_connreset(err) or
- err.args[0] in (errno.EBADF, errno.ENOTCONN)):
- return self.close(exc_info=True)
- raise
- except AttributeError:
- # On Linux, if the connection was reset before the call to
- # wrap_socket, do_handshake will fail with an
- # AttributeError.
- return self.close(exc_info=True)
- else:
- self._ssl_accepting = False
- if not self._verify_cert(self.socket.getpeercert()):
- self.close()
- return
- self._run_ssl_connect_callback()
-
- def _run_ssl_connect_callback(self):
- if self._ssl_connect_callback is not None:
- callback = self._ssl_connect_callback
- self._ssl_connect_callback = None
- self._run_callback(callback)
- if self._ssl_connect_future is not None:
- future = self._ssl_connect_future
- self._ssl_connect_future = None
- future.set_result(self)
-
- def _verify_cert(self, peercert):
- """Returns True if peercert is valid according to the configured
- validation mode and hostname.
-
- The ssl handshake already tested the certificate for a valid
- CA signature; the only thing that remains is to check
- the hostname.
- """
- if isinstance(self._ssl_options, dict):
- verify_mode = self._ssl_options.get('cert_reqs', ssl.CERT_NONE)
- elif isinstance(self._ssl_options, ssl.SSLContext):
- verify_mode = self._ssl_options.verify_mode
- assert verify_mode in (ssl.CERT_NONE, ssl.CERT_REQUIRED, ssl.CERT_OPTIONAL)
- if verify_mode == ssl.CERT_NONE or self._server_hostname is None:
- return True
- cert = self.socket.getpeercert()
- if cert is None and verify_mode == ssl.CERT_REQUIRED:
- gen_log.warning("No SSL certificate given")
- return False
- try:
- ssl_match_hostname(peercert, self._server_hostname)
- except SSLCertificateError as e:
- gen_log.warning("Invalid SSL certificate: %s" % e)
- return False
- else:
- return True
-
- def _handle_read(self):
- if self._ssl_accepting:
- self._do_ssl_handshake()
- return
- super(SSLIOStream, self)._handle_read()
-
- def _handle_write(self):
- if self._ssl_accepting:
- self._do_ssl_handshake()
- return
- super(SSLIOStream, self)._handle_write()
-
- def connect(self, address, callback=None, server_hostname=None):
- self._server_hostname = server_hostname
- # Pass a dummy callback to super.connect(), which is slightly
- # more efficient than letting it return a Future we ignore.
- super(SSLIOStream, self).connect(address, callback=lambda: None)
- return self.wait_for_handshake(callback)
-
- def _handle_connect(self):
- # Call the superclass method to check for errors.
- super(SSLIOStream, self)._handle_connect()
- if self.closed():
- return
- # When the connection is complete, wrap the socket for SSL
- # traffic. Note that we do this by overriding _handle_connect
- # instead of by passing a callback to super().connect because
- # user callbacks are enqueued asynchronously on the IOLoop,
- # but since _handle_events calls _handle_connect immediately
- # followed by _handle_write we need this to be synchronous.
- #
- # The IOLoop will get confused if we swap out self.socket while the
- # fd is registered, so remove it now and re-register after
- # wrap_socket().
- self.io_loop.remove_handler(self.socket)
- old_state = self._state
- self._state = None
- self.socket = ssl_wrap_socket(self.socket, self._ssl_options,
- server_hostname=self._server_hostname,
- do_handshake_on_connect=False)
- self._add_io_state(old_state)
-
- def wait_for_handshake(self, callback=None):
- """Wait for the initial SSL handshake to complete.
-
- If a ``callback`` is given, it will be called with no
- arguments once the handshake is complete; otherwise this
- method returns a `.Future` which will resolve to the
- stream itself after the handshake is complete.
-
- Once the handshake is complete, information such as
- the peer's certificate and NPN/ALPN selections may be
- accessed on ``self.socket``.
-
- This method is intended for use on server-side streams
- or after using `IOStream.start_tls`; it should not be used
- with `IOStream.connect` (which already waits for the
- handshake to complete). It may only be called once per stream.
-
- .. versionadded:: 4.2
- """
- if (self._ssl_connect_callback is not None or
- self._ssl_connect_future is not None):
- raise RuntimeError("Already waiting")
- if callback is not None:
- self._ssl_connect_callback = stack_context.wrap(callback)
- future = None
- else:
- future = self._ssl_connect_future = TracebackFuture()
- if not self._ssl_accepting:
- self._run_ssl_connect_callback()
- return future
-
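- # Server-side usage sketch, assuming a hypothetical
- # `tornado.tcpserver.TCPServer` subclass whose accepted streams are
- # `SSLIOStream` instances:
- #
- #     @gen.coroutine
- #     def handle_stream(self, stream, address):
- #         yield stream.wait_for_handshake()
- #         cert = stream.socket.getpeercert()  # available after handshake
-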
- def write_to_fd(self, data):
- try:
- return self.socket.send(data)
- except ssl.SSLError as e:
- if e.args[0] == ssl.SSL_ERROR_WANT_WRITE:
- # In Python 3.5+, SSLSocket.send raises a WANT_WRITE error if
- # the socket is not writeable; we need to transform this into
- # an EWOULDBLOCK socket.error or a zero return value,
- # either of which will be recognized by the caller of this
- # method. Prior to Python 3.5, an unwriteable socket would
- # simply return 0 bytes written.
- return 0
- raise
- finally:
- # Avoid keeping a reference to data, which can be a memoryview.
- # See https://github.com/tornadoweb/tornado/pull/2008
- del data
-
- def read_from_fd(self):
- if self._ssl_accepting:
- # If the handshake hasn't finished yet, there can't be anything
- # to read (attempting to read may or may not raise an exception
- # depending on the SSL version)
- return None
- try:
- # SSLSocket objects have both a read() and recv() method,
- # while regular sockets only have recv().
- # The recv() method blocks (at least in python 2.6) if it is
- # called when there is nothing to read, so we have to use
- # read() instead.
- chunk = self.socket.read(self.read_chunk_size)
- except ssl.SSLError as e:
- # SSLError is a subclass of socket.error, so this except
- # block must come first.
- if e.args[0] == ssl.SSL_ERROR_WANT_READ:
- return None
- else:
- raise
- except socket.error as e:
- if e.args[0] in _ERRNO_WOULDBLOCK:
- return None
- else:
- raise
- if not chunk:
- self.close()
- return None
- return chunk
-
- def _is_connreset(self, e):
- if isinstance(e, ssl.SSLError) and e.args[0] == ssl.SSL_ERROR_EOF:
- return True
- return super(SSLIOStream, self)._is_connreset(e)
-
-
-class PipeIOStream(BaseIOStream):
- """Pipe-based `IOStream` implementation.
-
- The constructor takes an integer file descriptor (such as one returned
- by `os.pipe`) rather than an open file object. Pipes are generally
- one-way, so a `PipeIOStream` can be used for reading or writing but not
- both.
- """
- def __init__(self, fd, *args, **kwargs):
- self.fd = fd
- _set_nonblocking(fd)
- super(PipeIOStream, self).__init__(*args, **kwargs)
-
- def fileno(self):
- return self.fd
-
- def close_fd(self):
- os.close(self.fd)
-
- def write_to_fd(self, data):
- try:
- return os.write(self.fd, data)
- finally:
- # Avoid keeping a reference to data, which can be a memoryview.
- # See https://github.com/tornadoweb/tornado/pull/2008
- del data
-
- def read_from_fd(self):
- try:
- chunk = os.read(self.fd, self.read_chunk_size)
- except (IOError, OSError) as e:
- if errno_from_exception(e) in _ERRNO_WOULDBLOCK:
- return None
- elif errno_from_exception(e) == errno.EBADF:
- # If the writing half of a pipe is closed, select will
- # report it as readable but reads will fail with EBADF.
- self.close(exc_info=True)
- return None
- else:
- raise
- if not chunk:
- self.close()
- return None
- return chunk
-
-
-def doctests():
- import doctest
- return doctest.DocTestSuite()
+#!/usr/bin/env python
+#
+# Copyright 2009 Facebook
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Utility classes to write to and read from non-blocking files and sockets.
+
+Contents:
+
+* `BaseIOStream`: Generic interface for reading and writing.
+* `IOStream`: Implementation of BaseIOStream using non-blocking sockets.
+* `SSLIOStream`: SSL-aware version of IOStream.
+* `PipeIOStream`: Pipe-based IOStream implementation.
+"""
+
+from __future__ import absolute_import, division, print_function
+
+import collections
+import errno
+import numbers
+import os
+import socket
+import sys
+import re
+
+from tornado.concurrent import TracebackFuture
+from tornado import ioloop
+from tornado.log import gen_log, app_log
+ from tornado.netutil import (ssl_wrap_socket, ssl_match_hostname,
+ SSLCertificateError, _client_ssl_defaults,
+ _server_ssl_defaults)
+from tornado import stack_context
+from tornado.util import errno_from_exception
+
+try:
+ from tornado.platform.posix import _set_nonblocking
+except ImportError:
+ _set_nonblocking = None
+
+try:
+ import ssl
+except ImportError:
+ # ssl is not available on Google App Engine
+ ssl = None
+
+# These errnos indicate that a non-blocking operation must be retried
+# at a later time. On most platforms they're the same value, but on
+# some they differ.
+_ERRNO_WOULDBLOCK = (errno.EWOULDBLOCK, errno.EAGAIN)
+
+if hasattr(errno, "WSAEWOULDBLOCK"):
+ _ERRNO_WOULDBLOCK += (errno.WSAEWOULDBLOCK,) # type: ignore
+
+# These errnos indicate that a connection has been abruptly terminated.
+# They should be caught and handled less noisily than other errors.
+_ERRNO_CONNRESET = (errno.ECONNRESET, errno.ECONNABORTED, errno.EPIPE,
+ errno.ETIMEDOUT)
+
+if hasattr(errno, "WSAECONNRESET"):
+ _ERRNO_CONNRESET += (errno.WSAECONNRESET, errno.WSAECONNABORTED, errno.WSAETIMEDOUT) # type: ignore
+
+if sys.platform == 'darwin':
+ # OSX appears to have a race condition that causes send(2) to return
+ # EPROTOTYPE if called while a socket is being torn down:
+ # http://erickt.github.io/blog/2014/11/19/adventures-in-debugging-a-potential-osx-kernel-bug/
+ # Since the socket is being closed anyway, treat this as an ECONNRESET
+ # instead of an unexpected error.
+ _ERRNO_CONNRESET += (errno.EPROTOTYPE,) # type: ignore
+
+# More non-portable errnos:
+_ERRNO_INPROGRESS = (errno.EINPROGRESS,)
+
+if hasattr(errno, "WSAEINPROGRESS"):
+ _ERRNO_INPROGRESS += (errno.WSAEINPROGRESS,) # type: ignore
+
+_WINDOWS = sys.platform.startswith('win')
+
+
+class StreamClosedError(IOError):
+ """Exception raised by `IOStream` methods when the stream is closed.
+
+ Note that the close callback is scheduled to run *after* other
+ callbacks on the stream (to allow for buffered data to be processed),
+ so you may see this error before you see the close callback.
+
+ The ``real_error`` attribute contains the underlying error that caused
+ the stream to close (if any).
+
+ .. versionchanged:: 4.3
+ Added the ``real_error`` attribute.
+ """
+ def __init__(self, real_error=None):
+ super(StreamClosedError, self).__init__('Stream is closed')
+ self.real_error = real_error
+
+
+class UnsatisfiableReadError(Exception):
+ """Exception raised when a read cannot be satisfied.
+
+ Raised by ``read_until`` and ``read_until_regex`` with a ``max_bytes``
+ argument.
+ """
+ pass
+
+
+class StreamBufferFullError(Exception):
+ """Exception raised by `IOStream` methods when the buffer is full.
+ """
+
+
+class BaseIOStream(object):
+ """A utility class to write to and read from a non-blocking file or socket.
+
+ We support a non-blocking ``write()`` and a family of ``read_*()`` methods.
+ All of the methods take an optional ``callback`` argument and return a
+ `.Future` only if no callback is given. When the operation completes,
+ the callback will be run or the `.Future` will resolve with the data
+ read (or ``None`` for ``write()``). All outstanding ``Futures`` will
+ resolve with a `StreamClosedError` when the stream is closed; users
+ of the callback interface will be notified via
+ `.BaseIOStream.set_close_callback` instead.
+
+ When a stream is closed due to an error, the IOStream's ``error``
+ attribute contains the exception object.
+
+ Subclasses must implement `fileno`, `close_fd`, `write_to_fd`,
+ `read_from_fd`, and optionally `get_fd_error`.
+ """
+ def __init__(self, io_loop=None, max_buffer_size=None,
+ read_chunk_size=None, max_write_buffer_size=None):
+ """`BaseIOStream` constructor.
+
+ :arg io_loop: The `.IOLoop` to use; defaults to `.IOLoop.current`.
+ Deprecated since Tornado 4.1.
+ :arg max_buffer_size: Maximum amount of incoming data to buffer;
+ defaults to 100MB.
+ :arg read_chunk_size: Amount of data to read at one time from the
+ underlying transport; defaults to 64KB.
+ :arg max_write_buffer_size: Amount of outgoing data to buffer;
+ defaults to unlimited.
+
+ .. versionchanged:: 4.0
+ Add the ``max_write_buffer_size`` parameter. Changed default
+ ``read_chunk_size`` to 64KB.
+ """
+ self.io_loop = io_loop or ioloop.IOLoop.current()
+ self.max_buffer_size = max_buffer_size or 104857600
+ # A chunk size that is too close to max_buffer_size can cause
+ # spurious failures.
+ self.read_chunk_size = min(read_chunk_size or 65536,
+ self.max_buffer_size // 2)
+ self.max_write_buffer_size = max_write_buffer_size
+ self.error = None
+ self._read_buffer = bytearray()
+ self._read_buffer_pos = 0
+ self._read_buffer_size = 0
+ self._write_buffer = bytearray()
+ self._write_buffer_pos = 0
+ self._write_buffer_size = 0
+ self._write_buffer_frozen = False
+ self._total_write_index = 0
+ self._total_write_done_index = 0
+ self._pending_writes_while_frozen = []
+ self._read_delimiter = None
+ self._read_regex = None
+ self._read_max_bytes = None
+ self._read_bytes = None
+ self._read_partial = False
+ self._read_until_close = False
+ self._read_callback = None
+ self._read_future = None
+ self._streaming_callback = None
+ self._write_callback = None
+ self._write_futures = collections.deque()
+ self._close_callback = None
+ self._connect_callback = None
+ self._connect_future = None
+ # _ssl_connect_future should be defined in SSLIOStream
+ # but it's here so we can clean it up in _maybe_run_close_callback.
+ # TODO: refactor that so subclasses can add additional futures
+ # to be cancelled.
+ self._ssl_connect_future = None
+ self._connecting = False
+ self._state = None
+ self._pending_callbacks = 0
+ self._closed = False
+
+ def fileno(self):
+ """Returns the file descriptor for this stream."""
+ raise NotImplementedError()
+
+ def close_fd(self):
+ """Closes the file underlying this stream.
+
+ ``close_fd`` is called by `BaseIOStream` and should not be called
+ elsewhere; other users should call `close` instead.
+ """
+ raise NotImplementedError()
+
+ def write_to_fd(self, data):
+ """Attempts to write ``data`` to the underlying file.
+
+ Returns the number of bytes written.
+ """
+ raise NotImplementedError()
+
+ def read_from_fd(self):
+ """Attempts to read from the underlying file.
+
+ Returns ``None`` if there was nothing to read (the socket
+ returned `~errno.EWOULDBLOCK` or equivalent), otherwise
+ returns the data. When possible, should return no more than
+ ``self.read_chunk_size`` bytes at a time.
+ """
+ raise NotImplementedError()
+
+ def get_fd_error(self):
+ """Returns information about any error on the underlying file.
+
+ This method is called after the `.IOLoop` has signaled an error on the
+ file descriptor, and should return an Exception (such as `socket.error`
+ with additional information), or None if no such information is
+ available.
+ """
+ return None
+
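+ # A minimal subclass sketch showing the contract of the methods above;
+ # this is essentially what `PipeIOStream` does for pipe file descriptors
+ # (a real transport would also put the fd into non-blocking mode):
+ #
+ #     class FdIOStream(BaseIOStream):
+ #         def __init__(self, fd, *args, **kwargs):
+ #             self._fd = fd
+ #             super(FdIOStream, self).__init__(*args, **kwargs)
+ #
+ #         def fileno(self):
+ #             return self._fd
+ #
+ #         def close_fd(self):
+ #             os.close(self._fd)
+ #
+ #         def write_to_fd(self, data):
+ #             return os.write(self._fd, data)
+ #
+ #         def read_from_fd(self):
+ #             try:
+ #                 chunk = os.read(self._fd, self.read_chunk_size)
+ #             except OSError as e:
+ #                 if errno_from_exception(e) in _ERRNO_WOULDBLOCK:
+ #                     return None  # nothing to read right now
+ #                 raise
+ #             if not chunk:
+ #                 self.close()  # EOF
+ #                 return None
+ #             return chunk
+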
+ def read_until_regex(self, regex, callback=None, max_bytes=None):
+ """Asynchronously read until we have matched the given regex.
+
+ The result includes the data that matches the regex and anything
+ that came before it. If a callback is given, it will be run
+ with the data as an argument; if not, this method returns a
+ `.Future`.
+
+ If ``max_bytes`` is not None, the connection will be closed
+ if more than ``max_bytes`` bytes have been read and the regex is
+ not satisfied.
+
+ .. versionchanged:: 4.0
+ Added the ``max_bytes`` argument. The ``callback`` argument is
+ now optional and a `.Future` will be returned if it is omitted.
+ """
+ future = self._set_read_callback(callback)
+ self._read_regex = re.compile(regex)
+ self._read_max_bytes = max_bytes
+ try:
+ self._try_inline_read()
+ except UnsatisfiableReadError as e:
+ # Handle this the same way as in _handle_events.
+ gen_log.info("Unsatisfiable read, closing connection: %s" % e)
+ self.close(exc_info=True)
+ return future
+ except:
+ if future is not None:
+ # Ensure that the future doesn't log an error because its
+ # failure was never examined.
+ future.add_done_callback(lambda f: f.exception())
+ raise
+ return future
+
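+ # Usage sketch: read an HTTP-style header block while bounding how far we
+ # scan (the pattern and limit are illustrative; a regex lets us accept
+ # both LF and CRLF line endings):
+ #
+ #     headers = yield stream.read_until_regex(b"\r?\n\r?\n",
+ #                                             max_bytes=64 * 1024)
+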
+ def read_until(self, delimiter, callback=None, max_bytes=None):
+ """Asynchronously read until we have found the given delimiter.
+
+ The result includes all the data read including the delimiter.
+ If a callback is given, it will be run with the data as an argument;
+ if not, this method returns a `.Future`.
+
+ If ``max_bytes`` is not None, the connection will be closed
+ if more than ``max_bytes`` bytes have been read and the delimiter
+ is not found.
+
+ .. versionchanged:: 4.0
+ Added the ``max_bytes`` argument. The ``callback`` argument is
+ now optional and a `.Future` will be returned if it is omitted.
+ """
+ future = self._set_read_callback(callback)
+ self._read_delimiter = delimiter
+ self._read_max_bytes = max_bytes
+ try:
+ self._try_inline_read()
+ except UnsatisfiableReadError as e:
+ # Handle this the same way as in _handle_events.
+ gen_log.info("Unsatisfiable read, closing connection: %s" % e)
+ self.close(exc_info=True)
+ return future
+ except:
+ if future is not None:
+ future.add_done_callback(lambda f: f.exception())
+ raise
+ return future
+
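+ # Usage sketch: a line-oriented read loop (assumes a connected `stream`
+ # inside a coroutine; the loop ends when the peer closes the connection):
+ #
+ #     while True:
+ #         try:
+ #             line = yield stream.read_until(b"\r\n")
+ #         except StreamClosedError:
+ #             break
+ #         handle_line(line)  # hypothetical handler
+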
+ def read_bytes(self, num_bytes, callback=None, streaming_callback=None,
+ partial=False):
+ """Asynchronously read a number of bytes.
+
+ If a ``streaming_callback`` is given, it will be called with chunks
+ of data as they become available, and the final result will be empty.
+ Otherwise, the result is all the data that was read.
+ If a callback is given, it will be run with the data as an argument;
+ if not, this method returns a `.Future`.
+
+ If ``partial`` is true, the callback is run as soon as we have
+ any bytes to return (but never more than ``num_bytes``)
+
+ .. versionchanged:: 4.0
+ Added the ``partial`` argument. The callback argument is now
+ optional and a `.Future` will be returned if it is omitted.
+ """
+ future = self._set_read_callback(callback)
+ assert isinstance(num_bytes, numbers.Integral)
+ self._read_bytes = num_bytes
+ self._read_partial = partial
+ self._streaming_callback = stack_context.wrap(streaming_callback)
+ try:
+ self._try_inline_read()
+ except:
+ if future is not None:
+ future.add_done_callback(lambda f: f.exception())
+ raise
+ return future
+
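+ # Usage sketch: reading a length-prefixed frame (the 4-byte big-endian
+ # header is an illustrative framing choice; assumes `import struct`):
+ #
+ #     header = yield stream.read_bytes(4)
+ #     body = yield stream.read_bytes(struct.unpack("!I", header)[0])
+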
+ def read_until_close(self, callback=None, streaming_callback=None):
+ """Asynchronously reads all data from the socket until it is closed.
+
+ If a ``streaming_callback`` is given, it will be called with chunks
+ of data as they become available, and the final result will be empty.
+ Otherwise, the result is all the data that was read.
+ If a callback is given, it will be run with the data as an argument;
+ if not, this method returns a `.Future`.
+
+ Note that if a ``streaming_callback`` is used, data will be
+ read from the socket as quickly as it becomes available; there
+ is no way to apply backpressure or cancel the reads. If flow
+ control or cancellation are desired, use a loop with
+ `read_bytes(partial=True) <.read_bytes>` instead.
+
+ .. versionchanged:: 4.0
+ The callback argument is now optional and a `.Future` will
+ be returned if it is omitted.
+
+ """
+ future = self._set_read_callback(callback)
+ self._streaming_callback = stack_context.wrap(streaming_callback)
+ if self.closed():
+ if self._streaming_callback is not None:
+ self._run_read_callback(self._read_buffer_size, True)
+ self._run_read_callback(self._read_buffer_size, False)
+ return future
+ self._read_until_close = True
+ try:
+ self._try_inline_read()
+ except:
+ if future is not None:
+ future.add_done_callback(lambda f: f.exception())
+ raise
+ return future
+
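+ # Sketch of both patterns the docstring mentions (assumes a connected
+ # `stream` in a coroutine; the sink name is hypothetical):
+ #
+ #     # Simple: buffer everything until the peer closes the connection.
+ #     data = yield stream.read_until_close()
+ #
+ #     # Flow-controlled alternative: pull one chunk at a time.
+ #     while True:
+ #         try:
+ #             chunk = yield stream.read_bytes(65536, partial=True)
+ #         except StreamClosedError:
+ #             break
+ #         consume(chunk)  # hypothetical sink
+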
+ def write(self, data, callback=None):
+ """Asynchronously write the given data to this stream.
+
+ If ``callback`` is given, we call it when all of the buffered write
+ data has been successfully written to the stream. If there was
+ previously buffered write data and an old write callback, that
+ callback is simply overwritten with this new callback.
+
+ If no ``callback`` is given, this method returns a `.Future` that
+ resolves (with a result of ``None``) when the write has been
+ completed.
+
+ The ``data`` argument may be of type `bytes` or `memoryview`.
+
+ .. versionchanged:: 4.0
+ Now returns a `.Future` if no callback is given.
+
+ .. versionchanged:: 4.5
+ Added support for `memoryview` arguments.
+ """
+ self._check_closed()
+ if data:
+ if (self.max_write_buffer_size is not None and
+ self._write_buffer_size + len(data) > self.max_write_buffer_size):
+ raise StreamBufferFullError("Reached maximum write buffer size")
+ if self._write_buffer_frozen:
+ self._pending_writes_while_frozen.append(data)
+ else:
+ self._write_buffer += data
+ self._write_buffer_size += len(data)
+ self._total_write_index += len(data)
+ if callback is not None:
+ self._write_callback = stack_context.wrap(callback)
+ future = None
+ else:
+ future = TracebackFuture()
+ future.add_done_callback(lambda f: f.exception())
+ self._write_futures.append((self._total_write_index, future))
+ if not self._connecting:
+ self._handle_write()
+ if self._write_buffer_size:
+ self._add_io_state(self.io_loop.WRITE)
+ self._maybe_add_error_listener()
+ return future
+
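+ # Usage sketch: awaiting a write and handling the buffer-full case (the
+ # size is illustrative; StreamBufferFullError is only possible when
+ # max_write_buffer_size was set):
+ #
+ #     try:
+ #         yield stream.write(b"x" * 4096)
+ #     except StreamBufferFullError:
+ #         stream.close()
+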
+ def set_close_callback(self, callback):
+ """Call the given callback when the stream is closed.
+
+ This is not necessary for applications that use the `.Future`
+ interface; all outstanding ``Futures`` will resolve with a
+ `StreamClosedError` when the stream is closed.
+ """
+ self._close_callback = stack_context.wrap(callback)
+ self._maybe_add_error_listener()
+
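+ # Usage sketch for callback-style code (coroutine code can instead rely
+ # on the StreamClosedError raised by pending Futures):
+ #
+ #     def on_close():
+ #         print("closed:", stream.error)  # error is None on a clean close
+ #     stream.set_close_callback(on_close)
+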
+ def close(self, exc_info=False):
+ """Close this stream.
+
+ If ``exc_info`` is true, set the ``error`` attribute to the current
+ exception from `sys.exc_info` (or if ``exc_info`` is a tuple,
+ use that instead of `sys.exc_info`).
+ """
+ if not self.closed():
+ if exc_info:
+ if not isinstance(exc_info, tuple):
+ exc_info = sys.exc_info()
+ if any(exc_info):
+ self.error = exc_info[1]
+ if self._read_until_close:
+ if (self._streaming_callback is not None and
+ self._read_buffer_size):
+ self._run_read_callback(self._read_buffer_size, True)
+ self._read_until_close = False
+ self._run_read_callback(self._read_buffer_size, False)
+ if self._state is not None:
+ self.io_loop.remove_handler(self.fileno())
+ self._state = None
+ self.close_fd()
+ self._closed = True
+ self._maybe_run_close_callback()
+
+ def _maybe_run_close_callback(self):
+ # If there are pending callbacks, don't run the close callback
+ # until they're done (see _maybe_add_error_listener)
+ if self.closed() and self._pending_callbacks == 0:
+ futures = []
+ if self._read_future is not None:
+ futures.append(self._read_future)
+ self._read_future = None
+ futures += [future for _, future in self._write_futures]
+ self._write_futures.clear()
+ if self._connect_future is not None:
+ futures.append(self._connect_future)
+ self._connect_future = None
+ if self._ssl_connect_future is not None:
+ futures.append(self._ssl_connect_future)
+ self._ssl_connect_future = None
+ for future in futures:
+ future.set_exception(StreamClosedError(real_error=self.error))
+ if self._close_callback is not None:
+ cb = self._close_callback
+ self._close_callback = None
+ self._run_callback(cb)
+ # Delete any unfinished callbacks to break up reference cycles.
+ self._read_callback = self._write_callback = None
+ # Clear the buffers so they can be cleared immediately even
+ # if the IOStream object is kept alive by a reference cycle.
+ # TODO: Clear the read buffer too; it currently breaks some tests.
+ self._write_buffer = None
+ self._write_buffer_size = 0
+
+ def reading(self):
+ """Returns true if we are currently reading from the stream."""
+ return self._read_callback is not None or self._read_future is not None
+
+ def writing(self):
+ """Returns true if we are currently writing to the stream."""
+ return self._write_buffer_size > 0
+
+ def closed(self):
+ """Returns true if the stream has been closed."""
+ return self._closed
+
+ def set_nodelay(self, value):
+ """Sets the no-delay flag for this stream.
+
+ By default, data written to TCP streams may be held for a time
+ to make the most efficient use of bandwidth (according to
+ Nagle's algorithm). The no-delay flag requests that data be
+ written as soon as possible, even if doing so would consume
+ additional bandwidth.
+
+ This flag is currently defined only for TCP-based ``IOStreams``.
+
+ .. versionadded:: 3.1
+ """
+ pass
+
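+ # Usage sketch: disable Nagle's algorithm on a latency-sensitive TCP
+ # stream:
+ #
+ #     stream.set_nodelay(True)  # flush small writes immediately
+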
+ def _handle_events(self, fd, events):
+ if self.closed():
+ gen_log.warning("Got events for closed stream %s", fd)
+ return
+ try:
+ if self._connecting:
+ # Most IOLoops will report a write failed connect
+ # with the WRITE event, but SelectIOLoop reports a
+ # READ as well so we must check for connecting before
+ # either.
+ self._handle_connect()
+ if self.closed():
+ return
+ if events & self.io_loop.READ:
+ self._handle_read()
+ if self.closed():
+ return
+ if events & self.io_loop.WRITE:
+ self._handle_write()
+ if self.closed():
+ return
+ if events & self.io_loop.ERROR:
+ self.error = self.get_fd_error()
+ # We may have queued up a user callback in _handle_read or
+ # _handle_write, so don't close the IOStream until those
+ # callbacks have had a chance to run.
+ self.io_loop.add_callback(self.close)
+ return
+ state = self.io_loop.ERROR
+ if self.reading():
+ state |= self.io_loop.READ
+ if self.writing():
+ state |= self.io_loop.WRITE
+ if state == self.io_loop.ERROR and self._read_buffer_size == 0:
+ # If the connection is idle, listen for reads too so
+ # we can tell if the connection is closed. If there is
+ # data in the read buffer we won't run the close callback
+ # yet anyway, so we don't need to listen in this case.
+ state |= self.io_loop.READ
+ if state != self._state:
+ assert self._state is not None, \
+ "shouldn't happen: _handle_events without self._state"
+ self._state = state
+ self.io_loop.update_handler(self.fileno(), self._state)
+ except UnsatisfiableReadError as e:
+ gen_log.info("Unsatisfiable read, closing connection: %s" % e)
+ self.close(exc_info=True)
+ except Exception:
+ gen_log.error("Uncaught exception, closing connection.",
+ exc_info=True)
+ self.close(exc_info=True)
+ raise
+
+ def _run_callback(self, callback, *args):
+ def wrapper():
+ self._pending_callbacks -= 1
+ try:
+ return callback(*args)
+ except Exception:
+ app_log.error("Uncaught exception, closing connection.",
+ exc_info=True)
+ # Close the socket on an uncaught exception from a user callback
+ # (It would eventually get closed when the socket object is
+ # gc'd, but we don't want to rely on gc happening before we
+ # run out of file descriptors)
+ self.close(exc_info=True)
+ # Re-raise the exception so that IOLoop.handle_callback_exception
+ # can see it and log the error
+ raise
+ finally:
+ self._maybe_add_error_listener()
+ # We schedule callbacks to be run on the next IOLoop iteration
+ # rather than running them directly for several reasons:
+ # * Prevents unbounded stack growth when a callback calls an
+ # IOLoop operation that immediately runs another callback
+ # * Provides a predictable execution context for e.g.
+ # non-reentrant mutexes
+ # * Ensures that the try/except in wrapper() is run outside
+ # of the application's StackContexts
+ with stack_context.NullContext():
+ # stack_context was already captured in callback, we don't need to
+ # capture it again for IOStream's wrapper. This is especially
+ # important if the callback was pre-wrapped before entry to
+ # IOStream (as in HTTPConnection._header_callback), as we could
+ # capture and leak the wrong context here.
+ self._pending_callbacks += 1
+ self.io_loop.add_callback(wrapper)
+
+ def _read_to_buffer_loop(self):
+ # This method is called from _handle_read and _try_inline_read.
+ try:
+ if self._read_bytes is not None:
+ target_bytes = self._read_bytes
+ elif self._read_max_bytes is not None:
+ target_bytes = self._read_max_bytes
+ elif self.reading():
+ # For read_until without max_bytes, or
+ # read_until_close, read as much as we can before
+ # scanning for the delimiter.
+ target_bytes = None
+ else:
+ target_bytes = 0
+ next_find_pos = 0
+ # Pretend to have a pending callback so that an EOF in
+ # _read_to_buffer doesn't trigger an immediate close
+ # callback. At the end of this method we'll either
+ # establish a real pending callback via
+ # _read_from_buffer or run the close callback.
+ #
+ # We need two try statements here so that
+ # pending_callbacks is decremented before the `except`
+ # clause below (which calls `close` and does need to
+ # trigger the callback)
+ self._pending_callbacks += 1
+ while not self.closed():
+ # Read from the socket until we get EWOULDBLOCK or equivalent.
+ # SSL sockets do some internal buffering, and if the data is
+ # sitting in the SSL object's buffer select() and friends
+ # can't see it; the only way to find out if it's there is to
+ # try to read it.
+ if self._read_to_buffer() == 0:
+ break
+
+ self._run_streaming_callback()
+
+ # If we've read all the bytes we can use, break out of
+ # this loop. We can't just call _read_from_buffer here
+ # because of subtle interactions with the
+ # pending_callback and error_listener mechanisms.
+ #
+ # If we've reached target_bytes, we know we're done.
+ if (target_bytes is not None and
+ self._read_buffer_size >= target_bytes):
+ break
+
+ # Otherwise, we need to call the more expensive find_read_pos.
+ # It's inefficient to do this on every read, so instead
+ # do it on the first read and whenever the read buffer
+ # size has doubled.
+ if self._read_buffer_size >= next_find_pos:
+ pos = self._find_read_pos()
+ if pos is not None:
+ return pos
+ next_find_pos = self._read_buffer_size * 2
+ return self._find_read_pos()
+ finally:
+ self._pending_callbacks -= 1
+
+ def _handle_read(self):
+ try:
+ pos = self._read_to_buffer_loop()
+ except UnsatisfiableReadError:
+ raise
+ except Exception as e:
+ gen_log.warning("error on read: %s" % e)
+ self.close(exc_info=True)
+ return
+ if pos is not None:
+ self._read_from_buffer(pos)
+ return
+ else:
+ self._maybe_run_close_callback()
+
+ def _set_read_callback(self, callback):
+ assert self._read_callback is None, "Already reading"
+ assert self._read_future is None, "Already reading"
+ if callback is not None:
+ self._read_callback = stack_context.wrap(callback)
+ else:
+ self._read_future = TracebackFuture()
+ return self._read_future
+
+ def _run_read_callback(self, size, streaming):
+ if streaming:
+ callback = self._streaming_callback
+ else:
+ callback = self._read_callback
+ self._read_callback = self._streaming_callback = None
+ if self._read_future is not None:
+ assert callback is None
+ future = self._read_future
+ self._read_future = None
+ future.set_result(self._consume(size))
+ if callback is not None:
+ assert (self._read_future is None) or streaming
+ self._run_callback(callback, self._consume(size))
+ else:
+ # If we scheduled a callback, we will add the error listener
+ # afterwards. If we didn't, we have to do it now.
+ self._maybe_add_error_listener()
+
+ def _try_inline_read(self):
+ """Attempt to complete the current read operation from buffered data.
+
+ If the read can be completed without blocking, schedules the
+ read callback on the next IOLoop iteration; otherwise starts
+ listening for reads on the socket.
+ """
+ # See if we've already got the data from a previous read
+ self._run_streaming_callback()
+ pos = self._find_read_pos()
+ if pos is not None:
+ self._read_from_buffer(pos)
+ return
+ self._check_closed()
+ try:
+ pos = self._read_to_buffer_loop()
+ except Exception:
+ # If there was an error in _read_to_buffer, we called close() already,
+ # but couldn't run the close callback because of _pending_callbacks.
+ # Before we escape from this function, run the close callback if
+ # applicable.
+ self._maybe_run_close_callback()
+ raise
+ if pos is not None:
+ self._read_from_buffer(pos)
+ return
+ # We couldn't satisfy the read inline, so either close the stream
+ # or listen for new data.
+ if self.closed():
+ self._maybe_run_close_callback()
+ else:
+ self._add_io_state(ioloop.IOLoop.READ)
+
+ def _read_to_buffer(self):
+ """Reads from the socket and appends the result to the read buffer.
+
+ Returns the number of bytes read. Returns 0 if there is nothing
+ to read (i.e. the read returns EWOULDBLOCK or equivalent). On
+ error closes the socket and raises an exception.
+ """
+ while True:
+ try:
+ chunk = self.read_from_fd()
+ except (socket.error, IOError, OSError) as e:
+ if errno_from_exception(e) == errno.EINTR:
+ continue
+ # ssl.SSLError is a subclass of socket.error
+ if self._is_connreset(e):
+ # Treat ECONNRESET as a connection close rather than
+ # an error to minimize log spam (the exception will
+ # be available on self.error for apps that care).
+ self.close(exc_info=True)
+ return
+ self.close(exc_info=True)
+ raise
+ break
+ if chunk is None:
+ return 0
+ self._read_buffer += chunk
+ self._read_buffer_size += len(chunk)
+ if self._read_buffer_size > self.max_buffer_size:
+ gen_log.error("Reached maximum read buffer size")
+ self.close()
+ raise StreamBufferFullError("Reached maximum read buffer size")
+ return len(chunk)
+
+ def _run_streaming_callback(self):
+ if self._streaming_callback is not None and self._read_buffer_size:
+ bytes_to_consume = self._read_buffer_size
+ if self._read_bytes is not None:
+ bytes_to_consume = min(self._read_bytes, bytes_to_consume)
+ self._read_bytes -= bytes_to_consume
+ self._run_read_callback(bytes_to_consume, True)
+
+ def _read_from_buffer(self, pos):
+ """Attempts to complete the currently-pending read from the buffer.
+
+ The argument is either a position in the read buffer or None,
+ as returned by _find_read_pos.
+ """
+ self._read_bytes = self._read_delimiter = self._read_regex = None
+ self._read_partial = False
+ self._run_read_callback(pos, False)
+
+ def _find_read_pos(self):
+ """Attempts to find a position in the read buffer that satisfies
+ the currently-pending read.
+
+ Returns a position in the buffer if the current read can be satisfied,
+ or None if it cannot.
+ """
+ if (self._read_bytes is not None and
+ (self._read_buffer_size >= self._read_bytes or
+ (self._read_partial and self._read_buffer_size > 0))):
+ num_bytes = min(self._read_bytes, self._read_buffer_size)
+ return num_bytes
+ elif self._read_delimiter is not None:
+ # Multi-byte delimiters (e.g. '\r\n') may straddle two
+ # chunks in the read buffer, so we can't easily find them
+ # without collapsing the buffer. However, since protocols
+ # using delimited reads (as opposed to reads of a known
+ # length) tend to be "line" oriented, the delimiter is likely
+ # to be in the first few chunks. Merge the buffer gradually
+ # since large merges are relatively expensive and get undone in
+ # _consume().
+ if self._read_buffer:
+ loc = self._read_buffer.find(self._read_delimiter,
+ self._read_buffer_pos)
+ if loc != -1:
+ loc -= self._read_buffer_pos
+ delimiter_len = len(self._read_delimiter)
+ self._check_max_bytes(self._read_delimiter,
+ loc + delimiter_len)
+ return loc + delimiter_len
+ self._check_max_bytes(self._read_delimiter,
+ self._read_buffer_size)
+ elif self._read_regex is not None:
+ if self._read_buffer:
+ m = self._read_regex.search(self._read_buffer,
+ self._read_buffer_pos)
+ if m is not None:
+ loc = m.end() - self._read_buffer_pos
+ self._check_max_bytes(self._read_regex, loc)
+ return loc
+ self._check_max_bytes(self._read_regex, self._read_buffer_size)
+ return None
+
+ def _check_max_bytes(self, delimiter, size):
+ if (self._read_max_bytes is not None and
+ size > self._read_max_bytes):
+ raise UnsatisfiableReadError(
+ "delimiter %r not found within %d bytes" % (
+ delimiter, self._read_max_bytes))
+
+ def _freeze_write_buffer(self, size):
+ self._write_buffer_frozen = size
+
+ def _unfreeze_write_buffer(self):
+ self._write_buffer_frozen = False
+ self._write_buffer += b''.join(self._pending_writes_while_frozen)
+ self._write_buffer_size += sum(map(len, self._pending_writes_while_frozen))
+ self._pending_writes_while_frozen[:] = []
+
+ def _got_empty_write(self, size):
+ """
+ Called when a non-blocking write() fails to write anything.
+ Can be overridden in subclasses.
+ """
+
+ def _handle_write(self):
+ while self._write_buffer_size:
+ assert self._write_buffer_size >= 0
+ try:
+ start = self._write_buffer_pos
+ if self._write_buffer_frozen:
+ size = self._write_buffer_frozen
+ elif _WINDOWS:
+ # On windows, socket.send blows up if given a
+ # write buffer that's too large, instead of just
+ # returning the number of bytes it was able to
+ # process. Therefore we must not call socket.send
+ # with more than 128KB at a time.
+ size = 128 * 1024
+ else:
+ size = self._write_buffer_size
+ num_bytes = self.write_to_fd(
+ memoryview(self._write_buffer)[start:start + size])
+ if num_bytes == 0:
+ self._got_empty_write(size)
+ break
+ self._write_buffer_pos += num_bytes
+ self._write_buffer_size -= num_bytes
+ # Amortized O(1) shrink
+ # (this heuristic is implemented natively in Python 3.4+
+ # but is replicated here for Python 2)
+ if self._write_buffer_pos > self._write_buffer_size:
+ del self._write_buffer[:self._write_buffer_pos]
+ self._write_buffer_pos = 0
+ if self._write_buffer_frozen:
+ self._unfreeze_write_buffer()
+ self._total_write_done_index += num_bytes
+ except (socket.error, IOError, OSError) as e:
+ if e.args[0] in _ERRNO_WOULDBLOCK:
+ self._got_empty_write(size)
+ break
+ else:
+ if not self._is_connreset(e):
+ # Broken pipe errors are usually caused by connection
+ # reset, and it's better not to log EPIPE errors to
+ # minimize log spam
+ gen_log.warning("Write error on %s: %s",
+ self.fileno(), e)
+ self.close(exc_info=True)
+ return
+
+ while self._write_futures:
+ index, future = self._write_futures[0]
+ if index > self._total_write_done_index:
+ break
+ self._write_futures.popleft()
+ future.set_result(None)
+
+ if not self._write_buffer_size:
+ if self._write_callback:
+ callback = self._write_callback
+ self._write_callback = None
+ self._run_callback(callback)
+
+ def _consume(self, loc):
+ # Consume loc bytes from the read buffer and return them
+ if loc == 0:
+ return b""
+ assert loc <= self._read_buffer_size
+ # Slice the bytearray buffer into bytes, without intermediate copying
+ b = (memoryview(self._read_buffer)
+ [self._read_buffer_pos:self._read_buffer_pos + loc]
+ ).tobytes()
+ self._read_buffer_pos += loc
+ self._read_buffer_size -= loc
+ # Amortized O(1) shrink
+ # (this heuristic is implemented natively in Python 3.4+
+ # but is replicated here for Python 2)
+ if self._read_buffer_pos > self._read_buffer_size:
+ del self._read_buffer[:self._read_buffer_pos]
+ self._read_buffer_pos = 0
+ return b
+
+ def _check_closed(self):
+ if self.closed():
+ raise StreamClosedError(real_error=self.error)
+
+ def _maybe_add_error_listener(self):
+ # This method is part of an optimization: to detect a connection that
+ # is closed when we're not actively reading or writing, we must listen
+ # for read events. However, it is inefficient to do this when the
+ # connection is first established because we are going to read or write
+ # immediately anyway. Instead, we insert checks at various times to
+ # see if the connection is idle and add the read listener then.
+ if self._pending_callbacks != 0:
+ return
+ if self._state is None or self._state == ioloop.IOLoop.ERROR:
+ if self.closed():
+ self._maybe_run_close_callback()
+ elif (self._read_buffer_size == 0 and
+ self._close_callback is not None):
+ self._add_io_state(ioloop.IOLoop.READ)
+
+ def _add_io_state(self, state):
+ """Adds `state` (IOLoop.{READ,WRITE} flags) to our event handler.
+
+ Implementation notes: Reads and writes have a fast path and a
+ slow path. The fast path reads synchronously from socket
+ buffers, while the slow path uses `_add_io_state` to schedule
+ an IOLoop callback. Note that in both cases, the callback is
+ run asynchronously with `_run_callback`.
+
+ To detect closed connections, we must have called
+ `_add_io_state` at some point, but we want to delay this as
+ much as possible so we don't have to set an `IOLoop.ERROR`
+ listener that will be overwritten by the next slow-path
+ operation. As long as there are callbacks scheduled for
+ fast-path ops, those callbacks may do more reads.
+ If a sequence of fast-path ops does not end in a slow-path op
+ (e.g. for an @asynchronous long-poll request), we must add
+ the error handler. This is done in `_run_callback` and `write`
+ (since the write callback is optional so we can have a
+ fast-path write with no `_run_callback`)
+ """
+ if self.closed():
+ # connection has been closed, so there can be no future events
+ return
+ if self._state is None:
+ self._state = ioloop.IOLoop.ERROR | state
+ with stack_context.NullContext():
+ self.io_loop.add_handler(
+ self.fileno(), self._handle_events, self._state)
+ elif not self._state & state:
+ self._state = self._state | state
+ self.io_loop.update_handler(self.fileno(), self._state)
+
+ def _is_connreset(self, exc):
+ """Return true if exc is ECONNRESET or equivalent.
+
+ May be overridden in subclasses.
+ """
+ return (isinstance(exc, (socket.error, IOError)) and
+ errno_from_exception(exc) in _ERRNO_CONNRESET)
+
+
+class IOStream(BaseIOStream):
+ r"""Socket-based `IOStream` implementation.
+
+ This class supports the read and write methods from `BaseIOStream`
+ plus a `connect` method.
+
+ The ``socket`` parameter may either be connected or unconnected.
+ For server operations the socket is the result of calling
+ `socket.accept <socket.socket.accept>`. For client operations the
+ socket is created with `socket.socket`, and may either be
+ connected before passing it to the `IOStream` or connected with
+ `IOStream.connect`.
+
+ A very simple (and broken) HTTP client using this class:
+
+ .. testcode::
+
+ import tornado.ioloop
+ import tornado.iostream
+ import socket
+
+ def send_request():
+ stream.write(b"GET / HTTP/1.0\r\nHost: friendfeed.com\r\n\r\n")
+ stream.read_until(b"\r\n\r\n", on_headers)
+
+ def on_headers(data):
+ headers = {}
+ for line in data.split(b"\r\n"):
+ parts = line.split(b":")
+ if len(parts) == 2:
+ headers[parts[0].strip()] = parts[1].strip()
+ stream.read_bytes(int(headers[b"Content-Length"]), on_body)
+
+ def on_body(data):
+ print(data)
+ stream.close()
+ tornado.ioloop.IOLoop.current().stop()
+
+ if __name__ == '__main__':
+ s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
+ stream = tornado.iostream.IOStream(s)
+ stream.connect(("friendfeed.com", 80), send_request)
+ tornado.ioloop.IOLoop.current().start()
+
+ .. testoutput::
+ :hide:
+
+ """
+ def __init__(self, socket, *args, **kwargs):
+ self.socket = socket
+ self.socket.setblocking(False)
+ super(IOStream, self).__init__(*args, **kwargs)
+
+ def fileno(self):
+ return self.socket
+
+ def close_fd(self):
+ self.socket.close()
+ self.socket = None
+
+ def get_fd_error(self):
+ err = self.socket.getsockopt(socket.SOL_SOCKET,
+ socket.SO_ERROR)
+ return socket.error(err, os.strerror(err))
+
+ def read_from_fd(self):
+ try:
+ chunk = self.socket.recv(self.read_chunk_size)
+ except socket.error as e:
+ if e.args[0] in _ERRNO_WOULDBLOCK:
+ return None
+ else:
+ raise
+ if not chunk:
+ self.close()
+ return None
+ return chunk
+
+ def write_to_fd(self, data):
+ try:
+ return self.socket.send(data)
+ finally:
+ # Avoid keeping a reference to data, which can be a memoryview.
+ # See https://github.com/tornadoweb/tornado/pull/2008
+ del data
+
+ def connect(self, address, callback=None, server_hostname=None):
+ """Connects the socket to a remote address without blocking.
+
+ May only be called if the socket passed to the constructor was
+ not previously connected. The address parameter is in the
+ same format as for `socket.connect <socket.socket.connect>` for
+ the type of socket passed to the IOStream constructor,
+ e.g. an ``(ip, port)`` tuple. Hostnames are accepted here,
+ but will be resolved synchronously and block the IOLoop.
+ If you have a hostname instead of an IP address, the `.TCPClient`
+ class is recommended instead of calling this method directly.
+ `.TCPClient` will do asynchronous DNS resolution and handle
+ both IPv4 and IPv6.
+
+ If ``callback`` is specified, it will be called with no
+ arguments when the connection is completed; if not this method
+ returns a `.Future` (whose result after a successful
+ connection will be the stream itself).
+
+ In SSL mode, the ``server_hostname`` parameter will be used
+ for certificate validation (unless disabled in the
+ ``ssl_options``) and SNI (if supported; requires Python
+ 2.7.9+).
+
+ Note that it is safe to call `IOStream.write
+ <BaseIOStream.write>` while the connection is pending, in
+ which case the data will be written as soon as the connection
+ is ready. Calling `IOStream` read methods before the socket is
+ connected works on some platforms but is non-portable.
+
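+ A minimal sketch of the `.Future` form from a coroutine (the
+ address is illustrative; an IP literal avoids the blocking DNS
+ lookup mentioned above)::
+
+ stream = tornado.iostream.IOStream(socket.socket())
+ yield stream.connect(("127.0.0.1", 80))
+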
+ .. versionchanged:: 4.0
+ If no callback is given, returns a `.Future`.
+
+ .. versionchanged:: 4.2
+ SSL certificates are validated by default; pass
+ ``ssl_options=dict(cert_reqs=ssl.CERT_NONE)`` or a
+ suitably-configured `ssl.SSLContext` to the
+ `SSLIOStream` constructor to disable.
+ """
+ self._connecting = True
+ if callback is not None:
+ self._connect_callback = stack_context.wrap(callback)
+ future = None
+ else:
+ future = self._connect_future = TracebackFuture()
+ try:
+ self.socket.connect(address)
+ except socket.error as e:
+ # In non-blocking mode we expect connect() to raise an
+ # exception with EINPROGRESS or EWOULDBLOCK.
+ #
+ # On freebsd, other errors such as ECONNREFUSED may be
+ # returned immediately when attempting to connect to
+ # localhost, so handle them the same way as an error
+ # reported later in _handle_connect.
+ if (errno_from_exception(e) not in _ERRNO_INPROGRESS and
+ errno_from_exception(e) not in _ERRNO_WOULDBLOCK):
+ if future is None:
+ gen_log.warning("Connect error on fd %s: %s",
+ self.socket.fileno(), e)
+ self.close(exc_info=True)
+ return future
+ self._add_io_state(self.io_loop.WRITE)
+ return future
+
+ def start_tls(self, server_side, ssl_options=None, server_hostname=None):
+ """Convert this `IOStream` to an `SSLIOStream`.
+
+ This enables protocols that begin in clear-text mode and
+ switch to SSL after some initial negotiation (such as the
+ ``STARTTLS`` extension to SMTP and IMAP).
+
+ This method cannot be used if there are outstanding reads
+ or writes on the stream, or if there is any data in the
+ IOStream's buffer (data in the operating system's socket
+ buffer is allowed). This means it must generally be used
+ immediately after reading or writing the last clear-text
+ data. It can also be used immediately after connecting,
+ before any reads or writes.
+
+ The ``ssl_options`` argument may be either an `ssl.SSLContext`
+ object or a dictionary of keyword arguments for the
+ `ssl.wrap_socket` function. The ``server_hostname`` argument
+ will be used for certificate validation unless disabled
+ in the ``ssl_options``.
+
+ This method returns a `.Future` whose result is the new
+ `SSLIOStream`. After this method has been called,
+ any other operation on the original stream is undefined.
+
+ If a close callback is defined on this stream, it will be
+ transferred to the new stream.
+
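+ A hedged sketch of the STARTTLS pattern from a coroutine (the
+ protocol exchange shown is illustrative)::
+
+ yield stream.write(b"STARTTLS\r\n")
+ yield stream.read_until(b"\r\n")  # server acknowledgement
+ stream = yield stream.start_tls(False)
+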
+ .. versionadded:: 4.0
+
+ .. versionchanged:: 4.2
+ SSL certificates are validated by default; pass
+ ``ssl_options=dict(cert_reqs=ssl.CERT_NONE)`` or a
+ suitably-configured `ssl.SSLContext` to disable.
+ """
+ if (self._read_callback or self._read_future or
+ self._write_callback or self._write_futures or
+ self._connect_callback or self._connect_future or
+ self._pending_callbacks or self._closed or
+ self._read_buffer or self._write_buffer):
+ raise ValueError("IOStream is not idle; cannot convert to SSL")
+ if ssl_options is None:
+ if server_side:
+ ssl_options = _server_ssl_defaults
+ else:
+ ssl_options = _client_ssl_defaults
+
+ socket = self.socket
+ self.io_loop.remove_handler(socket)
+ self.socket = None
+ socket = ssl_wrap_socket(socket, ssl_options,
+ server_hostname=server_hostname,
+ server_side=server_side,
+ do_handshake_on_connect=False)
+ orig_close_callback = self._close_callback
+ self._close_callback = None
+
+ future = TracebackFuture()
+ ssl_stream = SSLIOStream(socket, ssl_options=ssl_options,
+ io_loop=self.io_loop)
+ # Wrap the original close callback so we can fail our Future as well.
+ # If we had an "unwrap" counterpart to this method we would need
+ # to restore the original callback after our Future resolves
+ # so that repeated wrap/unwrap calls don't build up layers.
+
+ def close_callback():
+ if not future.done():
+ # Note that unlike most Futures returned by IOStream,
+ # this one passes the underlying error through directly
+ # instead of wrapping everything in a StreamClosedError
+ # with a real_error attribute. This is because once the
+ # connection is established it's more helpful to raise
+ # the SSLError directly than to hide it behind a
+ # StreamClosedError (and the client is expecting SSL
+ # issues rather than network issues since this method is
+ # named start_tls).
+ future.set_exception(ssl_stream.error or StreamClosedError())
+ if orig_close_callback is not None:
+ orig_close_callback()
+ ssl_stream.set_close_callback(close_callback)
+ ssl_stream._ssl_connect_callback = lambda: future.set_result(ssl_stream)
+ ssl_stream.max_buffer_size = self.max_buffer_size
+ ssl_stream.read_chunk_size = self.read_chunk_size
+ return future
+
+ def _handle_connect(self):
+ err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
+ if err != 0:
+ self.error = socket.error(err, os.strerror(err))
+ # IOLoop implementations may vary: some of them return
+ # an error state before the socket becomes writable, so
+ # in that case a connection failure would be handled by the
+ # error path in _handle_events instead of here.
+ if self._connect_future is None:
+ gen_log.warning("Connect error on fd %s: %s",
+ self.socket.fileno(), errno.errorcode[err])
+ self.close()
+ return
+ if self._connect_callback is not None:
+ callback = self._connect_callback
+ self._connect_callback = None
+ self._run_callback(callback)
+ if self._connect_future is not None:
+ future = self._connect_future
+ self._connect_future = None
+ future.set_result(self)
+ self._connecting = False
+
+ def set_nodelay(self, value):
+ if (self.socket is not None and
+ self.socket.family in (socket.AF_INET, socket.AF_INET6)):
+ try:
+ self.socket.setsockopt(socket.IPPROTO_TCP,
+ socket.TCP_NODELAY, 1 if value else 0)
+ except socket.error as e:
+ # Sometimes setsockopt will fail if the socket is closed
+ # at the wrong time. This can happen with HTTPServer
+ # resetting the value to false between requests.
+ if e.errno != errno.EINVAL and not self._is_connreset(e):
+ raise
+
+
+class SSLIOStream(IOStream):
+ """A utility class to write to and read from a non-blocking SSL socket.
+
+ If the socket passed to the constructor is already connected,
+ it should be wrapped with::
+
+ ssl.wrap_socket(sock, do_handshake_on_connect=False, **kwargs)
+
+ before constructing the `SSLIOStream`. Unconnected sockets will be
+ wrapped when `IOStream.connect` is finished.
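+
+ A client-side sketch for an already-connected socket (host and
+ port are illustrative; certificate checking is left at the
+ ``ssl`` module's defaults)::
+
+ sock = socket.create_connection(("example.com", 443))
+ sock = ssl.wrap_socket(sock, do_handshake_on_connect=False)
+ stream = SSLIOStream(sock)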
+ """
+ def __init__(self, *args, **kwargs):
+ """The ``ssl_options`` keyword argument may either be an
+ `ssl.SSLContext` object or a dictionary of keywords arguments
+ for `ssl.wrap_socket`
+ """
+ self._ssl_options = kwargs.pop('ssl_options', _client_ssl_defaults)
+ super(SSLIOStream, self).__init__(*args, **kwargs)
+ self._ssl_accepting = True
+ self._handshake_reading = False
+ self._handshake_writing = False
+ self._ssl_connect_callback = None
+ self._server_hostname = None
+
+ # If the socket is already connected, attempt to start the handshake.
+ try:
+ self.socket.getpeername()
+ except socket.error:
+ pass
+ else:
+ # Indirectly start the handshake, which will run on the next
+ # IOLoop iteration and then the real IO state will be set in
+ # _handle_events.
+ self._add_io_state(self.io_loop.WRITE)
+
+ def reading(self):
+ return self._handshake_reading or super(SSLIOStream, self).reading()
+
+ def writing(self):
+ return self._handshake_writing or super(SSLIOStream, self).writing()
+
+ def _got_empty_write(self, size):
+ # With OpenSSL, if we couldn't write the entire buffer,
+ # the very same string object must be used on the
+ # next call to send. Therefore we suppress
+ # merging the write buffer after an incomplete send.
+ # A cleaner solution would be to set
+ # SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER, but this is
+ # not yet accessible from python
+ # (http://bugs.python.org/issue8240)
+ self._freeze_write_buffer(size)
+
+ def _do_ssl_handshake(self):
+ # Based on code from test_ssl.py in the python stdlib
+ try:
+ self._handshake_reading = False
+ self._handshake_writing = False
+ self.socket.do_handshake()
+ except ssl.SSLError as err:
+ if err.args[0] == ssl.SSL_ERROR_WANT_READ:
+ self._handshake_reading = True
+ return
+ elif err.args[0] == ssl.SSL_ERROR_WANT_WRITE:
+ self._handshake_writing = True
+ return
+ elif err.args[0] in (ssl.SSL_ERROR_EOF,
+ ssl.SSL_ERROR_ZERO_RETURN):
+ return self.close(exc_info=True)
+ elif err.args[0] == ssl.SSL_ERROR_SSL:
+ try:
+ peer = self.socket.getpeername()
+ except Exception:
+ peer = '(not connected)'
+ gen_log.warning("SSL Error on %s %s: %s",
+ self.socket.fileno(), peer, err)
+ return self.close(exc_info=True)
+ raise
+ except socket.error as err:
+ # Some port scans (e.g. nmap in -sT mode) have been known
+ # to cause do_handshake to raise EBADF and ENOTCONN, so make
+ # those errors quiet as well.
+ # https://groups.google.com/forum/?fromgroups#!topic/python-tornado/ApucKJat1_0
+ if (self._is_connreset(err) or
+ err.args[0] in (errno.EBADF, errno.ENOTCONN)):
+ return self.close(exc_info=True)
+ raise
+ except AttributeError:
+ # On Linux, if the connection was reset before the call to
+ # wrap_socket, do_handshake will fail with an
+ # AttributeError.
+ return self.close(exc_info=True)
+ else:
+ self._ssl_accepting = False
+ if not self._verify_cert(self.socket.getpeercert()):
+ self.close()
+ return
+ self._run_ssl_connect_callback()
+
+ def _run_ssl_connect_callback(self):
+ if self._ssl_connect_callback is not None:
+ callback = self._ssl_connect_callback
+ self._ssl_connect_callback = None
+ self._run_callback(callback)
+ if self._ssl_connect_future is not None:
+ future = self._ssl_connect_future
+ self._ssl_connect_future = None
+ future.set_result(self)
+
+ def _verify_cert(self, peercert):
+ """Returns True if peercert is valid according to the configured
+ validation mode and hostname.
+
+ The ssl handshake already tested the certificate for a valid
+ CA signature; the only thing that remains is to check
+ the hostname.
+ """
+ if isinstance(self._ssl_options, dict):
+ verify_mode = self._ssl_options.get('cert_reqs', ssl.CERT_NONE)
+ elif isinstance(self._ssl_options, ssl.SSLContext):
+ verify_mode = self._ssl_options.verify_mode
+ assert verify_mode in (ssl.CERT_NONE, ssl.CERT_REQUIRED, ssl.CERT_OPTIONAL)
+ if verify_mode == ssl.CERT_NONE or self._server_hostname is None:
+ return True
+ cert = self.socket.getpeercert()
+ if cert is None and verify_mode == ssl.CERT_REQUIRED:
+ gen_log.warning("No SSL certificate given")
+ return False
+ try:
+ ssl_match_hostname(peercert, self._server_hostname)
+ except SSLCertificateError as e:
+ gen_log.warning("Invalid SSL certificate: %s" % e)
+ return False
+ else:
+ return True
+
+ def _handle_read(self):
+ if self._ssl_accepting:
+ self._do_ssl_handshake()
+ return
+ super(SSLIOStream, self)._handle_read()
+
+ def _handle_write(self):
+ if self._ssl_accepting:
+ self._do_ssl_handshake()
+ return
+ super(SSLIOStream, self)._handle_write()
+
+ def connect(self, address, callback=None, server_hostname=None):
+ self._server_hostname = server_hostname
+ # Pass a dummy callback to super.connect(), which is slightly
+ # more efficient than letting it return a Future we ignore.
+ super(SSLIOStream, self).connect(address, callback=lambda: None)
+ return self.wait_for_handshake(callback)
+
+ def _handle_connect(self):
+ # Call the superclass method to check for errors.
+ super(SSLIOStream, self)._handle_connect()
+ if self.closed():
+ return
+ # When the connection is complete, wrap the socket for SSL
+ # traffic. Note that we do this by overriding _handle_connect
+ # instead of by passing a callback to super().connect because
+ # user callbacks are enqueued asynchronously on the IOLoop,
+ # but since _handle_events calls _handle_connect immediately
+ # followed by _handle_write we need this to be synchronous.
+ #
+ # The IOLoop will get confused if we swap out self.socket while the
+ # fd is registered, so remove it now and re-register after
+ # wrap_socket().
+ self.io_loop.remove_handler(self.socket)
+ old_state = self._state
+ self._state = None
+ self.socket = ssl_wrap_socket(self.socket, self._ssl_options,
+ server_hostname=self._server_hostname,
+ do_handshake_on_connect=False)
+ self._add_io_state(old_state)
+
+ def wait_for_handshake(self, callback=None):
+ """Wait for the initial SSL handshake to complete.
+
+ If a ``callback`` is given, it will be called with no
+ arguments once the handshake is complete; otherwise this
+ method returns a `.Future` which will resolve to the
+ stream itself after the handshake is complete.
+
+ Once the handshake is complete, information such as
+ the peer's certificate and NPN/ALPN selections may be
+ accessed on ``self.socket``.
+
+ This method is intended for use on server-side streams
+ or after using `IOStream.start_tls`; it should not be used
+ with `IOStream.connect` (which already waits for the
+ handshake to complete). It may only be called once per stream.
+
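+ A server-side sketch from a coroutine (assuming ``stream`` is an
+ `SSLIOStream` accepted by a `.TCPServer`)::
+
+ yield stream.wait_for_handshake()
+ print(stream.socket.getpeercert())
+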
+ .. versionadded:: 4.2
+ """
+ if (self._ssl_connect_callback is not None or
+ self._ssl_connect_future is not None):
+ raise RuntimeError("Already waiting")
+ if callback is not None:
+ self._ssl_connect_callback = stack_context.wrap(callback)
+ future = None
+ else:
+ future = self._ssl_connect_future = TracebackFuture()
+ if not self._ssl_accepting:
+ self._run_ssl_connect_callback()
+ return future
+
+ def write_to_fd(self, data):
+ try:
+ return self.socket.send(data)
+ except ssl.SSLError as e:
+ if e.args[0] == ssl.SSL_ERROR_WANT_WRITE:
+ # In Python 3.5+, SSLSocket.send raises a WANT_WRITE error if
+ # the socket is not writeable; we need to transform this into
+ # an EWOULDBLOCK socket.error or a zero return value,
+ # either of which will be recognized by the caller of this
+ # method. Prior to Python 3.5, an unwriteable socket would
+ # simply return 0 bytes written.
+ return 0
+ raise
+ finally:
+ # Avoid keeping a reference to data, which can be a memoryview.
+ # See https://github.com/tornadoweb/tornado/pull/2008
+ del data
+
+ def read_from_fd(self):
+ if self._ssl_accepting:
+ # If the handshake hasn't finished yet, there can't be anything
+ # to read (attempting to read may or may not raise an exception
+ # depending on the SSL version)
+ return None
+ try:
+ # SSLSocket objects have both a read() and recv() method,
+ # while regular sockets only have recv().
+ # The recv() method blocks (at least in python 2.6) if it is
+ # called when there is nothing to read, so we have to use
+ # read() instead.
+ chunk = self.socket.read(self.read_chunk_size)
+ except ssl.SSLError as e:
+ # SSLError is a subclass of socket.error, so this except
+ # block must come first.
+ if e.args[0] == ssl.SSL_ERROR_WANT_READ:
+ return None
+ else:
+ raise
+ except socket.error as e:
+ if e.args[0] in _ERRNO_WOULDBLOCK:
+ return None
+ else:
+ raise
+ if not chunk:
+ self.close()
+ return None
+ return chunk
+
+ def _is_connreset(self, e):
+ if isinstance(e, ssl.SSLError) and e.args[0] == ssl.SSL_ERROR_EOF:
+ return True
+ return super(SSLIOStream, self)._is_connreset(e)
+
+
+class PipeIOStream(BaseIOStream):
+ """Pipe-based `IOStream` implementation.
+
+ The constructor takes an integer file descriptor (such as one returned
+ by `os.pipe`) rather than an open file object. Pipes are generally
+ one-way, so a `PipeIOStream` can be used for reading or writing but not
+ both.
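+
+ A minimal sketch wiring up a pipe pair (names are illustrative)::
+
+ r, w = os.pipe()
+ reader = PipeIOStream(r)
+ writer = PipeIOStream(w)
+
+ def on_data(data):
+ print(data)
+
+ writer.write(b"hello")
+ reader.read_bytes(5, on_data)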
+ """
+ def __init__(self, fd, *args, **kwargs):
+ self.fd = fd
+ _set_nonblocking(fd)
+ super(PipeIOStream, self).__init__(*args, **kwargs)
+
+ def fileno(self):
+ return self.fd
+
+ def close_fd(self):
+ os.close(self.fd)
+
+ def write_to_fd(self, data):
+ try:
+ return os.write(self.fd, data)
+ finally:
+ # Avoid keeping a reference to data, which can be a memoryview.
+ # See https://github.com/tornadoweb/tornado/pull/2008
+ del data
+
+ def read_from_fd(self):
+ try:
+ chunk = os.read(self.fd, self.read_chunk_size)
+ except (IOError, OSError) as e:
+ if errno_from_exception(e) in _ERRNO_WOULDBLOCK:
+ return None
+ elif errno_from_exception(e) == errno.EBADF:
+ # If the writing half of a pipe is closed, select will
+ # report it as readable but reads will fail with EBADF.
+ self.close(exc_info=True)
+ return None
+ else:
+ raise
+ if not chunk:
+ self.close()
+ return None
+ return chunk
+
+
+def doctests():
+ import doctest
+ return doctest.DocTestSuite()
diff --git a/contrib/python/tornado/tornado-4/tornado/locale.py b/contrib/python/tornado/tornado-4/tornado/locale.py
index 7dba10d616..682bc534b0 100644
--- a/contrib/python/tornado/tornado-4/tornado/locale.py
+++ b/contrib/python/tornado/tornado-4/tornado/locale.py
@@ -1,521 +1,521 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-# Copyright 2009 Facebook
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Translation methods for generating localized strings.
-
-To load a locale and generate a translated string::
-
- user_locale = tornado.locale.get("es_LA")
- print(user_locale.translate("Sign out"))
-
-`tornado.locale.get()` returns the closest matching locale, not necessarily the
-specific locale you requested. You can support pluralization with
-additional arguments to `~Locale.translate()`, e.g.::
-
- people = [...]
- message = user_locale.translate(
- "%(list)s is online", "%(list)s are online", len(people))
- print(message % {"list": user_locale.list(people)})
-
-The first string is chosen if ``len(people) == 1``, otherwise the second
-string is chosen.
-
-Applications should call one of `load_translations` (which uses a simple
-CSV format) or `load_gettext_translations` (which uses the ``.mo`` format
-supported by `gettext` and related tools). If neither method is called,
-the `Locale.translate` method will simply return the original string.
-"""
-
-from __future__ import absolute_import, division, print_function
-
-import codecs
-import csv
-import datetime
-from io import BytesIO
-import numbers
-import os
-import re
-
-from tornado import escape
-from tornado.log import gen_log
-from tornado.util import PY3
-
-from tornado._locale_data import LOCALE_NAMES
-
-_default_locale = "en_US"
-_translations = {} # type: dict
-_supported_locales = frozenset([_default_locale])
-_use_gettext = False
-CONTEXT_SEPARATOR = "\x04"
-
-
-def get(*locale_codes):
- """Returns the closest match for the given locale codes.
-
- We iterate over all given locale codes in order. If we have a tight
- or a loose match for the code (e.g., "en" for "en_US"), we return
- the locale. Otherwise we move to the next code in the list.
-
- By default we return ``en_US`` if no translations are found for any of
- the specified locales. You can change the default locale with
- `set_default_locale()`.
- """
- return Locale.get_closest(*locale_codes)
-
-
-def set_default_locale(code):
- """Sets the default locale.
-
- The default locale is assumed to be the language used for all strings
- in the system. The translations loaded from disk are mappings from
- the default locale to the destination locale. Consequently, you don't
- need to create a translation file for the default locale.
- """
- global _default_locale
- global _supported_locales
- _default_locale = code
- _supported_locales = frozenset(list(_translations.keys()) + [_default_locale])
-
-
-def load_translations(directory, encoding=None):
- """Loads translations from CSV files in a directory.
-
- Translations are strings with optional Python-style named placeholders
- (e.g., ``My name is %(name)s``) and their associated translations.
-
- The directory should have translation files of the form ``LOCALE.csv``,
- e.g. ``es_GT.csv``. The CSV files should have two or three columns: string,
- translation, and an optional plural indicator. Plural indicators should
- be one of "plural" or "singular". A given string can have both singular
- and plural forms. For example ``%(name)s liked this`` may have a
- different verb conjugation depending on whether %(name)s is one
- name or a list of names. There should be two rows in the CSV file for
- that string, one with plural indicator "singular", and one "plural".
- For strings with no verbs that would change on translation, simply
- use "unknown" or the empty string (or don't include the column at all).
-
- The file is read using the `csv` module in the default "excel" dialect.
- In this format there should not be spaces after the commas.
-
- If no ``encoding`` parameter is given, the encoding will be
- detected automatically (among UTF-8 and UTF-16) if the file
- contains a byte-order marker (BOM), defaulting to UTF-8 if no BOM
- is present.
-
- Example translation ``es_LA.csv``::
-
- "I love you","Te amo"
- "%(name)s liked this","A %(name)s les gustó esto","plural"
- "%(name)s liked this","A %(name)s le gustó esto","singular"
-
- .. versionchanged:: 4.3
- Added ``encoding`` parameter. Added support for BOM-based encoding
- detection, UTF-16, and UTF-8-with-BOM.
- """
- global _translations
- global _supported_locales
- _translations = {}
- for path in os.listdir(directory):
- if not path.endswith(".csv"):
- continue
- locale, extension = path.split(".")
- if not re.match("[a-z]+(_[A-Z]+)?$", locale):
- gen_log.error("Unrecognized locale %r (path: %s)", locale,
- os.path.join(directory, path))
- continue
- full_path = os.path.join(directory, path)
- if encoding is None:
- # Try to autodetect encoding based on the BOM. Use a local
- # name so that the detected encoding of one file does not
- # leak into later iterations of the loop.
- with open(full_path, 'rb') as f:
- data = f.read(len(codecs.BOM_UTF16_LE))
- if data in (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE):
- file_encoding = 'utf-16'
- else:
- # utf-8-sig is "utf-8 with optional BOM". It's discouraged
- # in most cases but is common with CSV files because Excel
- # cannot read utf-8 files without a BOM.
- file_encoding = 'utf-8-sig'
- else:
- file_encoding = encoding
- if PY3:
- # python 3: csv.reader requires a file open in text mode.
- # Force utf8 to avoid dependence on $LANG environment variable.
- f = open(full_path, "r", encoding=file_encoding)
- else:
- # python 2: csv can only handle byte strings (in ascii-compatible
- # encodings), which we decode below. Transcode everything into
- # utf8 before passing it to csv.reader.
- f = BytesIO()
- with codecs.open(full_path, "r", encoding=file_encoding) as infile:
- f.write(escape.utf8(infile.read()))
- f.seek(0)
- _translations[locale] = {}
- for i, row in enumerate(csv.reader(f)):
- if not row or len(row) < 2:
- continue
- row = [escape.to_unicode(c).strip() for c in row]
- english, translation = row[:2]
- if len(row) > 2:
- plural = row[2] or "unknown"
- else:
- plural = "unknown"
- if plural not in ("plural", "singular", "unknown"):
- gen_log.error("Unrecognized plural indicator %r in %s line %d",
- plural, path, i + 1)
- continue
- _translations[locale].setdefault(plural, {})[english] = translation
- f.close()
- _supported_locales = frozenset(list(_translations.keys()) + [_default_locale])
- gen_log.debug("Supported locales: %s", sorted(_supported_locales))
-
-
-def load_gettext_translations(directory, domain):
- """Loads translations from `gettext`'s locale tree
-
- Locale tree is similar to system's ``/usr/share/locale``, like::
-
- {directory}/{lang}/LC_MESSAGES/{domain}.mo
-
- Three steps are required to have your app translated:
-
- 1. Generate POT translation file::
-
- xgettext --language=Python --keyword=_:1,2 -d mydomain file1.py file2.html etc
-
- 2. Merge against existing POT file::
-
- msgmerge old.po mydomain.po > new.po
-
- 3. Compile::
-
- msgfmt mydomain.po -o {directory}/pt_BR/LC_MESSAGES/mydomain.mo
- """
- import gettext
- global _translations
- global _supported_locales
- global _use_gettext
- _translations = {}
- for lang in os.listdir(directory):
- if lang.startswith('.'):
- continue # skip .svn, etc
- if os.path.isfile(os.path.join(directory, lang)):
- continue
- try:
- os.stat(os.path.join(directory, lang, "LC_MESSAGES", domain + ".mo"))
- _translations[lang] = gettext.translation(domain, directory,
- languages=[lang])
- except Exception as e:
- gen_log.error("Cannot load translation for '%s': %s", lang, str(e))
- continue
- _supported_locales = frozenset(list(_translations.keys()) + [_default_locale])
- _use_gettext = True
- gen_log.debug("Supported locales: %s", sorted(_supported_locales))
-
-
-def get_supported_locales():
- """Returns a list of all the supported locale codes."""
- return _supported_locales
-
-
-class Locale(object):
- """Object representing a locale.
-
- After calling one of `load_translations` or `load_gettext_translations`,
- call `get` or `get_closest` to get a Locale object.
- """
- @classmethod
- def get_closest(cls, *locale_codes):
- """Returns the closest match for the given locale code."""
- for code in locale_codes:
- if not code:
- continue
- code = code.replace("-", "_")
- parts = code.split("_")
- if len(parts) > 2:
- continue
- elif len(parts) == 2:
- code = parts[0].lower() + "_" + parts[1].upper()
- if code in _supported_locales:
- return cls.get(code)
- if parts[0].lower() in _supported_locales:
- return cls.get(parts[0].lower())
- return cls.get(_default_locale)
-
- @classmethod
- def get(cls, code):
- """Returns the Locale for the given locale code.
-
- If it is not supported, we raise an exception.
- """
- if not hasattr(cls, "_cache"):
- cls._cache = {}
- if code not in cls._cache:
- assert code in _supported_locales
- translations = _translations.get(code, None)
- if translations is None:
- locale = CSVLocale(code, {})
- elif _use_gettext:
- locale = GettextLocale(code, translations)
- else:
- locale = CSVLocale(code, translations)
- cls._cache[code] = locale
- return cls._cache[code]
-
- def __init__(self, code, translations):
- self.code = code
- self.name = LOCALE_NAMES.get(code, {}).get("name", u"Unknown")
- self.rtl = False
- for prefix in ["fa", "ar", "he"]:
- if self.code.startswith(prefix):
- self.rtl = True
- break
- self.translations = translations
-
- # Initialize strings for date formatting
- _ = self.translate
- self._months = [
- _("January"), _("February"), _("March"), _("April"),
- _("May"), _("June"), _("July"), _("August"),
- _("September"), _("October"), _("November"), _("December")]
- self._weekdays = [
- _("Monday"), _("Tuesday"), _("Wednesday"), _("Thursday"),
- _("Friday"), _("Saturday"), _("Sunday")]
-
- def translate(self, message, plural_message=None, count=None):
- """Returns the translation for the given message for this locale.
-
- If ``plural_message`` is given, you must also provide
- ``count``. We return ``plural_message`` when ``count != 1``,
- and we return the singular form for the given message when
- ``count == 1``.
- """
- raise NotImplementedError()
-
- def pgettext(self, context, message, plural_message=None, count=None):
- raise NotImplementedError()
-
- def format_date(self, date, gmt_offset=0, relative=True, shorter=False,
- full_format=False):
- """Formats the given date (which should be GMT).
-
- By default, we return a relative time (e.g., "2 minutes ago"). You
- can return an absolute date string with ``relative=False``.
-
- You can force a full format date ("July 10, 1980") with
- ``full_format=True``.
-
- This method is primarily intended for dates in the past.
- For dates in the future, we fall back to full format.
- """
- if isinstance(date, numbers.Real):
- date = datetime.datetime.utcfromtimestamp(date)
- now = datetime.datetime.utcnow()
- if date > now:
- if relative and (date - now).seconds < 60:
- # Due to clock skew, some things may appear slightly
- # in the future. Round timestamps in the immediate
- # future down to now in relative mode.
- date = now
- else:
- # Otherwise, future dates always use the full format.
- full_format = True
- local_date = date - datetime.timedelta(minutes=gmt_offset)
- local_now = now - datetime.timedelta(minutes=gmt_offset)
- local_yesterday = local_now - datetime.timedelta(hours=24)
- difference = now - date
- seconds = difference.seconds
- days = difference.days
-
- _ = self.translate
- format = None
- if not full_format:
- if relative and days == 0:
- if seconds < 50:
- return _("1 second ago", "%(seconds)d seconds ago",
- seconds) % {"seconds": seconds}
-
- if seconds < 50 * 60:
- minutes = round(seconds / 60.0)
- return _("1 minute ago", "%(minutes)d minutes ago",
- minutes) % {"minutes": minutes}
-
- hours = round(seconds / (60.0 * 60))
- return _("1 hour ago", "%(hours)d hours ago",
- hours) % {"hours": hours}
-
- if days == 0:
- format = _("%(time)s")
- elif days == 1 and local_date.day == local_yesterday.day and \
- relative:
- format = _("yesterday") if shorter else \
- _("yesterday at %(time)s")
- elif days < 5:
- format = _("%(weekday)s") if shorter else \
- _("%(weekday)s at %(time)s")
- elif days < 334: # 11 months, since the same month last year would be confusing
- format = _("%(month_name)s %(day)s") if shorter else \
- _("%(month_name)s %(day)s at %(time)s")
-
- if format is None:
- format = _("%(month_name)s %(day)s, %(year)s") if shorter else \
- _("%(month_name)s %(day)s, %(year)s at %(time)s")
-
- tfhour_clock = self.code not in ("en", "en_US", "zh_CN")
- if tfhour_clock:
- str_time = "%d:%02d" % (local_date.hour, local_date.minute)
- elif self.code == "zh_CN":
- str_time = "%s%d:%02d" % (
- (u'\u4e0a\u5348', u'\u4e0b\u5348')[local_date.hour >= 12],
- local_date.hour % 12 or 12, local_date.minute)
- else:
- str_time = "%d:%02d %s" % (
- local_date.hour % 12 or 12, local_date.minute,
- ("am", "pm")[local_date.hour >= 12])
-
- return format % {
- "month_name": self._months[local_date.month - 1],
- "weekday": self._weekdays[local_date.weekday()],
- "day": str(local_date.day),
- "year": str(local_date.year),
- "time": str_time
- }
-
- def format_day(self, date, gmt_offset=0, dow=True):
- """Formats the given date as a day of week.
-
- Example: "Monday, January 22". You can remove the day of week with
- ``dow=False``.
- """
- local_date = date - datetime.timedelta(minutes=gmt_offset)
- _ = self.translate
- if dow:
- return _("%(weekday)s, %(month_name)s %(day)s") % {
- "month_name": self._months[local_date.month - 1],
- "weekday": self._weekdays[local_date.weekday()],
- "day": str(local_date.day),
- }
- else:
- return _("%(month_name)s %(day)s") % {
- "month_name": self._months[local_date.month - 1],
- "day": str(local_date.day),
- }
-
- def list(self, parts):
- """Returns a comma-separated list for the given list of parts.
-
- The format is, e.g., "A, B and C", "A and B" or just "A" for lists
- of size 1.
- """
- _ = self.translate
- if len(parts) == 0:
- return ""
- if len(parts) == 1:
- return parts[0]
- comma = u' \u0648 ' if self.code.startswith("fa") else u", "
- return _("%(commas)s and %(last)s") % {
- "commas": comma.join(parts[:-1]),
- "last": parts[len(parts) - 1],
- }
-
- def friendly_number(self, value):
- """Returns a comma-separated number for the given integer."""
- if self.code not in ("en", "en_US"):
- return str(value)
- value = str(value)
- parts = []
- while value:
- parts.append(value[-3:])
- value = value[:-3]
- return ",".join(reversed(parts))
-
-
-class CSVLocale(Locale):
- """Locale implementation using tornado's CSV translation format."""
- def translate(self, message, plural_message=None, count=None):
- if plural_message is not None:
- assert count is not None
- if count != 1:
- message = plural_message
- message_dict = self.translations.get("plural", {})
- else:
- message_dict = self.translations.get("singular", {})
- else:
- message_dict = self.translations.get("unknown", {})
- return message_dict.get(message, message)
-
- def pgettext(self, context, message, plural_message=None, count=None):
- if self.translations:
- gen_log.warning('pgettext is not supported by CSVLocale')
- return self.translate(message, plural_message, count)
-
-
-class GettextLocale(Locale):
- """Locale implementation using the `gettext` module."""
- def __init__(self, code, translations):
- try:
- # python 2
- self.ngettext = translations.ungettext
- self.gettext = translations.ugettext
- except AttributeError:
- # python 3
- self.ngettext = translations.ngettext
- self.gettext = translations.gettext
- # self.gettext must exist before __init__ is called, since it
- # calls into self.translate
- super(GettextLocale, self).__init__(code, translations)
-
- def translate(self, message, plural_message=None, count=None):
- if plural_message is not None:
- assert count is not None
- return self.ngettext(message, plural_message, count)
- else:
- return self.gettext(message)
-
- def pgettext(self, context, message, plural_message=None, count=None):
- """Allows to set context for translation, accepts plural forms.
-
- Usage example::
-
- pgettext("law", "right")
- pgettext("good", "right")
-
- Plural message example::
-
- pgettext("organization", "club", "clubs", len(clubs))
- pgettext("stick", "club", "clubs", len(clubs))
-
- To generate a POT file with context, add the following options to
- step 1 of the `load_gettext_translations` sequence::
-
- xgettext [basic options] --keyword=pgettext:1c,2 --keyword=pgettext:1c,2,3
-
- .. versionadded:: 4.2
- """
- if plural_message is not None:
- assert count is not None
- msgs_with_ctxt = ("%s%s%s" % (context, CONTEXT_SEPARATOR, message),
- "%s%s%s" % (context, CONTEXT_SEPARATOR, plural_message),
- count)
- result = self.ngettext(*msgs_with_ctxt)
- if CONTEXT_SEPARATOR in result:
- # Translation not found
- result = self.ngettext(message, plural_message, count)
- return result
- else:
- msg_with_ctxt = "%s%s%s" % (context, CONTEXT_SEPARATOR, message)
- result = self.gettext(msg_with_ctxt)
- if CONTEXT_SEPARATOR in result:
- # Translation not found
- result = message
- return result
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# Copyright 2009 Facebook
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Translation methods for generating localized strings.
+
+To load a locale and generate a translated string::
+
+ user_locale = tornado.locale.get("es_LA")
+ print(user_locale.translate("Sign out"))
+
+`tornado.locale.get()` returns the closest matching locale, not necessarily the
+specific locale you requested. You can support pluralization with
+additional arguments to `~Locale.translate()`, e.g.::
+
+ people = [...]
+ message = user_locale.translate(
+ "%(list)s is online", "%(list)s are online", len(people))
+ print(message % {"list": user_locale.list(people)})
+
+The first string is chosen if ``len(people) == 1``, otherwise the second
+string is chosen.
+
+Applications should call one of `load_translations` (which uses a simple
+CSV format) or `load_gettext_translations` (which uses the ``.mo`` format
+supported by `gettext` and related tools). If neither method is called,
+the `Locale.translate` method will simply return the original string.
+"""
+
+from __future__ import absolute_import, division, print_function
+
+import codecs
+import csv
+import datetime
+from io import BytesIO
+import numbers
+import os
+import re
+
+from tornado import escape
+from tornado.log import gen_log
+from tornado.util import PY3
+
+from tornado._locale_data import LOCALE_NAMES
+
+_default_locale = "en_US"
+_translations = {} # type: dict
+_supported_locales = frozenset([_default_locale])
+_use_gettext = False
+CONTEXT_SEPARATOR = "\x04"
+
+
+def get(*locale_codes):
+ """Returns the closest match for the given locale codes.
+
+ We iterate over all given locale codes in order. If we have a tight
+ or a loose match for the code (e.g., "en" for "en_US"), we return
+ the locale. Otherwise we move to the next code in the list.
+
+ By default we return ``en_US`` if no translations are found for any of
+ the specified locales. You can change the default locale with
+ `set_default_locale()`.
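+
+ For example, if ``es`` translations are loaded but ``es_MX`` is not,
+ the following falls back loosely to ``es`` (a hypothetical setup)::
+
+ user_locale = tornado.locale.get("es_MX", "en_US")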
+ """
+ return Locale.get_closest(*locale_codes)
+
+
+def set_default_locale(code):
+ """Sets the default locale.
+
+ The default locale is assumed to be the language used for all strings
+ in the system. The translations loaded from disk are mappings from
+ the default locale to the destination locale. Consequently, you don't
+ need to create a translation file for the default locale.
+ """
+ global _default_locale
+ global _supported_locales
+ _default_locale = code
+ _supported_locales = frozenset(list(_translations.keys()) + [_default_locale])
+
+
+def load_translations(directory, encoding=None):
+ """Loads translations from CSV files in a directory.
+
+ Translations are strings with optional Python-style named placeholders
+ (e.g., ``My name is %(name)s``) and their associated translations.
+
+ The directory should have translation files of the form ``LOCALE.csv``,
+ e.g. ``es_GT.csv``. The CSV files should have two or three columns: string,
+ translation, and an optional plural indicator. Plural indicators should
+ be one of "plural" or "singular". A given string can have both singular
+ and plural forms. For example ``%(name)s liked this`` may have a
+ different verb conjugation depending on whether %(name)s is one
+ name or a list of names. There should be two rows in the CSV file for
+ that string, one with plural indicator "singular", and one "plural".
+ For strings with no verbs that would change on translation, simply
+ use "unknown" or the empty string (or don't include the column at all).
+
+ The file is read using the `csv` module in the default "excel" dialect.
+ In this format there should not be spaces after the commas.
+
+ If no ``encoding`` parameter is given, the encoding will be
+ detected automatically (among UTF-8 and UTF-16) if the file
+ contains a byte-order marker (BOM), defaulting to UTF-8 if no BOM
+ is present.
+
+ Example translation ``es_LA.csv``::
+
+ "I love you","Te amo"
+ "%(name)s liked this","A %(name)s les gustó esto","plural"
+ "%(name)s liked this","A %(name)s le gustó esto","singular"
+
+ .. versionchanged:: 4.3
+ Added ``encoding`` parameter. Added support for BOM-based encoding
+ detection, UTF-16, and UTF-8-with-BOM.
+ """
+ global _translations
+ global _supported_locales
+ _translations = {}
+ for path in os.listdir(directory):
+ if not path.endswith(".csv"):
+ continue
+ locale, extension = path.split(".")
+ if not re.match("[a-z]+(_[A-Z]+)?$", locale):
+ gen_log.error("Unrecognized locale %r (path: %s)", locale,
+ os.path.join(directory, path))
+ continue
+ full_path = os.path.join(directory, path)
+ if encoding is None:
+ # Try to autodetect encoding based on the BOM.
+ with open(full_path, 'rb') as f:
+ data = f.read(len(codecs.BOM_UTF16_LE))
+ if data in (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE):
+ encoding = 'utf-16'
+ else:
+ # utf-8-sig is "utf-8 with optional BOM". It's discouraged
+ # in most cases but is common with CSV files because Excel
+ # cannot read utf-8 files without a BOM.
+ encoding = 'utf-8-sig'
+ if PY3:
+ # python 3: csv.reader requires a file open in text mode.
+ # Force utf8 to avoid dependence on $LANG environment variable.
+ f = open(full_path, "r", encoding=encoding)
+ else:
+ # python 2: csv can only handle byte strings (in ascii-compatible
+ # encodings), which we decode below. Transcode everything into
+ # utf8 before passing it to csv.reader.
+ f = BytesIO()
+ with codecs.open(full_path, "r", encoding=encoding) as infile:
+ f.write(escape.utf8(infile.read()))
+ f.seek(0)
+ _translations[locale] = {}
+ for i, row in enumerate(csv.reader(f)):
+ if not row or len(row) < 2:
+ continue
+ row = [escape.to_unicode(c).strip() for c in row]
+ english, translation = row[:2]
+ if len(row) > 2:
+ plural = row[2] or "unknown"
+ else:
+ plural = "unknown"
+ if plural not in ("plural", "singular", "unknown"):
+ gen_log.error("Unrecognized plural indicator %r in %s line %d",
+ plural, path, i + 1)
+ continue
+ _translations[locale].setdefault(plural, {})[english] = translation
+ f.close()
+ _supported_locales = frozenset(list(_translations.keys()) + [_default_locale])
+ gen_log.debug("Supported locales: %s", sorted(_supported_locales))
+
+
+def load_gettext_translations(directory, domain):
+ """Loads translations from `gettext`'s locale tree
+
+ Locale tree is similar to system's ``/usr/share/locale``, like::
+
+ {directory}/{lang}/LC_MESSAGES/{domain}.mo
+
+ Three steps are required to have your app translated:
+
+ 1. Generate POT translation file::
+
+ xgettext --language=Python --keyword=_:1,2 -d mydomain file1.py file2.html etc
+
+ 2. Merge against existing POT file::
+
+ msgmerge old.po mydomain.po > new.po
+
+ 3. Compile::
+
+ msgfmt mydomain.po -o {directory}/pt_BR/LC_MESSAGES/mydomain.mo
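+
+    Then, at startup (directory and domain names here are illustrative)::
+
+        tornado.locale.load_gettext_translations("locale_root", "mydomain")
+        user_locale = tornado.locale.get("pt_BR")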
+ """
+ import gettext
+ global _translations
+ global _supported_locales
+ global _use_gettext
+ _translations = {}
+ for lang in os.listdir(directory):
+ if lang.startswith('.'):
+ continue # skip .svn, etc
+ if os.path.isfile(os.path.join(directory, lang)):
+ continue
+ try:
+ os.stat(os.path.join(directory, lang, "LC_MESSAGES", domain + ".mo"))
+ _translations[lang] = gettext.translation(domain, directory,
+ languages=[lang])
+ except Exception as e:
+ gen_log.error("Cannot load translation for '%s': %s", lang, str(e))
+ continue
+ _supported_locales = frozenset(list(_translations.keys()) + [_default_locale])
+ _use_gettext = True
+ gen_log.debug("Supported locales: %s", sorted(_supported_locales))
+
+
+def get_supported_locales():
+ """Returns a list of all the supported locale codes."""
+ return _supported_locales
+
+
+class Locale(object):
+ """Object representing a locale.
+
+ After calling one of `load_translations` or `load_gettext_translations`,
+ call `get` or `get_closest` to get a Locale object.
+ """
+ @classmethod
+ def get_closest(cls, *locale_codes):
+ """Returns the closest match for the given locale code."""
+ for code in locale_codes:
+ if not code:
+ continue
+ code = code.replace("-", "_")
+ parts = code.split("_")
+ if len(parts) > 2:
+ continue
+ elif len(parts) == 2:
+ code = parts[0].lower() + "_" + parts[1].upper()
+ if code in _supported_locales:
+ return cls.get(code)
+ if parts[0].lower() in _supported_locales:
+ return cls.get(parts[0].lower())
+ return cls.get(_default_locale)
+
+ @classmethod
+ def get(cls, code):
+ """Returns the Locale for the given locale code.
+
+ If it is not supported, we raise an exception.
+ """
+ if not hasattr(cls, "_cache"):
+ cls._cache = {}
+ if code not in cls._cache:
+ assert code in _supported_locales
+ translations = _translations.get(code, None)
+ if translations is None:
+ locale = CSVLocale(code, {})
+ elif _use_gettext:
+ locale = GettextLocale(code, translations)
+ else:
+ locale = CSVLocale(code, translations)
+ cls._cache[code] = locale
+ return cls._cache[code]
+
+ def __init__(self, code, translations):
+ self.code = code
+ self.name = LOCALE_NAMES.get(code, {}).get("name", u"Unknown")
+ self.rtl = False
+ for prefix in ["fa", "ar", "he"]:
+ if self.code.startswith(prefix):
+ self.rtl = True
+ break
+ self.translations = translations
+
+ # Initialize strings for date formatting
+ _ = self.translate
+ self._months = [
+ _("January"), _("February"), _("March"), _("April"),
+ _("May"), _("June"), _("July"), _("August"),
+ _("September"), _("October"), _("November"), _("December")]
+ self._weekdays = [
+ _("Monday"), _("Tuesday"), _("Wednesday"), _("Thursday"),
+ _("Friday"), _("Saturday"), _("Sunday")]
+
+ def translate(self, message, plural_message=None, count=None):
+ """Returns the translation for the given message for this locale.
+
+ If ``plural_message`` is given, you must also provide
+ ``count``. We return ``plural_message`` when ``count != 1``,
+ and we return the singular form for the given message when
+ ``count == 1``.
+ """
+ raise NotImplementedError()
+
+ def pgettext(self, context, message, plural_message=None, count=None):
+ raise NotImplementedError()
+
+ def format_date(self, date, gmt_offset=0, relative=True, shorter=False,
+ full_format=False):
+ """Formats the given date (which should be GMT).
+
+ By default, we return a relative time (e.g., "2 minutes ago"). You
+ can return an absolute date string with ``relative=False``.
+
+ You can force a full format date ("July 10, 1980") with
+ ``full_format=True``.
+
+ This method is primarily intended for dates in the past.
+ For dates in the future, we fall back to full format.
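+
+        An illustrative sketch (the exact wording depends on the locale's
+        translations)::
+
+            # About two minutes in the past -> "2 minutes ago"
+            locale.format_date(
+                datetime.datetime.utcnow() - datetime.timedelta(minutes=2))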
+ """
+ if isinstance(date, numbers.Real):
+ date = datetime.datetime.utcfromtimestamp(date)
+ now = datetime.datetime.utcnow()
+ if date > now:
+ if relative and (date - now).seconds < 60:
+                # Due to clock skew, some timestamps may appear slightly
+                # in the future. Round timestamps in the immediate
+                # future down to now in relative mode.
+ date = now
+ else:
+ # Otherwise, future dates always use the full format.
+ full_format = True
+ local_date = date - datetime.timedelta(minutes=gmt_offset)
+ local_now = now - datetime.timedelta(minutes=gmt_offset)
+ local_yesterday = local_now - datetime.timedelta(hours=24)
+ difference = now - date
+ seconds = difference.seconds
+ days = difference.days
+
+ _ = self.translate
+ format = None
+ if not full_format:
+ if relative and days == 0:
+ if seconds < 50:
+ return _("1 second ago", "%(seconds)d seconds ago",
+ seconds) % {"seconds": seconds}
+
+ if seconds < 50 * 60:
+ minutes = round(seconds / 60.0)
+ return _("1 minute ago", "%(minutes)d minutes ago",
+ minutes) % {"minutes": minutes}
+
+ hours = round(seconds / (60.0 * 60))
+ return _("1 hour ago", "%(hours)d hours ago",
+ hours) % {"hours": hours}
+
+ if days == 0:
+ format = _("%(time)s")
+ elif days == 1 and local_date.day == local_yesterday.day and \
+ relative:
+ format = _("yesterday") if shorter else \
+ _("yesterday at %(time)s")
+ elif days < 5:
+ format = _("%(weekday)s") if shorter else \
+ _("%(weekday)s at %(time)s")
+ elif days < 334: # 11mo, since confusing for same month last year
+ format = _("%(month_name)s %(day)s") if shorter else \
+ _("%(month_name)s %(day)s at %(time)s")
+
+ if format is None:
+ format = _("%(month_name)s %(day)s, %(year)s") if shorter else \
+ _("%(month_name)s %(day)s, %(year)s at %(time)s")
+
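+        # Use a 24-hour clock for every locale except en/en_US (12-hour
+        # am/pm) and zh_CN (12-hour with an explicit AM/PM marker).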
+ tfhour_clock = self.code not in ("en", "en_US", "zh_CN")
+ if tfhour_clock:
+ str_time = "%d:%02d" % (local_date.hour, local_date.minute)
+ elif self.code == "zh_CN":
+ str_time = "%s%d:%02d" % (
+ (u'\u4e0a\u5348', u'\u4e0b\u5348')[local_date.hour >= 12],
+ local_date.hour % 12 or 12, local_date.minute)
+ else:
+ str_time = "%d:%02d %s" % (
+ local_date.hour % 12 or 12, local_date.minute,
+ ("am", "pm")[local_date.hour >= 12])
+
+ return format % {
+ "month_name": self._months[local_date.month - 1],
+ "weekday": self._weekdays[local_date.weekday()],
+ "day": str(local_date.day),
+ "year": str(local_date.year),
+ "time": str_time
+ }
+
+ def format_day(self, date, gmt_offset=0, dow=True):
+ """Formats the given date as a day of week.
+
+ Example: "Monday, January 22". You can remove the day of week with
+ ``dow=False``.
+ """
+ local_date = date - datetime.timedelta(minutes=gmt_offset)
+ _ = self.translate
+ if dow:
+ return _("%(weekday)s, %(month_name)s %(day)s") % {
+ "month_name": self._months[local_date.month - 1],
+ "weekday": self._weekdays[local_date.weekday()],
+ "day": str(local_date.day),
+ }
+ else:
+ return _("%(month_name)s %(day)s") % {
+ "month_name": self._months[local_date.month - 1],
+ "day": str(local_date.day),
+ }
+
+ def list(self, parts):
+ """Returns a comma-separated list for the given list of parts.
+
+ The format is, e.g., "A, B and C", "A and B" or just "A" for lists
+ of size 1.
+ """
+ _ = self.translate
+ if len(parts) == 0:
+ return ""
+ if len(parts) == 1:
+ return parts[0]
+ comma = u' \u0648 ' if self.code.startswith("fa") else u", "
+ return _("%(commas)s and %(last)s") % {
+ "commas": comma.join(parts[:-1]),
+ "last": parts[len(parts) - 1],
+ }
+
+ def friendly_number(self, value):
+ """Returns a comma-separated number for the given integer."""
+ if self.code not in ("en", "en_US"):
+ return str(value)
+ value = str(value)
+ parts = []
+ while value:
+ parts.append(value[-3:])
+ value = value[:-3]
+ return ",".join(reversed(parts))
+
+
+class CSVLocale(Locale):
+ """Locale implementation using tornado's CSV translation format."""
+ def translate(self, message, plural_message=None, count=None):
+ if plural_message is not None:
+ assert count is not None
+ if count != 1:
+ message = plural_message
+ message_dict = self.translations.get("plural", {})
+ else:
+ message_dict = self.translations.get("singular", {})
+ else:
+ message_dict = self.translations.get("unknown", {})
+ return message_dict.get(message, message)
+
+ def pgettext(self, context, message, plural_message=None, count=None):
+ if self.translations:
+ gen_log.warning('pgettext is not supported by CSVLocale')
+ return self.translate(message, plural_message, count)
+
+
+class GettextLocale(Locale):
+ """Locale implementation using the `gettext` module."""
+ def __init__(self, code, translations):
+ try:
+ # python 2
+ self.ngettext = translations.ungettext
+ self.gettext = translations.ugettext
+ except AttributeError:
+ # python 3
+ self.ngettext = translations.ngettext
+ self.gettext = translations.gettext
+ # self.gettext must exist before __init__ is called, since it
+ # calls into self.translate
+ super(GettextLocale, self).__init__(code, translations)
+
+ def translate(self, message, plural_message=None, count=None):
+ if plural_message is not None:
+ assert count is not None
+ return self.ngettext(message, plural_message, count)
+ else:
+ return self.gettext(message)
+
+ def pgettext(self, context, message, plural_message=None, count=None):
+ """Allows to set context for translation, accepts plural forms.
+
+ Usage example::
+
+ pgettext("law", "right")
+ pgettext("good", "right")
+
+ Plural message example::
+
+ pgettext("organization", "club", "clubs", len(clubs))
+ pgettext("stick", "club", "clubs", len(clubs))
+
+        To generate a POT file with context, add the following options to
+        step 1 of the `load_gettext_translations` sequence::
+
+ xgettext [basic options] --keyword=pgettext:1c,2 --keyword=pgettext:1c,2,3
+
+ .. versionadded:: 4.2
+ """
+ if plural_message is not None:
+ assert count is not None
+ msgs_with_ctxt = ("%s%s%s" % (context, CONTEXT_SEPARATOR, message),
+ "%s%s%s" % (context, CONTEXT_SEPARATOR, plural_message),
+ count)
+ result = self.ngettext(*msgs_with_ctxt)
+ if CONTEXT_SEPARATOR in result:
+ # Translation not found
+ result = self.ngettext(message, plural_message, count)
+ return result
+ else:
+ msg_with_ctxt = "%s%s%s" % (context, CONTEXT_SEPARATOR, message)
+ result = self.gettext(msg_with_ctxt)
+ if CONTEXT_SEPARATOR in result:
+ # Translation not found
+ result = message
+ return result
diff --git a/contrib/python/tornado/tornado-4/tornado/locks.py b/contrib/python/tornado/tornado-4/tornado/locks.py
index 4f9ecf6dfd..6099c9f95a 100644
--- a/contrib/python/tornado/tornado-4/tornado/locks.py
+++ b/contrib/python/tornado/tornado-4/tornado/locks.py
@@ -1,512 +1,512 @@
-# Copyright 2015 The Tornado Authors
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from __future__ import absolute_import, division, print_function
-
-import collections
-
-from tornado import gen, ioloop
-from tornado.concurrent import Future
-
-__all__ = ['Condition', 'Event', 'Semaphore', 'BoundedSemaphore', 'Lock']
-
-
-class _TimeoutGarbageCollector(object):
- """Base class for objects that periodically clean up timed-out waiters.
-
-    Avoids a memory leak in a common pattern like:
-
- while True:
- yield condition.wait(short_timeout)
- print('looping....')
- """
- def __init__(self):
- self._waiters = collections.deque() # Futures.
- self._timeouts = 0
-
- def _garbage_collect(self):
- # Occasionally clear timed-out waiters.
- self._timeouts += 1
- if self._timeouts > 100:
- self._timeouts = 0
- self._waiters = collections.deque(
- w for w in self._waiters if not w.done())
-
-
-class Condition(_TimeoutGarbageCollector):
- """A condition allows one or more coroutines to wait until notified.
-
- Like a standard `threading.Condition`, but does not need an underlying lock
- that is acquired and released.
-
- With a `Condition`, coroutines can wait to be notified by other coroutines:
-
- .. testcode::
-
- from tornado import gen
- from tornado.ioloop import IOLoop
- from tornado.locks import Condition
-
- condition = Condition()
-
- @gen.coroutine
- def waiter():
- print("I'll wait right here")
- yield condition.wait() # Yield a Future.
- print("I'm done waiting")
-
- @gen.coroutine
- def notifier():
- print("About to notify")
- condition.notify()
- print("Done notifying")
-
- @gen.coroutine
- def runner():
- # Yield two Futures; wait for waiter() and notifier() to finish.
- yield [waiter(), notifier()]
-
- IOLoop.current().run_sync(runner)
-
- .. testoutput::
-
- I'll wait right here
- About to notify
- Done notifying
- I'm done waiting
-
- `wait` takes an optional ``timeout`` argument, which is either an absolute
- timestamp::
-
- io_loop = IOLoop.current()
-
- # Wait up to 1 second for a notification.
- yield condition.wait(timeout=io_loop.time() + 1)
-
- ...or a `datetime.timedelta` for a timeout relative to the current time::
-
- # Wait up to 1 second.
- yield condition.wait(timeout=datetime.timedelta(seconds=1))
-
- The method raises `tornado.gen.TimeoutError` if there's no notification
- before the deadline.
- """
-
- def __init__(self):
- super(Condition, self).__init__()
- self.io_loop = ioloop.IOLoop.current()
-
- def __repr__(self):
- result = '<%s' % (self.__class__.__name__, )
- if self._waiters:
- result += ' waiters[%s]' % len(self._waiters)
- return result + '>'
-
- def wait(self, timeout=None):
- """Wait for `.notify`.
-
-        Returns a `.Future` that resolves to ``True`` if the condition is
-        notified, or to ``False`` after a timeout.
- """
- waiter = Future()
- self._waiters.append(waiter)
- if timeout:
- def on_timeout():
- waiter.set_result(False)
- self._garbage_collect()
- io_loop = ioloop.IOLoop.current()
- timeout_handle = io_loop.add_timeout(timeout, on_timeout)
- waiter.add_done_callback(
- lambda _: io_loop.remove_timeout(timeout_handle))
- return waiter
-
- def notify(self, n=1):
- """Wake ``n`` waiters."""
- waiters = [] # Waiters we plan to run right now.
- while n and self._waiters:
- waiter = self._waiters.popleft()
- if not waiter.done(): # Might have timed out.
- n -= 1
- waiters.append(waiter)
-
- for waiter in waiters:
- waiter.set_result(True)
-
- def notify_all(self):
- """Wake all waiters."""
- self.notify(len(self._waiters))
-
-
-class Event(object):
- """An event blocks coroutines until its internal flag is set to True.
-
- Similar to `threading.Event`.
-
- A coroutine can wait for an event to be set. Once it is set, calls to
- ``yield event.wait()`` will not block unless the event has been cleared:
-
- .. testcode::
-
- from tornado import gen
- from tornado.ioloop import IOLoop
- from tornado.locks import Event
-
- event = Event()
-
- @gen.coroutine
- def waiter():
- print("Waiting for event")
- yield event.wait()
- print("Not waiting this time")
- yield event.wait()
- print("Done")
-
- @gen.coroutine
- def setter():
- print("About to set the event")
- event.set()
-
- @gen.coroutine
- def runner():
- yield [waiter(), setter()]
-
- IOLoop.current().run_sync(runner)
-
- .. testoutput::
-
- Waiting for event
- About to set the event
- Not waiting this time
- Done
- """
- def __init__(self):
- self._future = Future()
-
- def __repr__(self):
- return '<%s %s>' % (
- self.__class__.__name__, 'set' if self.is_set() else 'clear')
-
- def is_set(self):
- """Return ``True`` if the internal flag is true."""
- return self._future.done()
-
- def set(self):
- """Set the internal flag to ``True``. All waiters are awakened.
-
- Calling `.wait` once the flag is set will not block.
- """
- if not self._future.done():
- self._future.set_result(None)
-
- def clear(self):
- """Reset the internal flag to ``False``.
-
- Calls to `.wait` will block until `.set` is called.
- """
- if self._future.done():
- self._future = Future()
-
- def wait(self, timeout=None):
- """Block until the internal flag is true.
-
- Returns a Future, which raises `tornado.gen.TimeoutError` after a
- timeout.
- """
- if timeout is None:
- return self._future
- else:
- return gen.with_timeout(timeout, self._future)
-
-
-class _ReleasingContextManager(object):
- """Releases a Lock or Semaphore at the end of a "with" statement.
-
- with (yield semaphore.acquire()):
- pass
-
- # Now semaphore.release() has been called.
- """
- def __init__(self, obj):
- self._obj = obj
-
- def __enter__(self):
- pass
-
- def __exit__(self, exc_type, exc_val, exc_tb):
- self._obj.release()
-
-
-class Semaphore(_TimeoutGarbageCollector):
- """A lock that can be acquired a fixed number of times before blocking.
-
- A Semaphore manages a counter representing the number of `.release` calls
- minus the number of `.acquire` calls, plus an initial value. The `.acquire`
- method blocks if necessary until it can return without making the counter
- negative.
-
- Semaphores limit access to a shared resource. To allow access for two
- workers at a time:
-
- .. testsetup:: semaphore
-
- from collections import deque
-
- from tornado import gen
- from tornado.ioloop import IOLoop
- from tornado.concurrent import Future
-
- # Ensure reliable doctest output: resolve Futures one at a time.
- futures_q = deque([Future() for _ in range(3)])
-
- @gen.coroutine
- def simulator(futures):
- for f in futures:
- yield gen.moment
- f.set_result(None)
-
- IOLoop.current().add_callback(simulator, list(futures_q))
-
- def use_some_resource():
- return futures_q.popleft()
-
- .. testcode:: semaphore
-
- from tornado import gen
- from tornado.ioloop import IOLoop
- from tornado.locks import Semaphore
-
- sem = Semaphore(2)
-
- @gen.coroutine
- def worker(worker_id):
- yield sem.acquire()
- try:
- print("Worker %d is working" % worker_id)
- yield use_some_resource()
- finally:
- print("Worker %d is done" % worker_id)
- sem.release()
-
- @gen.coroutine
- def runner():
- # Join all workers.
- yield [worker(i) for i in range(3)]
-
- IOLoop.current().run_sync(runner)
-
- .. testoutput:: semaphore
-
- Worker 0 is working
- Worker 1 is working
- Worker 0 is done
- Worker 2 is working
- Worker 1 is done
- Worker 2 is done
-
- Workers 0 and 1 are allowed to run concurrently, but worker 2 waits until
- the semaphore has been released once, by worker 0.
-
- `.acquire` is a context manager, so ``worker`` could be written as::
-
- @gen.coroutine
- def worker(worker_id):
- with (yield sem.acquire()):
- print("Worker %d is working" % worker_id)
- yield use_some_resource()
-
- # Now the semaphore has been released.
- print("Worker %d is done" % worker_id)
-
- In Python 3.5, the semaphore itself can be used as an async context
- manager::
-
- async def worker(worker_id):
- async with sem:
- print("Worker %d is working" % worker_id)
- await use_some_resource()
-
- # Now the semaphore has been released.
- print("Worker %d is done" % worker_id)
-
- .. versionchanged:: 4.3
- Added ``async with`` support in Python 3.5.
- """
- def __init__(self, value=1):
- super(Semaphore, self).__init__()
- if value < 0:
- raise ValueError('semaphore initial value must be >= 0')
-
- self._value = value
-
- def __repr__(self):
- res = super(Semaphore, self).__repr__()
- extra = 'locked' if self._value == 0 else 'unlocked,value:{0}'.format(
- self._value)
- if self._waiters:
- extra = '{0},waiters:{1}'.format(extra, len(self._waiters))
- return '<{0} [{1}]>'.format(res[1:-1], extra)
-
- def release(self):
- """Increment the counter and wake one waiter."""
- self._value += 1
- while self._waiters:
- waiter = self._waiters.popleft()
- if not waiter.done():
- self._value -= 1
-
- # If the waiter is a coroutine paused at
- #
- # with (yield semaphore.acquire()):
- #
- # then the context manager's __exit__ calls release() at the end
- # of the "with" block.
- waiter.set_result(_ReleasingContextManager(self))
- break
-
- def acquire(self, timeout=None):
- """Decrement the counter. Returns a Future.
-
- Block if the counter is zero and wait for a `.release`. The Future
- raises `.TimeoutError` after the deadline.
- """
- waiter = Future()
- if self._value > 0:
- self._value -= 1
- waiter.set_result(_ReleasingContextManager(self))
- else:
- self._waiters.append(waiter)
- if timeout:
- def on_timeout():
- waiter.set_exception(gen.TimeoutError())
- self._garbage_collect()
- io_loop = ioloop.IOLoop.current()
- timeout_handle = io_loop.add_timeout(timeout, on_timeout)
- waiter.add_done_callback(
- lambda _: io_loop.remove_timeout(timeout_handle))
- return waiter
-
- def __enter__(self):
- raise RuntimeError(
- "Use Semaphore like 'with (yield semaphore.acquire())', not like"
- " 'with semaphore'")
-
- __exit__ = __enter__
-
- @gen.coroutine
- def __aenter__(self):
- yield self.acquire()
-
- @gen.coroutine
- def __aexit__(self, typ, value, tb):
- self.release()
-
-
-class BoundedSemaphore(Semaphore):
- """A semaphore that prevents release() being called too many times.
-
- If `.release` would increment the semaphore's value past the initial
- value, it raises `ValueError`. Semaphores are mostly used to guard
- resources with limited capacity, so a semaphore released too many times
- is a sign of a bug.
- """
- def __init__(self, value=1):
- super(BoundedSemaphore, self).__init__(value=value)
- self._initial_value = value
-
- def release(self):
- """Increment the counter and wake one waiter."""
- if self._value >= self._initial_value:
- raise ValueError("Semaphore released too many times")
- super(BoundedSemaphore, self).release()
-
-
-class Lock(object):
- """A lock for coroutines.
-
- A Lock begins unlocked, and `acquire` locks it immediately. While it is
- locked, a coroutine that yields `acquire` waits until another coroutine
- calls `release`.
-
- Releasing an unlocked lock raises `RuntimeError`.
-
- `acquire` supports the context manager protocol in all Python versions:
-
- >>> from tornado import gen, locks
- >>> lock = locks.Lock()
- >>>
- >>> @gen.coroutine
- ... def f():
- ... with (yield lock.acquire()):
- ... # Do something holding the lock.
- ... pass
- ...
- ... # Now the lock is released.
-
- In Python 3.5, `Lock` also supports the async context manager
- protocol. Note that in this case there is no `acquire`, because
- ``async with`` includes both the ``yield`` and the ``acquire``
- (just as it does with `threading.Lock`):
-
- >>> async def f(): # doctest: +SKIP
- ... async with lock:
- ... # Do something holding the lock.
- ... pass
- ...
- ... # Now the lock is released.
-
- .. versionchanged:: 4.3
- Added ``async with`` support in Python 3.5.
-
- """
- def __init__(self):
- self._block = BoundedSemaphore(value=1)
-
- def __repr__(self):
- return "<%s _block=%s>" % (
- self.__class__.__name__,
- self._block)
-
- def acquire(self, timeout=None):
- """Attempt to lock. Returns a Future.
-
- Returns a Future, which raises `tornado.gen.TimeoutError` after a
- timeout.
- """
- return self._block.acquire(timeout)
-
- def release(self):
- """Unlock.
-
- The first coroutine in line waiting for `acquire` gets the lock.
-
- If not locked, raise a `RuntimeError`.
- """
- try:
- self._block.release()
- except ValueError:
- raise RuntimeError('release unlocked lock')
-
- def __enter__(self):
- raise RuntimeError(
- "Use Lock like 'with (yield lock)', not like 'with lock'")
-
- __exit__ = __enter__
-
- @gen.coroutine
- def __aenter__(self):
- yield self.acquire()
-
- @gen.coroutine
- def __aexit__(self, typ, value, tb):
- self.release()
+# Copyright 2015 The Tornado Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from __future__ import absolute_import, division, print_function
+
+import collections
+
+from tornado import gen, ioloop
+from tornado.concurrent import Future
+
+__all__ = ['Condition', 'Event', 'Semaphore', 'BoundedSemaphore', 'Lock']
+
+
+class _TimeoutGarbageCollector(object):
+ """Base class for objects that periodically clean up timed-out waiters.
+
+    Avoids a memory leak in a common pattern like:
+
+ while True:
+ yield condition.wait(short_timeout)
+ print('looping....')
+ """
+ def __init__(self):
+ self._waiters = collections.deque() # Futures.
+ self._timeouts = 0
+
+ def _garbage_collect(self):
+ # Occasionally clear timed-out waiters.
+ self._timeouts += 1
+ if self._timeouts > 100:
+ self._timeouts = 0
+ self._waiters = collections.deque(
+ w for w in self._waiters if not w.done())
+
+
+class Condition(_TimeoutGarbageCollector):
+ """A condition allows one or more coroutines to wait until notified.
+
+ Like a standard `threading.Condition`, but does not need an underlying lock
+ that is acquired and released.
+
+ With a `Condition`, coroutines can wait to be notified by other coroutines:
+
+ .. testcode::
+
+ from tornado import gen
+ from tornado.ioloop import IOLoop
+ from tornado.locks import Condition
+
+ condition = Condition()
+
+ @gen.coroutine
+ def waiter():
+ print("I'll wait right here")
+ yield condition.wait() # Yield a Future.
+ print("I'm done waiting")
+
+ @gen.coroutine
+ def notifier():
+ print("About to notify")
+ condition.notify()
+ print("Done notifying")
+
+ @gen.coroutine
+ def runner():
+ # Yield two Futures; wait for waiter() and notifier() to finish.
+ yield [waiter(), notifier()]
+
+ IOLoop.current().run_sync(runner)
+
+ .. testoutput::
+
+ I'll wait right here
+ About to notify
+ Done notifying
+ I'm done waiting
+
+ `wait` takes an optional ``timeout`` argument, which is either an absolute
+ timestamp::
+
+ io_loop = IOLoop.current()
+
+ # Wait up to 1 second for a notification.
+ yield condition.wait(timeout=io_loop.time() + 1)
+
+ ...or a `datetime.timedelta` for a timeout relative to the current time::
+
+ # Wait up to 1 second.
+ yield condition.wait(timeout=datetime.timedelta(seconds=1))
+
+ The method raises `tornado.gen.TimeoutError` if there's no notification
+ before the deadline.
+ """
+
+ def __init__(self):
+ super(Condition, self).__init__()
+ self.io_loop = ioloop.IOLoop.current()
+
+ def __repr__(self):
+ result = '<%s' % (self.__class__.__name__, )
+ if self._waiters:
+ result += ' waiters[%s]' % len(self._waiters)
+ return result + '>'
+
+ def wait(self, timeout=None):
+ """Wait for `.notify`.
+
+        Returns a `.Future` that resolves to ``True`` if the condition is
+        notified, or to ``False`` after a timeout.
+ """
+ waiter = Future()
+ self._waiters.append(waiter)
+ if timeout:
+ def on_timeout():
+ waiter.set_result(False)
+ self._garbage_collect()
+ io_loop = ioloop.IOLoop.current()
+ timeout_handle = io_loop.add_timeout(timeout, on_timeout)
+ waiter.add_done_callback(
+ lambda _: io_loop.remove_timeout(timeout_handle))
+ return waiter
+
+ def notify(self, n=1):
+ """Wake ``n`` waiters."""
+ waiters = [] # Waiters we plan to run right now.
+ while n and self._waiters:
+ waiter = self._waiters.popleft()
+ if not waiter.done(): # Might have timed out.
+ n -= 1
+ waiters.append(waiter)
+
+ for waiter in waiters:
+ waiter.set_result(True)
+
+ def notify_all(self):
+ """Wake all waiters."""
+ self.notify(len(self._waiters))
+
+
+class Event(object):
+ """An event blocks coroutines until its internal flag is set to True.
+
+ Similar to `threading.Event`.
+
+ A coroutine can wait for an event to be set. Once it is set, calls to
+ ``yield event.wait()`` will not block unless the event has been cleared:
+
+ .. testcode::
+
+ from tornado import gen
+ from tornado.ioloop import IOLoop
+ from tornado.locks import Event
+
+ event = Event()
+
+ @gen.coroutine
+ def waiter():
+ print("Waiting for event")
+ yield event.wait()
+ print("Not waiting this time")
+ yield event.wait()
+ print("Done")
+
+ @gen.coroutine
+ def setter():
+ print("About to set the event")
+ event.set()
+
+ @gen.coroutine
+ def runner():
+ yield [waiter(), setter()]
+
+ IOLoop.current().run_sync(runner)
+
+ .. testoutput::
+
+ Waiting for event
+ About to set the event
+ Not waiting this time
+ Done
+ """
+ def __init__(self):
+ self._future = Future()
+
+ def __repr__(self):
+ return '<%s %s>' % (
+ self.__class__.__name__, 'set' if self.is_set() else 'clear')
+
+ def is_set(self):
+ """Return ``True`` if the internal flag is true."""
+ return self._future.done()
+
+ def set(self):
+ """Set the internal flag to ``True``. All waiters are awakened.
+
+ Calling `.wait` once the flag is set will not block.
+ """
+ if not self._future.done():
+ self._future.set_result(None)
+
+ def clear(self):
+ """Reset the internal flag to ``False``.
+
+ Calls to `.wait` will block until `.set` is called.
+ """
+ if self._future.done():
+ self._future = Future()
+
+ def wait(self, timeout=None):
+ """Block until the internal flag is true.
+
+ Returns a Future, which raises `tornado.gen.TimeoutError` after a
+ timeout.
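+
+        A sketch with a relative timeout (the timeout forms mirror
+        `Condition.wait`)::
+
+            yield event.wait(timeout=datetime.timedelta(seconds=1))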
+ """
+ if timeout is None:
+ return self._future
+ else:
+ return gen.with_timeout(timeout, self._future)
+
+
+class _ReleasingContextManager(object):
+ """Releases a Lock or Semaphore at the end of a "with" statement.
+
+ with (yield semaphore.acquire()):
+ pass
+
+ # Now semaphore.release() has been called.
+ """
+ def __init__(self, obj):
+ self._obj = obj
+
+ def __enter__(self):
+ pass
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ self._obj.release()
+
+
+class Semaphore(_TimeoutGarbageCollector):
+ """A lock that can be acquired a fixed number of times before blocking.
+
+ A Semaphore manages a counter representing the number of `.release` calls
+ minus the number of `.acquire` calls, plus an initial value. The `.acquire`
+ method blocks if necessary until it can return without making the counter
+ negative.
+
+ Semaphores limit access to a shared resource. To allow access for two
+ workers at a time:
+
+ .. testsetup:: semaphore
+
+ from collections import deque
+
+ from tornado import gen
+ from tornado.ioloop import IOLoop
+ from tornado.concurrent import Future
+
+ # Ensure reliable doctest output: resolve Futures one at a time.
+ futures_q = deque([Future() for _ in range(3)])
+
+ @gen.coroutine
+ def simulator(futures):
+ for f in futures:
+ yield gen.moment
+ f.set_result(None)
+
+ IOLoop.current().add_callback(simulator, list(futures_q))
+
+ def use_some_resource():
+ return futures_q.popleft()
+
+ .. testcode:: semaphore
+
+ from tornado import gen
+ from tornado.ioloop import IOLoop
+ from tornado.locks import Semaphore
+
+ sem = Semaphore(2)
+
+ @gen.coroutine
+ def worker(worker_id):
+ yield sem.acquire()
+ try:
+ print("Worker %d is working" % worker_id)
+ yield use_some_resource()
+ finally:
+ print("Worker %d is done" % worker_id)
+ sem.release()
+
+ @gen.coroutine
+ def runner():
+ # Join all workers.
+ yield [worker(i) for i in range(3)]
+
+ IOLoop.current().run_sync(runner)
+
+ .. testoutput:: semaphore
+
+ Worker 0 is working
+ Worker 1 is working
+ Worker 0 is done
+ Worker 2 is working
+ Worker 1 is done
+ Worker 2 is done
+
+ Workers 0 and 1 are allowed to run concurrently, but worker 2 waits until
+ the semaphore has been released once, by worker 0.
+
+ `.acquire` is a context manager, so ``worker`` could be written as::
+
+ @gen.coroutine
+ def worker(worker_id):
+ with (yield sem.acquire()):
+ print("Worker %d is working" % worker_id)
+ yield use_some_resource()
+
+ # Now the semaphore has been released.
+ print("Worker %d is done" % worker_id)
+
+ In Python 3.5, the semaphore itself can be used as an async context
+ manager::
+
+ async def worker(worker_id):
+ async with sem:
+ print("Worker %d is working" % worker_id)
+ await use_some_resource()
+
+ # Now the semaphore has been released.
+ print("Worker %d is done" % worker_id)
+
+ .. versionchanged:: 4.3
+ Added ``async with`` support in Python 3.5.
+ """
+ def __init__(self, value=1):
+ super(Semaphore, self).__init__()
+ if value < 0:
+ raise ValueError('semaphore initial value must be >= 0')
+
+ self._value = value
+
+ def __repr__(self):
+ res = super(Semaphore, self).__repr__()
+ extra = 'locked' if self._value == 0 else 'unlocked,value:{0}'.format(
+ self._value)
+ if self._waiters:
+ extra = '{0},waiters:{1}'.format(extra, len(self._waiters))
+ return '<{0} [{1}]>'.format(res[1:-1], extra)
+
+ def release(self):
+ """Increment the counter and wake one waiter."""
+ self._value += 1
+ while self._waiters:
+ waiter = self._waiters.popleft()
+ if not waiter.done():
+ self._value -= 1
+
+ # If the waiter is a coroutine paused at
+ #
+ # with (yield semaphore.acquire()):
+ #
+ # then the context manager's __exit__ calls release() at the end
+ # of the "with" block.
+ waiter.set_result(_ReleasingContextManager(self))
+ break
+
+ def acquire(self, timeout=None):
+ """Decrement the counter. Returns a Future.
+
+ Block if the counter is zero and wait for a `.release`. The Future
+ raises `.TimeoutError` after the deadline.
+ """
+ waiter = Future()
+ if self._value > 0:
+ self._value -= 1
+ waiter.set_result(_ReleasingContextManager(self))
+ else:
+ self._waiters.append(waiter)
+ if timeout:
+ def on_timeout():
+ waiter.set_exception(gen.TimeoutError())
+ self._garbage_collect()
+ io_loop = ioloop.IOLoop.current()
+ timeout_handle = io_loop.add_timeout(timeout, on_timeout)
+ waiter.add_done_callback(
+ lambda _: io_loop.remove_timeout(timeout_handle))
+ return waiter
+
+ def __enter__(self):
+ raise RuntimeError(
+ "Use Semaphore like 'with (yield semaphore.acquire())', not like"
+ " 'with semaphore'")
+
+ __exit__ = __enter__
+
+ @gen.coroutine
+ def __aenter__(self):
+ yield self.acquire()
+
+ @gen.coroutine
+ def __aexit__(self, typ, value, tb):
+ self.release()
+
+
+class BoundedSemaphore(Semaphore):
+ """A semaphore that prevents release() being called too many times.
+
+ If `.release` would increment the semaphore's value past the initial
+ value, it raises `ValueError`. Semaphores are mostly used to guard
+ resources with limited capacity, so a semaphore released too many times
+ is a sign of a bug.
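+
+    A minimal sketch of the failure mode (inside a coroutine)::
+
+        sem = BoundedSemaphore(1)
+        yield sem.acquire()
+        sem.release()
+        sem.release()  # Raises ValueError: released too many times.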
+ """
+ def __init__(self, value=1):
+ super(BoundedSemaphore, self).__init__(value=value)
+ self._initial_value = value
+
+ def release(self):
+ """Increment the counter and wake one waiter."""
+ if self._value >= self._initial_value:
+ raise ValueError("Semaphore released too many times")
+ super(BoundedSemaphore, self).release()
+
+
+class Lock(object):
+ """A lock for coroutines.
+
+ A Lock begins unlocked, and `acquire` locks it immediately. While it is
+ locked, a coroutine that yields `acquire` waits until another coroutine
+ calls `release`.
+
+ Releasing an unlocked lock raises `RuntimeError`.
+
+ `acquire` supports the context manager protocol in all Python versions:
+
+ >>> from tornado import gen, locks
+ >>> lock = locks.Lock()
+ >>>
+ >>> @gen.coroutine
+ ... def f():
+ ... with (yield lock.acquire()):
+ ... # Do something holding the lock.
+ ... pass
+ ...
+ ... # Now the lock is released.
+
+ In Python 3.5, `Lock` also supports the async context manager
+ protocol. Note that in this case there is no `acquire`, because
+ ``async with`` includes both the ``yield`` and the ``acquire``
+ (just as it does with `threading.Lock`):
+
+ >>> async def f(): # doctest: +SKIP
+ ... async with lock:
+ ... # Do something holding the lock.
+ ... pass
+ ...
+ ... # Now the lock is released.
+
+ .. versionchanged:: 4.3
+ Added ``async with`` support in Python 3.5.
+
+ """
+ def __init__(self):
+ self._block = BoundedSemaphore(value=1)
+
+ def __repr__(self):
+ return "<%s _block=%s>" % (
+ self.__class__.__name__,
+ self._block)
+
+ def acquire(self, timeout=None):
+ """Attempt to lock. Returns a Future.
+
+ Returns a Future, which raises `tornado.gen.TimeoutError` after a
+ timeout.
+ """
+ return self._block.acquire(timeout)
+
+ def release(self):
+ """Unlock.
+
+ The first coroutine in line waiting for `acquire` gets the lock.
+
+ If not locked, raise a `RuntimeError`.
+ """
+ try:
+ self._block.release()
+ except ValueError:
+ raise RuntimeError('release unlocked lock')
+
+ def __enter__(self):
+ raise RuntimeError(
+ "Use Lock like 'with (yield lock)', not like 'with lock'")
+
+ __exit__ = __enter__
+
+ @gen.coroutine
+ def __aenter__(self):
+ yield self.acquire()
+
+ @gen.coroutine
+ def __aexit__(self, typ, value, tb):
+ self.release()
diff --git a/contrib/python/tornado/tornado-4/tornado/log.py b/contrib/python/tornado/tornado-4/tornado/log.py
index 654afc021e..b5ddb75e98 100644
--- a/contrib/python/tornado/tornado-4/tornado/log.py
+++ b/contrib/python/tornado/tornado-4/tornado/log.py
@@ -1,290 +1,290 @@
-#!/usr/bin/env python
-#
-# Copyright 2012 Facebook
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Logging support for Tornado.
-
-Tornado uses three logger streams:
-
-* ``tornado.access``: Per-request logging for Tornado's HTTP servers (and
- potentially other servers in the future)
-* ``tornado.application``: Logging of errors from application code (i.e.
- uncaught exceptions from callbacks)
-* ``tornado.general``: General-purpose logging, including any errors
- or warnings from Tornado itself.
-
-These streams may be configured independently using the standard library's
-`logging` module. For example, you may wish to send ``tornado.access`` logs
-to a separate file for analysis.
-"""
-from __future__ import absolute_import, division, print_function
-
-import logging
-import logging.handlers
-import sys
-
-from tornado.escape import _unicode
-from tornado.util import unicode_type, basestring_type
-
-try:
- import colorama
-except ImportError:
- colorama = None
-
-try:
- import curses # type: ignore
-except ImportError:
- curses = None
-
-# Logger objects for internal tornado use
-access_log = logging.getLogger("tornado.access")
-app_log = logging.getLogger("tornado.application")
-gen_log = logging.getLogger("tornado.general")
-
-
-def _stderr_supports_color():
- try:
- if hasattr(sys.stderr, 'isatty') and sys.stderr.isatty():
- if curses:
- curses.setupterm()
- if curses.tigetnum("colors") > 0:
- return True
- elif colorama:
- if sys.stderr is getattr(colorama.initialise, 'wrapped_stderr',
- object()):
- return True
- except Exception:
- # Very broad exception handling because it's always better to
- # fall back to non-colored logs than to break at startup.
- pass
- return False
-
-
-def _safe_unicode(s):
- try:
- return _unicode(s)
- except UnicodeDecodeError:
- return repr(s)
-
-
-class LogFormatter(logging.Formatter):
- """Log formatter used in Tornado.
-
- Key features of this formatter are:
-
- * Color support when logging to a terminal that supports it.
- * Timestamps on every log line.
- * Robust against str/bytes encoding problems.
-
- This formatter is enabled automatically by
- `tornado.options.parse_command_line` or `tornado.options.parse_config_file`
- (unless ``--logging=none`` is used).
-
- Color support on Windows versions that do not support ANSI color codes is
- enabled by use of the colorama__ library. Applications that wish to use
- this must first initialize colorama with a call to ``colorama.init``.
- See the colorama documentation for details.
-
- __ https://pypi.python.org/pypi/colorama
-
- .. versionchanged:: 4.5
- Added support for ``colorama``. Changed the constructor
- signature to be compatible with `logging.config.dictConfig`.
- """
- DEFAULT_FORMAT = '%(color)s[%(levelname)1.1s %(asctime)s %(module)s:%(lineno)d]%(end_color)s %(message)s'
- DEFAULT_DATE_FORMAT = '%y%m%d %H:%M:%S'
- DEFAULT_COLORS = {
- logging.DEBUG: 4, # Blue
- logging.INFO: 2, # Green
- logging.WARNING: 3, # Yellow
- logging.ERROR: 1, # Red
- }
-
- def __init__(self, fmt=DEFAULT_FORMAT, datefmt=DEFAULT_DATE_FORMAT,
- style='%', color=True, colors=DEFAULT_COLORS):
- r"""
- :arg bool color: Enables color support.
- :arg string fmt: Log message format.
- It will be applied to the attributes dict of log records. The
- text between ``%(color)s`` and ``%(end_color)s`` will be colored
- depending on the level if color support is on.
- :arg dict colors: color mappings from logging level to terminal color
- code
-        :arg string datefmt: Datetime format.
-            Used for formatting the ``%(asctime)s`` placeholder in ``fmt``.
-
- .. versionchanged:: 3.2
-
- Added ``fmt`` and ``datefmt`` arguments.
- """
- logging.Formatter.__init__(self, datefmt=datefmt)
- self._fmt = fmt
-
- self._colors = {}
- if color and _stderr_supports_color():
- if curses is not None:
- # The curses module has some str/bytes confusion in
- # python3. Until version 3.2.3, most methods return
- # bytes, but only accept strings. In addition, we want to
- # output these strings with the logging module, which
- # works with unicode strings. The explicit calls to
- # unicode() below are harmless in python2 but will do the
- # right conversion in python 3.
- fg_color = (curses.tigetstr("setaf") or
- curses.tigetstr("setf") or "")
- if (3, 0) < sys.version_info < (3, 2, 3):
- fg_color = unicode_type(fg_color, "ascii")
-
- for levelno, code in colors.items():
- self._colors[levelno] = unicode_type(curses.tparm(fg_color, code), "ascii")
- self._normal = unicode_type(curses.tigetstr("sgr0"), "ascii")
- else:
- # If curses is not present (currently we'll only get here for
- # colorama on windows), assume hard-coded ANSI color codes.
- for levelno, code in colors.items():
- self._colors[levelno] = '\033[2;3%dm' % code
- self._normal = '\033[0m'
- else:
- self._normal = ''
-
- def format(self, record):
- try:
- message = record.getMessage()
- assert isinstance(message, basestring_type) # guaranteed by logging
- # Encoding notes: The logging module prefers to work with character
- # strings, but only enforces that log messages are instances of
- # basestring. In python 2, non-ascii bytestrings will make
- # their way through the logging framework until they blow up with
- # an unhelpful decoding error (with this formatter it happens
- # when we attach the prefix, but there are other opportunities for
- # exceptions further along in the framework).
- #
- # If a byte string makes it this far, convert it to unicode to
- # ensure it will make it out to the logs. Use repr() as a fallback
- # to ensure that all byte strings can be converted successfully,
- # but don't do it by default so we don't add extra quotes to ascii
- # bytestrings. This is a bit of a hacky place to do this, but
- # it's worth it since the encoding errors that would otherwise
- # result are so useless (and tornado is fond of using utf8-encoded
-            # byte strings wherever possible).
- record.message = _safe_unicode(message)
- except Exception as e:
- record.message = "Bad message (%r): %r" % (e, record.__dict__)
-
- record.asctime = self.formatTime(record, self.datefmt)
-
- if record.levelno in self._colors:
- record.color = self._colors[record.levelno]
- record.end_color = self._normal
- else:
- record.color = record.end_color = ''
-
- formatted = self._fmt % record.__dict__
-
- if record.exc_info:
- if not record.exc_text:
- record.exc_text = self.formatException(record.exc_info)
- if record.exc_text:
- # exc_text contains multiple lines. We need to _safe_unicode
- # each line separately so that non-utf8 bytes don't cause
- # all the newlines to turn into '\n'.
- lines = [formatted.rstrip()]
- lines.extend(_safe_unicode(ln) for ln in record.exc_text.split('\n'))
- formatted = '\n'.join(lines)
- return formatted.replace("\n", "\n ")
-
-
-def enable_pretty_logging(options=None, logger=None):
- """Turns on formatted logging output as configured.
-
- This is called automatically by `tornado.options.parse_command_line`
- and `tornado.options.parse_config_file`.
- """
- if options is None:
- import tornado.options
- options = tornado.options.options
- if options.logging is None or options.logging.lower() == 'none':
- return
- if logger is None:
- logger = logging.getLogger()
- logger.setLevel(getattr(logging, options.logging.upper()))
- if options.log_file_prefix:
- rotate_mode = options.log_rotate_mode
- if rotate_mode == 'size':
- channel = logging.handlers.RotatingFileHandler(
- filename=options.log_file_prefix,
- maxBytes=options.log_file_max_size,
- backupCount=options.log_file_num_backups)
- elif rotate_mode == 'time':
- channel = logging.handlers.TimedRotatingFileHandler(
- filename=options.log_file_prefix,
- when=options.log_rotate_when,
- interval=options.log_rotate_interval,
- backupCount=options.log_file_num_backups)
- else:
- error_message = 'The value of log_rotate_mode option should be ' +\
- '"size" or "time", not "%s".' % rotate_mode
- raise ValueError(error_message)
- channel.setFormatter(LogFormatter(color=False))
- logger.addHandler(channel)
-
- if (options.log_to_stderr or
- (options.log_to_stderr is None and not logger.handlers)):
- # Set up color if we are in a tty and curses is installed
- channel = logging.StreamHandler()
- channel.setFormatter(LogFormatter())
- logger.addHandler(channel)
-
-
-def define_logging_options(options=None):
- """Add logging-related flags to ``options``.
-
- These options are present automatically on the default options instance;
- this method is only necessary if you have created your own `.OptionParser`.
-
- .. versionadded:: 4.2
- This function existed in prior versions but was broken and undocumented until 4.2.
- """
- if options is None:
- # late import to prevent cycle
- import tornado.options
- options = tornado.options.options
- options.define("logging", default="info",
- help=("Set the Python log level. If 'none', tornado won't touch the "
- "logging configuration."),
- metavar="debug|info|warning|error|none")
- options.define("log_to_stderr", type=bool, default=None,
- help=("Send log output to stderr (colorized if possible). "
- "By default use stderr if --log_file_prefix is not set and "
- "no other logging is configured."))
- options.define("log_file_prefix", type=str, default=None, metavar="PATH",
- help=("Path prefix for log files. "
- "Note that if you are running multiple tornado processes, "
- "log_file_prefix must be different for each of them (e.g. "
- "include the port number)"))
- options.define("log_file_max_size", type=int, default=100 * 1000 * 1000,
- help="max size of log files before rollover")
- options.define("log_file_num_backups", type=int, default=10,
- help="number of log files to keep")
-
- options.define("log_rotate_when", type=str, default='midnight',
- help=("specify the type of TimedRotatingFileHandler interval "
- "other options:('S', 'M', 'H', 'D', 'W0'-'W6')"))
- options.define("log_rotate_interval", type=int, default=1,
- help="The interval value of timed rotating")
-
- options.define("log_rotate_mode", type=str, default='size',
- help="The mode of rotating files(time or size)")
-
- options.add_parse_callback(lambda: enable_pretty_logging(options))
+#!/usr/bin/env python
+#
+# Copyright 2012 Facebook
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""Logging support for Tornado.
+
+Tornado uses three logger streams:
+
+* ``tornado.access``: Per-request logging for Tornado's HTTP servers (and
+ potentially other servers in the future)
+* ``tornado.application``: Logging of errors from application code (i.e.
+ uncaught exceptions from callbacks)
+* ``tornado.general``: General-purpose logging, including any errors
+ or warnings from Tornado itself.
+
+These streams may be configured independently using the standard library's
+`logging` module. For example, you may wish to send ``tornado.access`` logs
+to a separate file for analysis.
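+
+A minimal sketch of such a configuration (the file name is illustrative)::
+
+    import logging
+
+    access_log = logging.getLogger("tornado.access")
+    access_log.addHandler(logging.FileHandler("access.log"))
+    access_log.propagate = False  # Keep access entries out of the root logger.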
+"""
+from __future__ import absolute_import, division, print_function
+
+import logging
+import logging.handlers
+import sys
+
+from tornado.escape import _unicode
+from tornado.util import unicode_type, basestring_type
+
+try:
+ import colorama
+except ImportError:
+ colorama = None
+
+try:
+ import curses # type: ignore
+except ImportError:
+ curses = None
+
+# Logger objects for internal tornado use
+access_log = logging.getLogger("tornado.access")
+app_log = logging.getLogger("tornado.application")
+gen_log = logging.getLogger("tornado.general")
+
+
+def _stderr_supports_color():
+ try:
+ if hasattr(sys.stderr, 'isatty') and sys.stderr.isatty():
+ if curses:
+ curses.setupterm()
+ if curses.tigetnum("colors") > 0:
+ return True
+ elif colorama:
+ if sys.stderr is getattr(colorama.initialise, 'wrapped_stderr',
+ object()):
+ return True
+ except Exception:
+ # Very broad exception handling because it's always better to
+ # fall back to non-colored logs than to break at startup.
+ pass
+ return False
+
+
+def _safe_unicode(s):
+ try:
+ return _unicode(s)
+ except UnicodeDecodeError:
+ return repr(s)
+
+
+class LogFormatter(logging.Formatter):
+ """Log formatter used in Tornado.
+
+ Key features of this formatter are:
+
+ * Color support when logging to a terminal that supports it.
+ * Timestamps on every log line.
+ * Robust against str/bytes encoding problems.
+
+ This formatter is enabled automatically by
+ `tornado.options.parse_command_line` or `tornado.options.parse_config_file`
+ (unless ``--logging=none`` is used).
+
+ Color support on Windows versions that do not support ANSI color codes is
+ enabled by use of the colorama__ library. Applications that wish to use
+ this must first initialize colorama with a call to ``colorama.init``.
+ See the colorama documentation for details.
+
+ __ https://pypi.python.org/pypi/colorama
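+
+    A hypothetical `logging.config.dictConfig` setup using this formatter
+    (handler and logger names are illustrative)::
+
+        import logging.config
+
+        logging.config.dictConfig({
+            "version": 1,
+            "formatters": {"tornado": {"()": "tornado.log.LogFormatter"}},
+            "handlers": {"console": {"class": "logging.StreamHandler",
+                                     "formatter": "tornado"}},
+            "root": {"handlers": ["console"], "level": "INFO"},
+        })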
+
+ .. versionchanged:: 4.5
+ Added support for ``colorama``. Changed the constructor
+ signature to be compatible with `logging.config.dictConfig`.
+ """
+ DEFAULT_FORMAT = '%(color)s[%(levelname)1.1s %(asctime)s %(module)s:%(lineno)d]%(end_color)s %(message)s'
+ DEFAULT_DATE_FORMAT = '%y%m%d %H:%M:%S'
+ DEFAULT_COLORS = {
+ logging.DEBUG: 4, # Blue
+ logging.INFO: 2, # Green
+ logging.WARNING: 3, # Yellow
+ logging.ERROR: 1, # Red
+ }
+
+ def __init__(self, fmt=DEFAULT_FORMAT, datefmt=DEFAULT_DATE_FORMAT,
+ style='%', color=True, colors=DEFAULT_COLORS):
+ r"""
+ :arg bool color: Enables color support.
+ :arg string fmt: Log message format.
+ It will be applied to the attributes dict of log records. The
+ text between ``%(color)s`` and ``%(end_color)s`` will be colored
+ depending on the level if color support is on.
+ :arg dict colors: color mappings from logging level to terminal color
+ code
+        :arg string datefmt: Datetime format.
+            Used for formatting the ``%(asctime)s`` placeholder in ``fmt``.
+
+ .. versionchanged:: 3.2
+
+ Added ``fmt`` and ``datefmt`` arguments.
+ """
+ logging.Formatter.__init__(self, datefmt=datefmt)
+ self._fmt = fmt
+
+ self._colors = {}
+ if color and _stderr_supports_color():
+ if curses is not None:
+ # The curses module has some str/bytes confusion in
+ # python3. Until version 3.2.3, most methods return
+ # bytes, but only accept strings. In addition, we want to
+ # output these strings with the logging module, which
+ # works with unicode strings. The explicit calls to
+ # unicode() below are harmless in python2 but will do the
+ # right conversion in python 3.
+ fg_color = (curses.tigetstr("setaf") or
+ curses.tigetstr("setf") or "")
+ if (3, 0) < sys.version_info < (3, 2, 3):
+ fg_color = unicode_type(fg_color, "ascii")
+
+ for levelno, code in colors.items():
+ self._colors[levelno] = unicode_type(curses.tparm(fg_color, code), "ascii")
+ self._normal = unicode_type(curses.tigetstr("sgr0"), "ascii")
+ else:
+ # If curses is not present (currently we'll only get here for
+ # colorama on windows), assume hard-coded ANSI color codes.
+ for levelno, code in colors.items():
+ self._colors[levelno] = '\033[2;3%dm' % code
+ self._normal = '\033[0m'
+ else:
+ self._normal = ''
+
+ def format(self, record):
+ try:
+ message = record.getMessage()
+ assert isinstance(message, basestring_type) # guaranteed by logging
+ # Encoding notes: The logging module prefers to work with character
+ # strings, but only enforces that log messages are instances of
+ # basestring. In python 2, non-ascii bytestrings will make
+ # their way through the logging framework until they blow up with
+ # an unhelpful decoding error (with this formatter it happens
+ # when we attach the prefix, but there are other opportunities for
+ # exceptions further along in the framework).
+ #
+ # If a byte string makes it this far, convert it to unicode to
+ # ensure it will make it out to the logs. Use repr() as a fallback
+ # to ensure that all byte strings can be converted successfully,
+ # but don't do it by default so we don't add extra quotes to ascii
+ # bytestrings. This is a bit of a hacky place to do this, but
+ # it's worth it since the encoding errors that would otherwise
+ # result are so useless (and tornado is fond of using utf8-encoded
+ # byte strings wherever possible).
+ record.message = _safe_unicode(message)
+ except Exception as e:
+ record.message = "Bad message (%r): %r" % (e, record.__dict__)
+
+ record.asctime = self.formatTime(record, self.datefmt)
+
+ if record.levelno in self._colors:
+ record.color = self._colors[record.levelno]
+ record.end_color = self._normal
+ else:
+ record.color = record.end_color = ''
+
+ formatted = self._fmt % record.__dict__
+
+ if record.exc_info:
+ if not record.exc_text:
+ record.exc_text = self.formatException(record.exc_info)
+ if record.exc_text:
+ # exc_text contains multiple lines. We need to _safe_unicode
+ # each line separately so that non-utf8 bytes don't cause
+ # all the newlines to turn into '\n'.
+ lines = [formatted.rstrip()]
+ lines.extend(_safe_unicode(ln) for ln in record.exc_text.split('\n'))
+ formatted = '\n'.join(lines)
+ return formatted.replace("\n", "\n ")
+
+
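For reference, a minimal sketch of attaching this formatter to a plain ``logging`` handler by hand; the handler choice, level, and the sample output in the comment are illustrative, not part of the module:

    import logging

    from tornado.log import LogFormatter

    # Color is applied only when stderr is a tty and curses (or colorama)
    # is usable; otherwise %(color)s and %(end_color)s render as ''.
    handler = logging.StreamHandler()
    handler.setFormatter(LogFormatter(color=True))

    root = logging.getLogger()
    root.addHandler(handler)
    root.setLevel(logging.INFO)
    root.info("hello")  # e.g. [I 220210 16:48:22 example:13] hello
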
+def enable_pretty_logging(options=None, logger=None):
+ """Turns on formatted logging output as configured.
+
+ This is called automatically by `tornado.options.parse_command_line`
+ and `tornado.options.parse_config_file`.
+ """
+ if options is None:
+ import tornado.options
+ options = tornado.options.options
+ if options.logging is None or options.logging.lower() == 'none':
+ return
+ if logger is None:
+ logger = logging.getLogger()
+ logger.setLevel(getattr(logging, options.logging.upper()))
+ if options.log_file_prefix:
+ rotate_mode = options.log_rotate_mode
+ if rotate_mode == 'size':
+ channel = logging.handlers.RotatingFileHandler(
+ filename=options.log_file_prefix,
+ maxBytes=options.log_file_max_size,
+ backupCount=options.log_file_num_backups)
+ elif rotate_mode == 'time':
+ channel = logging.handlers.TimedRotatingFileHandler(
+ filename=options.log_file_prefix,
+ when=options.log_rotate_when,
+ interval=options.log_rotate_interval,
+ backupCount=options.log_file_num_backups)
+ else:
+ error_message = 'The value of log_rotate_mode option should be ' +\
+ '"size" or "time", not "%s".' % rotate_mode
+ raise ValueError(error_message)
+ channel.setFormatter(LogFormatter(color=False))
+ logger.addHandler(channel)
+
+ if (options.log_to_stderr or
+ (options.log_to_stderr is None and not logger.handlers)):
+ # Set up color if we are in a tty and curses is installed
+ channel = logging.StreamHandler()
+ channel.setFormatter(LogFormatter())
+ logger.addHandler(channel)
+
+
+def define_logging_options(options=None):
+ """Add logging-related flags to ``options``.
+
+ These options are present automatically on the default options instance;
+ this method is only necessary if you have created your own `.OptionParser`.
+
+ .. versionadded:: 4.2
+ This function existed in prior versions but was broken and undocumented until 4.2.
+ """
+ if options is None:
+ # late import to prevent cycle
+ import tornado.options
+ options = tornado.options.options
+ options.define("logging", default="info",
+ help=("Set the Python log level. If 'none', tornado won't touch the "
+ "logging configuration."),
+ metavar="debug|info|warning|error|none")
+ options.define("log_to_stderr", type=bool, default=None,
+ help=("Send log output to stderr (colorized if possible). "
+ "By default use stderr if --log_file_prefix is not set and "
+ "no other logging is configured."))
+ options.define("log_file_prefix", type=str, default=None, metavar="PATH",
+ help=("Path prefix for log files. "
+ "Note that if you are running multiple tornado processes, "
+ "log_file_prefix must be different for each of them (e.g. "
+ "include the port number)"))
+ options.define("log_file_max_size", type=int, default=100 * 1000 * 1000,
+ help="max size of log files before rollover")
+ options.define("log_file_num_backups", type=int, default=10,
+ help="number of log files to keep")
+
+ options.define("log_rotate_when", type=str, default='midnight',
+ help=("specify the type of TimedRotatingFileHandler interval "
+ "other options:('S', 'M', 'H', 'D', 'W0'-'W6')"))
+ options.define("log_rotate_interval", type=int, default=1,
+ help="The interval value of timed rotating")
+
+ options.define("log_rotate_mode", type=str, default='size',
+ help="The mode of rotating files(time or size)")
+
+ options.add_parse_callback(lambda: enable_pretty_logging(options))
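As a usage sketch (flag values illustrative): because ``define_logging_options`` registers ``enable_pretty_logging`` as a parse callback, wiring the flags onto a private ``OptionParser`` and parsing is enough to configure logging:

    import tornado.log
    import tornado.options

    parser = tornado.options.OptionParser()
    tornado.log.define_logging_options(parser)

    # args[0] is treated as the program name and ignored.
    parser.parse_command_line(['prog', '--logging=debug'])
    assert parser.logging == 'debug'
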
diff --git a/contrib/python/tornado/tornado-4/tornado/netutil.py b/contrib/python/tornado/tornado-4/tornado/netutil.py
index 59df1435ea..e74434234a 100644
--- a/contrib/python/tornado/tornado-4/tornado/netutil.py
+++ b/contrib/python/tornado/tornado-4/tornado/netutil.py
@@ -1,531 +1,531 @@
-#!/usr/bin/env python
-#
-# Copyright 2011 Facebook
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Miscellaneous network utility code."""
-
-from __future__ import absolute_import, division, print_function
-
-import errno
-import os
-import sys
-import socket
-import stat
-
-from tornado.concurrent import dummy_executor, run_on_executor
-from tornado.ioloop import IOLoop
-from tornado.platform.auto import set_close_exec
-from tornado.util import PY3, Configurable, errno_from_exception
-
-try:
- import ssl
-except ImportError:
- # ssl is not available on Google App Engine
- ssl = None
-
-try:
- import certifi
-except ImportError:
- # certifi is optional as long as we have ssl.create_default_context.
- if ssl is None or hasattr(ssl, 'create_default_context'):
- certifi = None
- else:
- raise
-
-if PY3:
- xrange = range
-
-if hasattr(ssl, 'match_hostname') and hasattr(ssl, 'CertificateError'): # python 3.2+
- ssl_match_hostname = ssl.match_hostname
- SSLCertificateError = ssl.CertificateError
-elif ssl is None:
- ssl_match_hostname = SSLCertificateError = None # type: ignore
-else:
- import backports.ssl_match_hostname
- ssl_match_hostname = backports.ssl_match_hostname.match_hostname
- SSLCertificateError = backports.ssl_match_hostname.CertificateError # type: ignore
-
-if hasattr(ssl, 'SSLContext'):
- if hasattr(ssl, 'create_default_context'):
- # Python 2.7.9+, 3.4+
- # Note that the naming of ssl.Purpose is confusing; the purpose
- # of a context is to authenticate the opposite side of the connection.
- _client_ssl_defaults = ssl.create_default_context(
- ssl.Purpose.SERVER_AUTH)
- # load ca certs bundled with binary
- _client_ssl_defaults.load_verify_locations(certifi.where())
- _server_ssl_defaults = ssl.create_default_context(
- ssl.Purpose.CLIENT_AUTH)
- else:
- # Python 3.2-3.3
- _client_ssl_defaults = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
- _client_ssl_defaults.verify_mode = ssl.CERT_REQUIRED
- _client_ssl_defaults.load_verify_locations(certifi.where())
- _server_ssl_defaults = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
- if hasattr(ssl, 'OP_NO_COMPRESSION'):
- # Disable TLS compression to avoid CRIME and related attacks.
- # This constant wasn't added until python 3.3.
- _client_ssl_defaults.options |= ssl.OP_NO_COMPRESSION
- _server_ssl_defaults.options |= ssl.OP_NO_COMPRESSION
-
-elif ssl:
- # Python 2.6-2.7.8
- _client_ssl_defaults = dict(cert_reqs=ssl.CERT_REQUIRED,
- ca_certs=certifi.where())
- _server_ssl_defaults = {}
-else:
- # Google App Engine
- _client_ssl_defaults = dict(cert_reqs=None,
- ca_certs=None)
- _server_ssl_defaults = {}
-
-# ThreadedResolver runs getaddrinfo on a thread. If the hostname is unicode,
-# getaddrinfo attempts to import encodings.idna. If this is done at
-# module-import time, the import lock is already held by the main thread,
-# leading to deadlock. Avoid it by caching the idna encoder on the main
-# thread now.
-u'foo'.encode('idna')
-
-# For undiagnosed reasons, 'latin1' codec may also need to be preloaded.
-u'foo'.encode('latin1')
-
-# These errnos indicate that a non-blocking operation must be retried
-# at a later time. On most platforms they're the same value, but on
-# some they differ.
-_ERRNO_WOULDBLOCK = (errno.EWOULDBLOCK, errno.EAGAIN)
-
-if hasattr(errno, "WSAEWOULDBLOCK"):
- _ERRNO_WOULDBLOCK += (errno.WSAEWOULDBLOCK,) # type: ignore
-
-# Default backlog used when calling sock.listen()
-_DEFAULT_BACKLOG = 128
-
-
-def bind_sockets(port, address=None, family=socket.AF_UNSPEC,
- backlog=_DEFAULT_BACKLOG, flags=None, reuse_port=False):
- """Creates listening sockets bound to the given port and address.
-
- Returns a list of socket objects (multiple sockets are returned if
- the given address maps to multiple IP addresses, which is most common
- for mixed IPv4 and IPv6 use).
-
- Address may be either an IP address or hostname. If it's a hostname,
- the server will listen on all IP addresses associated with the
- name. Address may be an empty string or None to listen on all
- available interfaces. Family may be set to either `socket.AF_INET`
- or `socket.AF_INET6` to restrict to IPv4 or IPv6 addresses, otherwise
- both will be used if available.
-
- The ``backlog`` argument has the same meaning as for
- `socket.listen() <socket.socket.listen>`.
-
- ``flags`` is a bitmask of AI_* flags to `~socket.getaddrinfo`, like
- ``socket.AI_PASSIVE | socket.AI_NUMERICHOST``.
-
- The ``reuse_port`` option sets the ``SO_REUSEPORT`` option for every
- socket in the list. If your platform doesn't support this option,
- ``ValueError`` will be raised.
- """
- if reuse_port and not hasattr(socket, "SO_REUSEPORT"):
- raise ValueError("the platform doesn't support SO_REUSEPORT")
-
- sockets = []
- if address == "":
- address = None
- if not socket.has_ipv6 and family == socket.AF_UNSPEC:
- # Python can be compiled with --disable-ipv6, which causes
- # operations on AF_INET6 sockets to fail, but does not
- # automatically exclude those results from getaddrinfo
- # results.
- # http://bugs.python.org/issue16208
- family = socket.AF_INET
- if flags is None:
- flags = socket.AI_PASSIVE
- bound_port = None
- for res in set(socket.getaddrinfo(address, port, family, socket.SOCK_STREAM,
- 0, flags)):
- af, socktype, proto, canonname, sockaddr = res
- if (sys.platform == 'darwin' and address == 'localhost' and
- af == socket.AF_INET6 and sockaddr[3] != 0):
- # Mac OS X includes a link-local address fe80::1%lo0 in the
- # getaddrinfo results for 'localhost'. However, the firewall
- # doesn't understand that this is a local address and will
- # prompt for access (often repeatedly, due to an apparent
- # bug in its ability to remember granting access to an
- # application). Skip these addresses.
- continue
- try:
- sock = socket.socket(af, socktype, proto)
- except socket.error as e:
- if errno_from_exception(e) == errno.EAFNOSUPPORT:
- continue
- raise
- set_close_exec(sock.fileno())
- if os.name != 'nt':
- sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
- if reuse_port:
- sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
- if af == socket.AF_INET6:
- # On linux, ipv6 sockets accept ipv4 too by default,
- # but this makes it impossible to bind to both
- # 0.0.0.0 in ipv4 and :: in ipv6. On other systems,
- # separate sockets *must* be used to listen for both ipv4
- # and ipv6. For consistency, always disable ipv4 on our
- # ipv6 sockets and use a separate ipv4 socket when needed.
- #
- # Python 2.x on windows doesn't have IPPROTO_IPV6.
- if hasattr(socket, "IPPROTO_IPV6"):
- sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 1)
-
- # automatic port allocation with port=None
- # should bind on the same port on IPv4 and IPv6
- host, requested_port = sockaddr[:2]
- if requested_port == 0 and bound_port is not None:
- sockaddr = tuple([host, bound_port] + list(sockaddr[2:]))
-
- sock.setblocking(0)
- sock.bind(sockaddr)
- bound_port = sock.getsockname()[1]
- sock.listen(backlog)
- sockets.append(sock)
- return sockets
-
-
-if hasattr(socket, 'AF_UNIX'):
- def bind_unix_socket(file, mode=0o600, backlog=_DEFAULT_BACKLOG):
- """Creates a listening unix socket.
-
- If a socket with the given name already exists, it will be deleted.
- If any other file with that name exists, an exception will be
- raised.
-
- Returns a socket object (not a list of socket objects like
- `bind_sockets`).
- """
- sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
- set_close_exec(sock.fileno())
- sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
- sock.setblocking(0)
- try:
- st = os.stat(file)
- except OSError as err:
- if errno_from_exception(err) != errno.ENOENT:
- raise
- else:
- if stat.S_ISSOCK(st.st_mode):
- os.remove(file)
- else:
- raise ValueError("File %s exists and is not a socket", file)
- sock.bind(file)
- os.chmod(file, mode)
- sock.listen(backlog)
- return sock
-
-
-def add_accept_handler(sock, callback, io_loop=None):
- """Adds an `.IOLoop` event handler to accept new connections on ``sock``.
-
- When a connection is accepted, ``callback(connection, address)`` will
- be run (``connection`` is a socket object, and ``address`` is the
- address of the other end of the connection). Note that this signature
- is different from the ``callback(fd, events)`` signature used for
- `.IOLoop` handlers.
-
- .. versionchanged:: 4.1
- The ``io_loop`` argument is deprecated.
- """
- if io_loop is None:
- io_loop = IOLoop.current()
-
- def accept_handler(fd, events):
- # More connections may come in while we're handling callbacks;
- # to prevent starvation of other tasks we must limit the number
- # of connections we accept at a time. Ideally we would accept
- # up to the number of connections that were waiting when we
- # entered this method, but this information is not available
- # (and rearranging this method to call accept() as many times
- # as possible before running any callbacks would have adverse
- # effects on load balancing in multiprocess configurations).
- # Instead, we use the (default) listen backlog as a rough
- # heuristic for the number of connections we can reasonably
- # accept at once.
- for i in xrange(_DEFAULT_BACKLOG):
- try:
- connection, address = sock.accept()
- except socket.error as e:
- # _ERRNO_WOULDBLOCK indicates we have accepted every
- # connection that is available.
- if errno_from_exception(e) in _ERRNO_WOULDBLOCK:
- return
- # ECONNABORTED indicates that there was a connection
- # but it was closed while still in the accept queue.
- # (observed on FreeBSD).
- if errno_from_exception(e) == errno.ECONNABORTED:
- continue
- raise
- set_close_exec(connection.fileno())
- callback(connection, address)
- io_loop.add_handler(sock, accept_handler, IOLoop.READ)
-
-
-def is_valid_ip(ip):
- """Returns true if the given string is a well-formed IP address.
-
- Supports IPv4 and IPv6.
- """
- if not ip or '\x00' in ip:
- # getaddrinfo resolves empty strings to localhost, and truncates
- # on zero bytes.
- return False
- try:
- res = socket.getaddrinfo(ip, 0, socket.AF_UNSPEC,
- socket.SOCK_STREAM,
- 0, socket.AI_NUMERICHOST)
- return bool(res)
- except socket.gaierror as e:
- if e.args[0] == socket.EAI_NONAME:
- return False
- raise
- return True
-
-
-class Resolver(Configurable):
- """Configurable asynchronous DNS resolver interface.
-
- By default, a blocking implementation is used (which simply calls
- `socket.getaddrinfo`). An alternative implementation can be
- chosen with the `Resolver.configure <.Configurable.configure>`
- class method::
-
- Resolver.configure('tornado.netutil.ThreadedResolver')
-
- The implementations of this interface included with Tornado are
-
- * `tornado.netutil.BlockingResolver`
- * `tornado.netutil.ThreadedResolver`
- * `tornado.netutil.OverrideResolver`
- * `tornado.platform.twisted.TwistedResolver`
- * `tornado.platform.caresresolver.CaresResolver`
- """
- @classmethod
- def configurable_base(cls):
- return Resolver
-
- @classmethod
- def configurable_default(cls):
- return BlockingResolver
-
- def resolve(self, host, port, family=socket.AF_UNSPEC, callback=None):
- """Resolves an address.
-
- The ``host`` argument is a string which may be a hostname or a
- literal IP address.
-
- Returns a `.Future` whose result is a list of (family,
- address) pairs, where address is a tuple suitable to pass to
- `socket.connect <socket.socket.connect>` (i.e. a ``(host,
- port)`` pair for IPv4; additional fields may be present for
- IPv6). If a ``callback`` is passed, it will be run with the
- result as an argument when it is complete.
-
- :raises IOError: if the address cannot be resolved.
-
- .. versionchanged:: 4.4
- Standardized all implementations to raise `IOError`.
- """
- raise NotImplementedError()
-
- def close(self):
- """Closes the `Resolver`, freeing any resources used.
-
- .. versionadded:: 3.1
-
- """
- pass
-
-
-class ExecutorResolver(Resolver):
- """Resolver implementation using a `concurrent.futures.Executor`.
-
- Use this instead of `ThreadedResolver` when you require additional
- control over the executor being used.
-
- The executor will be shut down when the resolver is closed unless
- ``close_executor=False``; use this if you want to reuse the same
- executor elsewhere.
-
- .. versionchanged:: 4.1
- The ``io_loop`` argument is deprecated.
- """
- def initialize(self, io_loop=None, executor=None, close_executor=True):
- self.io_loop = io_loop or IOLoop.current()
- if executor is not None:
- self.executor = executor
- self.close_executor = close_executor
- else:
- self.executor = dummy_executor
- self.close_executor = False
-
- def close(self):
- if self.close_executor:
- self.executor.shutdown()
- self.executor = None
-
- @run_on_executor
- def resolve(self, host, port, family=socket.AF_UNSPEC):
- # On Solaris, getaddrinfo fails if the given port is not found
- # in /etc/services and no socket type is given, so we must pass
- # one here. The socket type used here doesn't seem to actually
- # matter (we discard the one we get back in the results),
- # so the addresses we return should still be usable with SOCK_DGRAM.
- addrinfo = socket.getaddrinfo(host, port, family, socket.SOCK_STREAM)
- results = []
- for family, socktype, proto, canonname, address in addrinfo:
- results.append((family, address))
- return results
-
-
-class BlockingResolver(ExecutorResolver):
- """Default `Resolver` implementation, using `socket.getaddrinfo`.
-
- The `.IOLoop` will be blocked during the resolution, although the
- callback will not be run until the next `.IOLoop` iteration.
- """
- def initialize(self, io_loop=None):
- super(BlockingResolver, self).initialize(io_loop=io_loop)
-
-
-class ThreadedResolver(ExecutorResolver):
- """Multithreaded non-blocking `Resolver` implementation.
-
- Requires the `concurrent.futures` package to be installed
- (available in the standard library since Python 3.2,
- installable with ``pip install futures`` in older versions).
-
- The thread pool size can be configured with::
-
- Resolver.configure('tornado.netutil.ThreadedResolver',
- num_threads=10)
-
- .. versionchanged:: 3.1
- All ``ThreadedResolvers`` share a single thread pool, whose
- size is set by the first one to be created.
- """
- _threadpool = None # type: ignore
- _threadpool_pid = None # type: int
-
- def initialize(self, io_loop=None, num_threads=10):
- threadpool = ThreadedResolver._create_threadpool(num_threads)
- super(ThreadedResolver, self).initialize(
- io_loop=io_loop, executor=threadpool, close_executor=False)
-
- @classmethod
- def _create_threadpool(cls, num_threads):
- pid = os.getpid()
- if cls._threadpool_pid != pid:
- # Threads cannot survive after a fork, so if our pid isn't what it
- # was when we created the pool then delete it.
- cls._threadpool = None
- if cls._threadpool is None:
- from concurrent.futures import ThreadPoolExecutor
- cls._threadpool = ThreadPoolExecutor(num_threads)
- cls._threadpool_pid = pid
- return cls._threadpool
-
-
-class OverrideResolver(Resolver):
- """Wraps a resolver with a mapping of overrides.
-
- This can be used to make local DNS changes (e.g. for testing)
- without modifying system-wide settings.
-
- The mapping can contain either host strings or host-port pairs.
- """
- def initialize(self, resolver, mapping):
- self.resolver = resolver
- self.mapping = mapping
-
- def close(self):
- self.resolver.close()
-
- def resolve(self, host, port, *args, **kwargs):
- if (host, port) in self.mapping:
- host, port = self.mapping[(host, port)]
- elif host in self.mapping:
- host = self.mapping[host]
- return self.resolver.resolve(host, port, *args, **kwargs)
-
-
-# These are the keyword arguments to ssl.wrap_socket that must be translated
-# to their SSLContext equivalents (the other arguments are still passed
-# to SSLContext.wrap_socket).
-_SSL_CONTEXT_KEYWORDS = frozenset(['ssl_version', 'certfile', 'keyfile',
- 'cert_reqs', 'ca_certs', 'ciphers'])
-
-
-def ssl_options_to_context(ssl_options):
- """Try to convert an ``ssl_options`` dictionary to an
- `~ssl.SSLContext` object.
-
- The ``ssl_options`` dictionary contains keywords to be passed to
- `ssl.wrap_socket`. In Python 2.7.9+, `ssl.SSLContext` objects can
- be used instead. This function converts the dict form to its
- `~ssl.SSLContext` equivalent, and may be used when a component which
- accepts both forms needs to upgrade to the `~ssl.SSLContext` version
- to use features like SNI or NPN.
- """
- if isinstance(ssl_options, dict):
- assert all(k in _SSL_CONTEXT_KEYWORDS for k in ssl_options), ssl_options
- if (not hasattr(ssl, 'SSLContext') or
- isinstance(ssl_options, ssl.SSLContext)):
- return ssl_options
- context = ssl.SSLContext(
- ssl_options.get('ssl_version', ssl.PROTOCOL_SSLv23))
- if 'certfile' in ssl_options:
- context.load_cert_chain(ssl_options['certfile'], ssl_options.get('keyfile', None))
- if 'cert_reqs' in ssl_options:
- context.verify_mode = ssl_options['cert_reqs']
- if 'ca_certs' in ssl_options:
- context.load_verify_locations(ssl_options['ca_certs'])
- if 'ciphers' in ssl_options:
- context.set_ciphers(ssl_options['ciphers'])
- if hasattr(ssl, 'OP_NO_COMPRESSION'):
- # Disable TLS compression to avoid CRIME and related attacks.
- # This constant wasn't added until python 3.3.
- context.options |= ssl.OP_NO_COMPRESSION
- return context
-
-
-def ssl_wrap_socket(socket, ssl_options, server_hostname=None, **kwargs):
- """Returns an ``ssl.SSLSocket`` wrapping the given socket.
-
- ``ssl_options`` may be either an `ssl.SSLContext` object or a
- dictionary (as accepted by `ssl_options_to_context`). Additional
- keyword arguments are passed to ``wrap_socket`` (either the
- `~ssl.SSLContext` method or the `ssl` module function as
- appropriate).
- """
- context = ssl_options_to_context(ssl_options)
- if hasattr(ssl, 'SSLContext') and isinstance(context, ssl.SSLContext):
- if server_hostname is not None and getattr(ssl, 'HAS_SNI'):
- # Python doesn't have server-side SNI support so we can't
- # really unittest this, but it can be manually tested with
- # python3.2 -m tornado.httpclient https://sni.velox.ch
- return context.wrap_socket(socket, server_hostname=server_hostname,
- **kwargs)
- else:
- return context.wrap_socket(socket, **kwargs)
- else:
- return ssl.wrap_socket(socket, **dict(context, **kwargs)) # type: ignore
+#!/usr/bin/env python
+#
+# Copyright 2011 Facebook
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Miscellaneous network utility code."""
+
+from __future__ import absolute_import, division, print_function
+
+import errno
+import os
+import sys
+import socket
+import stat
+
+from tornado.concurrent import dummy_executor, run_on_executor
+from tornado.ioloop import IOLoop
+from tornado.platform.auto import set_close_exec
+from tornado.util import PY3, Configurable, errno_from_exception
+
+try:
+ import ssl
+except ImportError:
+ # ssl is not available on Google App Engine
+ ssl = None
+
+try:
+ import certifi
+except ImportError:
+ # certifi is optional as long as we have ssl.create_default_context.
+ if ssl is None or hasattr(ssl, 'create_default_context'):
+ certifi = None
+ else:
+ raise
+
+if PY3:
+ xrange = range
+
+if hasattr(ssl, 'match_hostname') and hasattr(ssl, 'CertificateError'): # python 3.2+
+ ssl_match_hostname = ssl.match_hostname
+ SSLCertificateError = ssl.CertificateError
+elif ssl is None:
+ ssl_match_hostname = SSLCertificateError = None # type: ignore
+else:
+ import backports.ssl_match_hostname
+ ssl_match_hostname = backports.ssl_match_hostname.match_hostname
+ SSLCertificateError = backports.ssl_match_hostname.CertificateError # type: ignore
+
+if hasattr(ssl, 'SSLContext'):
+ if hasattr(ssl, 'create_default_context'):
+ # Python 2.7.9+, 3.4+
+ # Note that the naming of ssl.Purpose is confusing; the purpose
+ # of a context is to authenticate the opposite side of the connection.
+ _client_ssl_defaults = ssl.create_default_context(
+ ssl.Purpose.SERVER_AUTH)
+ # load ca certs bundled with binary
+ _client_ssl_defaults.load_verify_locations(certifi.where())
+ _server_ssl_defaults = ssl.create_default_context(
+ ssl.Purpose.CLIENT_AUTH)
+ else:
+ # Python 3.2-3.3
+ _client_ssl_defaults = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
+ _client_ssl_defaults.verify_mode = ssl.CERT_REQUIRED
+ _client_ssl_defaults.load_verify_locations(certifi.where())
+ _server_ssl_defaults = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
+ if hasattr(ssl, 'OP_NO_COMPRESSION'):
+ # Disable TLS compression to avoid CRIME and related attacks.
+ # This constant wasn't added until python 3.3.
+ _client_ssl_defaults.options |= ssl.OP_NO_COMPRESSION
+ _server_ssl_defaults.options |= ssl.OP_NO_COMPRESSION
+
+elif ssl:
+ # Python 2.6-2.7.8
+ _client_ssl_defaults = dict(cert_reqs=ssl.CERT_REQUIRED,
+ ca_certs=certifi.where())
+ _server_ssl_defaults = {}
+else:
+ # Google App Engine
+ _client_ssl_defaults = dict(cert_reqs=None,
+ ca_certs=None)
+ _server_ssl_defaults = {}
+
+# ThreadedResolver runs getaddrinfo on a thread. If the hostname is unicode,
+# getaddrinfo attempts to import encodings.idna. If this is done at
+# module-import time, the import lock is already held by the main thread,
+# leading to deadlock. Avoid it by caching the idna encoder on the main
+# thread now.
+u'foo'.encode('idna')
+
+# For undiagnosed reasons, 'latin1' codec may also need to be preloaded.
+u'foo'.encode('latin1')
+
+# These errnos indicate that a non-blocking operation must be retried
+# at a later time. On most platforms they're the same value, but on
+# some they differ.
+_ERRNO_WOULDBLOCK = (errno.EWOULDBLOCK, errno.EAGAIN)
+
+if hasattr(errno, "WSAEWOULDBLOCK"):
+ _ERRNO_WOULDBLOCK += (errno.WSAEWOULDBLOCK,) # type: ignore
+
+# Default backlog used when calling sock.listen()
+_DEFAULT_BACKLOG = 128
+
+
+def bind_sockets(port, address=None, family=socket.AF_UNSPEC,
+ backlog=_DEFAULT_BACKLOG, flags=None, reuse_port=False):
+ """Creates listening sockets bound to the given port and address.
+
+ Returns a list of socket objects (multiple sockets are returned if
+ the given address maps to multiple IP addresses, which is most common
+ for mixed IPv4 and IPv6 use).
+
+ Address may be either an IP address or hostname. If it's a hostname,
+ the server will listen on all IP addresses associated with the
+ name. Address may be an empty string or None to listen on all
+ available interfaces. Family may be set to either `socket.AF_INET`
+ or `socket.AF_INET6` to restrict to IPv4 or IPv6 addresses, otherwise
+ both will be used if available.
+
+ The ``backlog`` argument has the same meaning as for
+ `socket.listen() <socket.socket.listen>`.
+
+ ``flags`` is a bitmask of AI_* flags to `~socket.getaddrinfo`, like
+ ``socket.AI_PASSIVE | socket.AI_NUMERICHOST``.
+
+ The ``reuse_port`` option sets the ``SO_REUSEPORT`` option for every
+ socket in the list. If your platform doesn't support this option,
+ ``ValueError`` will be raised.
+ """
+ if reuse_port and not hasattr(socket, "SO_REUSEPORT"):
+ raise ValueError("the platform doesn't support SO_REUSEPORT")
+
+ sockets = []
+ if address == "":
+ address = None
+ if not socket.has_ipv6 and family == socket.AF_UNSPEC:
+ # Python can be compiled with --disable-ipv6, which causes
+ # operations on AF_INET6 sockets to fail, but does not
+ # automatically exclude those results from getaddrinfo
+ # results.
+ # http://bugs.python.org/issue16208
+ family = socket.AF_INET
+ if flags is None:
+ flags = socket.AI_PASSIVE
+ bound_port = None
+ for res in set(socket.getaddrinfo(address, port, family, socket.SOCK_STREAM,
+ 0, flags)):
+ af, socktype, proto, canonname, sockaddr = res
+ if (sys.platform == 'darwin' and address == 'localhost' and
+ af == socket.AF_INET6 and sockaddr[3] != 0):
+ # Mac OS X includes a link-local address fe80::1%lo0 in the
+ # getaddrinfo results for 'localhost'. However, the firewall
+ # doesn't understand that this is a local address and will
+ # prompt for access (often repeatedly, due to an apparent
+ # bug in its ability to remember granting access to an
+ # application). Skip these addresses.
+ continue
+ try:
+ sock = socket.socket(af, socktype, proto)
+ except socket.error as e:
+ if errno_from_exception(e) == errno.EAFNOSUPPORT:
+ continue
+ raise
+ set_close_exec(sock.fileno())
+ if os.name != 'nt':
+ sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+ if reuse_port:
+ sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
+ if af == socket.AF_INET6:
+ # On linux, ipv6 sockets accept ipv4 too by default,
+ # but this makes it impossible to bind to both
+ # 0.0.0.0 in ipv4 and :: in ipv6. On other systems,
+ # separate sockets *must* be used to listen for both ipv4
+ # and ipv6. For consistency, always disable ipv4 on our
+ # ipv6 sockets and use a separate ipv4 socket when needed.
+ #
+ # Python 2.x on windows doesn't have IPPROTO_IPV6.
+ if hasattr(socket, "IPPROTO_IPV6"):
+ sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 1)
+
+ # automatic port allocation with port=None
+ # should bind on the same port on IPv4 and IPv6
+ host, requested_port = sockaddr[:2]
+ if requested_port == 0 and bound_port is not None:
+ sockaddr = tuple([host, bound_port] + list(sockaddr[2:]))
+
+ sock.setblocking(0)
+ sock.bind(sockaddr)
+ bound_port = sock.getsockname()[1]
+ sock.listen(backlog)
+ sockets.append(sock)
+ return sockets
+
+
+if hasattr(socket, 'AF_UNIX'):
+ def bind_unix_socket(file, mode=0o600, backlog=_DEFAULT_BACKLOG):
+ """Creates a listening unix socket.
+
+ If a socket with the given name already exists, it will be deleted.
+ If any other file with that name exists, an exception will be
+ raised.
+
+ Returns a socket object (not a list of socket objects like
+ `bind_sockets`).
+ """
+ sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+ set_close_exec(sock.fileno())
+ sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+ sock.setblocking(0)
+ try:
+ st = os.stat(file)
+ except OSError as err:
+ if errno_from_exception(err) != errno.ENOENT:
+ raise
+ else:
+ if stat.S_ISSOCK(st.st_mode):
+ os.remove(file)
+ else:
+ raise ValueError("File %s exists and is not a socket", file)
+ sock.bind(file)
+ os.chmod(file, mode)
+ sock.listen(backlog)
+ return sock
+
+
+def add_accept_handler(sock, callback, io_loop=None):
+ """Adds an `.IOLoop` event handler to accept new connections on ``sock``.
+
+ When a connection is accepted, ``callback(connection, address)`` will
+ be run (``connection`` is a socket object, and ``address`` is the
+ address of the other end of the connection). Note that this signature
+ is different from the ``callback(fd, events)`` signature used for
+ `.IOLoop` handlers.
+
+ .. versionchanged:: 4.1
+ The ``io_loop`` argument is deprecated.
+ """
+ if io_loop is None:
+ io_loop = IOLoop.current()
+
+ def accept_handler(fd, events):
+ # More connections may come in while we're handling callbacks;
+ # to prevent starvation of other tasks we must limit the number
+ # of connections we accept at a time. Ideally we would accept
+ # up to the number of connections that were waiting when we
+ # entered this method, but this information is not available
+ # (and rearranging this method to call accept() as many times
+ # as possible before running any callbacks would have adverse
+ # effects on load balancing in multiprocess configurations).
+ # Instead, we use the (default) listen backlog as a rough
+ # heuristic for the number of connections we can reasonably
+ # accept at once.
+ for i in xrange(_DEFAULT_BACKLOG):
+ try:
+ connection, address = sock.accept()
+ except socket.error as e:
+ # _ERRNO_WOULDBLOCK indicates we have accepted every
+ # connection that is available.
+ if errno_from_exception(e) in _ERRNO_WOULDBLOCK:
+ return
+ # ECONNABORTED indicates that there was a connection
+ # but it was closed while still in the accept queue.
+ # (observed on FreeBSD).
+ if errno_from_exception(e) == errno.ECONNABORTED:
+ continue
+ raise
+ set_close_exec(connection.fileno())
+ callback(connection, address)
+ io_loop.add_handler(sock, accept_handler, IOLoop.READ)
+
+
+def is_valid_ip(ip):
+ """Returns true if the given string is a well-formed IP address.
+
+ Supports IPv4 and IPv6.
+ """
+ if not ip or '\x00' in ip:
+ # getaddrinfo resolves empty strings to localhost, and truncates
+ # on zero bytes.
+ return False
+ try:
+ res = socket.getaddrinfo(ip, 0, socket.AF_UNSPEC,
+ socket.SOCK_STREAM,
+ 0, socket.AI_NUMERICHOST)
+ return bool(res)
+ except socket.gaierror as e:
+ if e.args[0] == socket.EAI_NONAME:
+ return False
+ raise
+ return True
+
+
+class Resolver(Configurable):
+ """Configurable asynchronous DNS resolver interface.
+
+ By default, a blocking implementation is used (which simply calls
+ `socket.getaddrinfo`). An alternative implementation can be
+ chosen with the `Resolver.configure <.Configurable.configure>`
+ class method::
+
+ Resolver.configure('tornado.netutil.ThreadedResolver')
+
+ The implementations of this interface included with Tornado are
+
+ * `tornado.netutil.BlockingResolver`
+ * `tornado.netutil.ThreadedResolver`
+ * `tornado.netutil.OverrideResolver`
+ * `tornado.platform.twisted.TwistedResolver`
+ * `tornado.platform.caresresolver.CaresResolver`
+ """
+ @classmethod
+ def configurable_base(cls):
+ return Resolver
+
+ @classmethod
+ def configurable_default(cls):
+ return BlockingResolver
+
+ def resolve(self, host, port, family=socket.AF_UNSPEC, callback=None):
+ """Resolves an address.
+
+ The ``host`` argument is a string which may be a hostname or a
+ literal IP address.
+
+ Returns a `.Future` whose result is a list of (family,
+ address) pairs, where address is a tuple suitable to pass to
+ `socket.connect <socket.socket.connect>` (i.e. a ``(host,
+ port)`` pair for IPv4; additional fields may be present for
+ IPv6). If a ``callback`` is passed, it will be run with the
+ result as an argument when it is complete.
+
+ :raises IOError: if the address cannot be resolved.
+
+ .. versionchanged:: 4.4
+ Standardized all implementations to raise `IOError`.
+ """
+ raise NotImplementedError()
+
+ def close(self):
+ """Closes the `Resolver`, freeing any resources used.
+
+ .. versionadded:: 3.1
+
+ """
+ pass
+
+
+class ExecutorResolver(Resolver):
+ """Resolver implementation using a `concurrent.futures.Executor`.
+
+ Use this instead of `ThreadedResolver` when you require additional
+ control over the executor being used.
+
+ The executor will be shut down when the resolver is closed unless
+ ``close_executor=False``; use this if you want to reuse the same
+ executor elsewhere.
+
+ .. versionchanged:: 4.1
+ The ``io_loop`` argument is deprecated.
+ """
+ def initialize(self, io_loop=None, executor=None, close_executor=True):
+ self.io_loop = io_loop or IOLoop.current()
+ if executor is not None:
+ self.executor = executor
+ self.close_executor = close_executor
+ else:
+ self.executor = dummy_executor
+ self.close_executor = False
+
+ def close(self):
+ if self.close_executor:
+ self.executor.shutdown()
+ self.executor = None
+
+ @run_on_executor
+ def resolve(self, host, port, family=socket.AF_UNSPEC):
+ # On Solaris, getaddrinfo fails if the given port is not found
+ # in /etc/services and no socket type is given, so we must pass
+ # one here. The socket type used here doesn't seem to actually
+ # matter (we discard the one we get back in the results),
+ # so the addresses we return should still be usable with SOCK_DGRAM.
+ addrinfo = socket.getaddrinfo(host, port, family, socket.SOCK_STREAM)
+ results = []
+ for family, socktype, proto, canonname, address in addrinfo:
+ results.append((family, address))
+ return results
+
+
+class BlockingResolver(ExecutorResolver):
+ """Default `Resolver` implementation, using `socket.getaddrinfo`.
+
+ The `.IOLoop` will be blocked during the resolution, although the
+ callback will not be run until the next `.IOLoop` iteration.
+ """
+ def initialize(self, io_loop=None):
+ super(BlockingResolver, self).initialize(io_loop=io_loop)
+
+
+class ThreadedResolver(ExecutorResolver):
+ """Multithreaded non-blocking `Resolver` implementation.
+
+ Requires the `concurrent.futures` package to be installed
+ (available in the standard library since Python 3.2,
+ installable with ``pip install futures`` in older versions).
+
+ The thread pool size can be configured with::
+
+ Resolver.configure('tornado.netutil.ThreadedResolver',
+ num_threads=10)
+
+ .. versionchanged:: 3.1
+ All ``ThreadedResolvers`` share a single thread pool, whose
+ size is set by the first one to be created.
+ """
+ _threadpool = None # type: ignore
+ _threadpool_pid = None # type: int
+
+ def initialize(self, io_loop=None, num_threads=10):
+ threadpool = ThreadedResolver._create_threadpool(num_threads)
+ super(ThreadedResolver, self).initialize(
+ io_loop=io_loop, executor=threadpool, close_executor=False)
+
+ @classmethod
+ def _create_threadpool(cls, num_threads):
+ pid = os.getpid()
+ if cls._threadpool_pid != pid:
+ # Threads cannot survive after a fork, so if our pid isn't what it
+ # was when we created the pool then delete it.
+ cls._threadpool = None
+ if cls._threadpool is None:
+ from concurrent.futures import ThreadPoolExecutor
+ cls._threadpool = ThreadPoolExecutor(num_threads)
+ cls._threadpool_pid = pid
+ return cls._threadpool
+
+
+class OverrideResolver(Resolver):
+ """Wraps a resolver with a mapping of overrides.
+
+ This can be used to make local DNS changes (e.g. for testing)
+ without modifying system-wide settings.
+
+ The mapping can contain either host strings or host-port pairs.
+ """
+ def initialize(self, resolver, mapping):
+ self.resolver = resolver
+ self.mapping = mapping
+
+ def close(self):
+ self.resolver.close()
+
+ def resolve(self, host, port, *args, **kwargs):
+ if (host, port) in self.mapping:
+ host, port = self.mapping[(host, port)]
+ elif host in self.mapping:
+ host = self.mapping[host]
+ return self.resolver.resolve(host, port, *args, **kwargs)
+
+
+# These are the keyword arguments to ssl.wrap_socket that must be translated
+# to their SSLContext equivalents (the other arguments are still passed
+# to SSLContext.wrap_socket).
+_SSL_CONTEXT_KEYWORDS = frozenset(['ssl_version', 'certfile', 'keyfile',
+ 'cert_reqs', 'ca_certs', 'ciphers'])
+
+
+def ssl_options_to_context(ssl_options):
+ """Try to convert an ``ssl_options`` dictionary to an
+ `~ssl.SSLContext` object.
+
+ The ``ssl_options`` dictionary contains keywords to be passed to
+ `ssl.wrap_socket`. In Python 2.7.9+, `ssl.SSLContext` objects can
+ be used instead. This function converts the dict form to its
+ `~ssl.SSLContext` equivalent, and may be used when a component which
+ accepts both forms needs to upgrade to the `~ssl.SSLContext` version
+ to use features like SNI or NPN.
+ """
+ if isinstance(ssl_options, dict):
+ assert all(k in _SSL_CONTEXT_KEYWORDS for k in ssl_options), ssl_options
+ if (not hasattr(ssl, 'SSLContext') or
+ isinstance(ssl_options, ssl.SSLContext)):
+ return ssl_options
+ context = ssl.SSLContext(
+ ssl_options.get('ssl_version', ssl.PROTOCOL_SSLv23))
+ if 'certfile' in ssl_options:
+ context.load_cert_chain(ssl_options['certfile'], ssl_options.get('keyfile', None))
+ if 'cert_reqs' in ssl_options:
+ context.verify_mode = ssl_options['cert_reqs']
+ if 'ca_certs' in ssl_options:
+ context.load_verify_locations(ssl_options['ca_certs'])
+ if 'ciphers' in ssl_options:
+ context.set_ciphers(ssl_options['ciphers'])
+ if hasattr(ssl, 'OP_NO_COMPRESSION'):
+ # Disable TLS compression to avoid CRIME and related attacks.
+ # This constant wasn't added until python 3.3.
+ context.options |= ssl.OP_NO_COMPRESSION
+ return context
+
+
+def ssl_wrap_socket(socket, ssl_options, server_hostname=None, **kwargs):
+ """Returns an ``ssl.SSLSocket`` wrapping the given socket.
+
+ ``ssl_options`` may be either an `ssl.SSLContext` object or a
+ dictionary (as accepted by `ssl_options_to_context`). Additional
+ keyword arguments are passed to ``wrap_socket`` (either the
+ `~ssl.SSLContext` method or the `ssl` module function as
+ appropriate).
+ """
+ context = ssl_options_to_context(ssl_options)
+ if hasattr(ssl, 'SSLContext') and isinstance(context, ssl.SSLContext):
+ if server_hostname is not None and getattr(ssl, 'HAS_SNI'):
+ # Python doesn't have server-side SNI support so we can't
+ # really unittest this, but it can be manually tested with
+ # python3.2 -m tornado.httpclient https://sni.velox.ch
+ return context.wrap_socket(socket, server_hostname=server_hostname,
+ **kwargs)
+ else:
+ return context.wrap_socket(socket, **kwargs)
+ else:
+ return ssl.wrap_socket(socket, **dict(context, **kwargs)) # type: ignore
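A usage sketch for the accept path above (the address and callback are illustrative): bind an ephemeral port, then let the ``IOLoop`` deliver accepted connections to a callback:

    import tornado.ioloop
    import tornado.netutil

    # Port 0 requests an ephemeral port; one socket is returned per
    # resolved address (e.g. one each for IPv4 and IPv6).
    sockets = tornado.netutil.bind_sockets(0, address='127.0.0.1')
    print('listening on', [s.getsockname() for s in sockets])

    def on_connect(connection, address):
        # connection is the accepted socket; address is the peer address.
        connection.close()

    for sock in sockets:
        tornado.netutil.add_accept_handler(sock, on_connect)

    tornado.ioloop.IOLoop.current().start()  # blocks, serving accepts
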
diff --git a/contrib/python/tornado/tornado-4/tornado/options.py b/contrib/python/tornado/tornado-4/tornado/options.py
index 707fbd35ee..ffaaba6209 100644
--- a/contrib/python/tornado/tornado-4/tornado/options.py
+++ b/contrib/python/tornado/tornado-4/tornado/options.py
@@ -1,594 +1,594 @@
-#!/usr/bin/env python
-#
-# Copyright 2009 Facebook
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""A command line parsing module that lets modules define their own options.
-
-Each module defines its own options which are added to the global
-option namespace, e.g.::
-
- from tornado.options import define, options
-
- define("mysql_host", default="127.0.0.1:3306", help="Main user DB")
- define("memcache_hosts", default="127.0.0.1:11011", multiple=True,
- help="Main user memcache servers")
-
- def connect():
- db = database.Connection(options.mysql_host)
- ...
-
-The ``main()`` method of your application does not need to be aware of all of
-the options used throughout your program; they are all automatically loaded
-when the modules are loaded. However, all modules that define options
-must have been imported before the command line is parsed.
-
-Your ``main()`` method can parse the command line or parse a config file with
-either::
-
- tornado.options.parse_command_line()
- # or
- tornado.options.parse_config_file("/etc/server.conf")
-
-.. note::
-
- When using tornado.options.parse_command_line or
- tornado.options.parse_config_file, the only options that are set are
- ones that were previously defined with tornado.options.define.
-
-Command line formats are what you would expect (``--myoption=myvalue``).
-Config files are just Python files. Global names become options, e.g.::
-
- myoption = "myvalue"
- myotheroption = "myothervalue"
-
-We support `datetimes <datetime.datetime>`, `timedeltas
-<datetime.timedelta>`, ints, and floats (just pass a ``type`` kwarg to
-`define`). We also accept multi-value options. See the documentation for
-`define()` below.
-
-`tornado.options.options` is a singleton instance of `OptionParser`, and
-the top-level functions in this module (`define`, `parse_command_line`, etc)
-simply call methods on it. You may create additional `OptionParser`
-instances to define isolated sets of options, such as for subcommands.
-
-.. note::
-
- By default, several options are defined that will configure the
- standard `logging` module when `parse_command_line` or `parse_config_file`
- are called. If you want Tornado to leave the logging configuration
- alone so you can manage it yourself, either pass ``--logging=none``
- on the command line or do the following to disable it in code::
-
- from tornado.options import options, parse_command_line
- options.logging = None
- parse_command_line()
-
-.. versionchanged:: 4.3
- Dashes and underscores are fully interchangeable in option names;
- options can be defined, set, and read with any mix of the two.
- Dashes are typical for command-line usage while config files require
- underscores.
-"""
-
-from __future__ import absolute_import, division, print_function
-
-import datetime
-import numbers
-import re
-import sys
-import os
-import textwrap
-
-from tornado.escape import _unicode, native_str
-from tornado.log import define_logging_options
-from tornado import stack_context
-from tornado.util import basestring_type, exec_in
-
-
-class Error(Exception):
- """Exception raised by errors in the options module."""
- pass
-
-
-class OptionParser(object):
- """A collection of options, a dictionary with object-like access.
-
- Normally accessed via static functions in the `tornado.options` module,
- which reference a global instance.
- """
- def __init__(self):
- # we have to use self.__dict__ because we override setattr.
- self.__dict__['_options'] = {}
- self.__dict__['_parse_callbacks'] = []
- self.define("help", type=bool, help="show this help information",
- callback=self._help_callback)
-
- def _normalize_name(self, name):
- return name.replace('_', '-')
-
- def __getattr__(self, name):
- name = self._normalize_name(name)
- if isinstance(self._options.get(name), _Option):
- return self._options[name].value()
- raise AttributeError("Unrecognized option %r" % name)
-
- def __setattr__(self, name, value):
- name = self._normalize_name(name)
- if isinstance(self._options.get(name), _Option):
- return self._options[name].set(value)
- raise AttributeError("Unrecognized option %r" % name)
-
- def __iter__(self):
- return (opt.name for opt in self._options.values())
-
- def __contains__(self, name):
- name = self._normalize_name(name)
- return name in self._options
-
- def __getitem__(self, name):
- return self.__getattr__(name)
-
- def __setitem__(self, name, value):
- return self.__setattr__(name, value)
-
- def items(self):
- """A sequence of (name, value) pairs.
-
- .. versionadded:: 3.1
- """
- return [(opt.name, opt.value()) for name, opt in self._options.items()]
-
- def groups(self):
- """The set of option-groups created by ``define``.
-
- .. versionadded:: 3.1
- """
- return set(opt.group_name for opt in self._options.values())
-
- def group_dict(self, group):
- """The names and values of options in a group.
-
- Useful for copying options into Application settings::
-
- from tornado.options import define, parse_command_line, options
-
- define('template_path', group='application')
- define('static_path', group='application')
-
- parse_command_line()
-
- application = Application(
- handlers, **options.group_dict('application'))
-
- .. versionadded:: 3.1
- """
- return dict(
- (opt.name, opt.value()) for name, opt in self._options.items()
- if not group or group == opt.group_name)
-
- def as_dict(self):
- """The names and values of all options.
-
- .. versionadded:: 3.1
- """
- return dict(
- (opt.name, opt.value()) for name, opt in self._options.items())
-
- def define(self, name, default=None, type=None, help=None, metavar=None,
- multiple=False, group=None, callback=None):
- """Defines a new command line option.
-
- If ``type`` is given (one of str, float, int, datetime, or timedelta)
- or can be inferred from the ``default``, we parse the command line
- arguments based on the given type. If ``multiple`` is True, we accept
- comma-separated values, and the option value is always a list.
-
- For multi-value integers, we also accept the syntax ``x:y``, which
- turns into ``range(x, y + 1)`` (inclusive at both ends) - very
- useful for long integer ranges.
-
- ``help`` and ``metavar`` are used to construct the
- automatically generated command line help string. The help
- message is formatted like::
-
- --name=METAVAR help string
-
- ``group`` is used to group the defined options in logical
- groups. By default, command line options are grouped by the
- file in which they are defined.
-
- Command line option names must be unique globally. They can be parsed
- from the command line with `parse_command_line` or parsed from a
- config file with `parse_config_file`.
-
- If a ``callback`` is given, it will be run with the new value whenever
- the option is changed. This can be used to combine command-line
- and file-based options::
-
- define("config", type=str, help="path to config file",
- callback=lambda path: parse_config_file(path, final=False))
-
- With this definition, options in the file specified by ``--config`` will
- override options set earlier on the command line, but can be overridden
- by later flags.
- """
- normalized = self._normalize_name(name)
- if normalized in self._options:
- raise Error("Option %r already defined in %s" %
- (normalized, self._options[normalized].file_name))
- frame = sys._getframe(0)
- options_file = frame.f_code.co_filename
-
- # Can be called directly, or through top level define() fn, in which
- # case, step up above that frame to look for real caller.
- if (frame.f_back.f_code.co_filename == options_file and
- frame.f_back.f_code.co_name == 'define'):
- frame = frame.f_back
-
- file_name = frame.f_back.f_code.co_filename
- if file_name == options_file:
- file_name = ""
- if type is None:
- if not multiple and default is not None:
- type = default.__class__
- else:
- type = str
- if group:
- group_name = group
- else:
- group_name = file_name
- option = _Option(name, file_name=file_name,
- default=default, type=type, help=help,
- metavar=metavar, multiple=multiple,
- group_name=group_name,
- callback=callback)
- self._options[normalized] = option
-
- def parse_command_line(self, args=None, final=True):
- """Parses all options given on the command line (defaults to
- `sys.argv`).
-
- Note that ``args[0]`` is ignored since it is the program name
- in `sys.argv`.
-
- We return a list of all arguments that are not parsed as options.
-
- If ``final`` is ``False``, parse callbacks will not be run.
- This is useful for applications that wish to combine configurations
- from multiple sources.
- """
- if args is None:
- args = sys.argv
- remaining = []
- for i in range(1, len(args)):
- # The first non-option argument and everything after it are returned unparsed
- if not args[i].startswith("-"):
- remaining = args[i:]
- break
- if args[i] == "--":
- remaining = args[i + 1:]
- break
- arg = args[i].lstrip("-")
- name, equals, value = arg.partition("=")
- name = self._normalize_name(name)
- if name not in self._options:
- self.print_help()
- raise Error('Unrecognized command line option: %r' % name)
- option = self._options[name]
- if not equals:
- if option.type == bool:
- value = "true"
- else:
- raise Error('Option %r requires a value' % name)
- option.parse(value)
-
- if final:
- self.run_parse_callbacks()
-
- return remaining
-
- def parse_config_file(self, path, final=True):
- """Parses and loads the Python config file at the given path.
-
- If ``final`` is ``False``, parse callbacks will not be run.
- This is useful for applications that wish to combine configurations
- from multiple sources.
-
- .. versionchanged:: 4.1
- Config files are now always interpreted as utf-8 instead of
- the system default encoding.
-
- .. versionchanged:: 4.4
- The special variable ``__file__`` is available inside config
- files, specifying the absolute path to the config file itself.
- """
- config = {'__file__': os.path.abspath(path)}
- with open(path, 'rb') as f:
- exec_in(native_str(f.read()), config, config)
- for name in config:
- normalized = self._normalize_name(name)
- if normalized in self._options:
- self._options[normalized].set(config[name])
-
- if final:
- self.run_parse_callbacks()
-
- def print_help(self, file=None):
- """Prints all the command line options to stderr (or another file)."""
- if file is None:
- file = sys.stderr
- print("Usage: %s [OPTIONS]" % sys.argv[0], file=file)
- print("\nOptions:\n", file=file)
- by_group = {}
- for option in self._options.values():
- by_group.setdefault(option.group_name, []).append(option)
-
- for filename, o in sorted(by_group.items()):
- if filename:
- print("\n%s options:\n" % os.path.normpath(filename), file=file)
- o.sort(key=lambda option: option.name)
- for option in o:
- # Always print names with dashes in a CLI context.
- prefix = self._normalize_name(option.name)
- if option.metavar:
- prefix += "=" + option.metavar
- description = option.help or ""
- if option.default is not None and option.default != '':
- description += " (default %s)" % option.default
- lines = textwrap.wrap(description, 79 - 35)
- if len(prefix) > 30 or len(lines) == 0:
- lines.insert(0, '')
- print(" --%-30s %s" % (prefix, lines[0]), file=file)
- for line in lines[1:]:
- print("%-34s %s" % (' ', line), file=file)
- print(file=file)
-
- def _help_callback(self, value):
- if value:
- self.print_help()
- sys.exit(0)
-
- def add_parse_callback(self, callback):
- """Adds a parse callback, to be invoked when option parsing is done."""
- self._parse_callbacks.append(stack_context.wrap(callback))
-
- def run_parse_callbacks(self):
- for callback in self._parse_callbacks:
- callback()
-
- def mockable(self):
- """Returns a wrapper around self that is compatible with
- `mock.patch <unittest.mock.patch>`.
-
- The `mock.patch <unittest.mock.patch>` function (included in
- the standard library `unittest.mock` package since Python 3.3,
- or in the third-party ``mock`` package for older versions of
- Python) is incompatible with objects like ``options`` that
- override ``__getattr__`` and ``__setattr__``. This function
- returns an object that can be used with `mock.patch.object
- <unittest.mock.patch.object>` to modify option values::
-
- with mock.patch.object(options.mockable(), 'name', value):
- assert options.name == value
- """
- return _Mockable(self)
-
-
-class _Mockable(object):
- """`mock.patch` compatible wrapper for `OptionParser`.
-
- As of ``mock`` version 1.0.1, when an object uses ``__getattr__``
- hooks instead of ``__dict__``, ``patch.__exit__`` tries to delete
- the attribute it set instead of setting a new one (assuming that
- the object does not capture ``__setattr__``, so the patch
- created a new attribute in ``__dict__``).
-
- _Mockable's getattr and setattr pass through to the underlying
- OptionParser, and delattr undoes the effect of a previous setattr.
- """
- def __init__(self, options):
- # Modify __dict__ directly to bypass __setattr__
- self.__dict__['_options'] = options
- self.__dict__['_originals'] = {}
-
- def __getattr__(self, name):
- return getattr(self._options, name)
-
- def __setattr__(self, name, value):
- assert name not in self._originals, "don't reuse mockable objects"
- self._originals[name] = getattr(self._options, name)
- setattr(self._options, name, value)
-
- def __delattr__(self, name):
- setattr(self._options, name, self._originals.pop(name))
-
-
-class _Option(object):
- UNSET = object()
-
- def __init__(self, name, default=None, type=basestring_type, help=None,
- metavar=None, multiple=False, file_name=None, group_name=None,
- callback=None):
- if default is None and multiple:
- default = []
- self.name = name
- self.type = type
- self.help = help
- self.metavar = metavar
- self.multiple = multiple
- self.file_name = file_name
- self.group_name = group_name
- self.callback = callback
- self.default = default
- self._value = _Option.UNSET
-
- def value(self):
- return self.default if self._value is _Option.UNSET else self._value
-
- def parse(self, value):
- _parse = {
- datetime.datetime: self._parse_datetime,
- datetime.timedelta: self._parse_timedelta,
- bool: self._parse_bool,
- basestring_type: self._parse_string,
- }.get(self.type, self.type)
- if self.multiple:
- self._value = []
- for part in value.split(","):
- if issubclass(self.type, numbers.Integral):
- # allow ranges of the form X:Y (inclusive at both ends)
- lo, _, hi = part.partition(":")
- lo = _parse(lo)
- hi = _parse(hi) if hi else lo
- self._value.extend(range(lo, hi + 1))
- else:
- self._value.append(_parse(part))
- else:
- self._value = _parse(value)
- if self.callback is not None:
- self.callback(self._value)
- return self.value()
-
- def set(self, value):
- if self.multiple:
- if not isinstance(value, list):
- raise Error("Option %r is required to be a list of %s" %
- (self.name, self.type.__name__))
- for item in value:
- if item is not None and not isinstance(item, self.type):
- raise Error("Option %r is required to be a list of %s" %
- (self.name, self.type.__name__))
- else:
- if value is not None and not isinstance(value, self.type):
- raise Error("Option %r is required to be a %s (%s given)" %
- (self.name, self.type.__name__, type(value)))
- self._value = value
- if self.callback is not None:
- self.callback(self._value)
-
- # Supported date/time formats in our options
- _DATETIME_FORMATS = [
- "%a %b %d %H:%M:%S %Y",
- "%Y-%m-%d %H:%M:%S",
- "%Y-%m-%d %H:%M",
- "%Y-%m-%dT%H:%M",
- "%Y%m%d %H:%M:%S",
- "%Y%m%d %H:%M",
- "%Y-%m-%d",
- "%Y%m%d",
- "%H:%M:%S",
- "%H:%M",
- ]
-
- def _parse_datetime(self, value):
- for format in self._DATETIME_FORMATS:
- try:
- return datetime.datetime.strptime(value, format)
- except ValueError:
- pass
- raise Error('Unrecognized date/time format: %r' % value)
-
- _TIMEDELTA_ABBREV_DICT = {
- 'h': 'hours',
- 'm': 'minutes',
- 'min': 'minutes',
- 's': 'seconds',
- 'sec': 'seconds',
- 'ms': 'milliseconds',
- 'us': 'microseconds',
- 'd': 'days',
- 'w': 'weeks',
- }
-
- _FLOAT_PATTERN = r'[-+]?(?:\d+(?:\.\d*)?|\.\d+)(?:[eE][-+]?\d+)?'
-
- _TIMEDELTA_PATTERN = re.compile(
- r'\s*(%s)\s*(\w*)\s*' % _FLOAT_PATTERN, re.IGNORECASE)
-
- def _parse_timedelta(self, value):
- try:
- sum = datetime.timedelta()
- start = 0
- while start < len(value):
- m = self._TIMEDELTA_PATTERN.match(value, start)
- if not m:
- raise Exception()
- num = float(m.group(1))
- units = m.group(2) or 'seconds'
- units = self._TIMEDELTA_ABBREV_DICT.get(units, units)
- sum += datetime.timedelta(**{units: num})
- start = m.end()
- return sum
- except Exception:
- raise
-
- def _parse_bool(self, value):
- return value.lower() not in ("false", "0", "f")
-
- def _parse_string(self, value):
- return _unicode(value)
-
-
-options = OptionParser()
-"""Global options object.
-
-All defined options are available as attributes on this object.
-"""
-
-
-def define(name, default=None, type=None, help=None, metavar=None,
- multiple=False, group=None, callback=None):
- """Defines an option in the global namespace.
-
- See `OptionParser.define`.
- """
- return options.define(name, default=default, type=type, help=help,
- metavar=metavar, multiple=multiple, group=group,
- callback=callback)
-
-
-def parse_command_line(args=None, final=True):
- """Parses global options from the command line.
-
- See `OptionParser.parse_command_line`.
- """
- return options.parse_command_line(args, final=final)
-
-
-def parse_config_file(path, final=True):
- """Parses global options from a config file.
-
- See `OptionParser.parse_config_file`.
- """
- return options.parse_config_file(path, final=final)
-
-
-def print_help(file=None):
- """Prints all the command line options to stderr (or another file).
-
- See `OptionParser.print_help`.
- """
- return options.print_help(file)
-
-
-def add_parse_callback(callback):
- """Adds a parse callback, to be invoked when option parsing is done.
-
- See `OptionParser.add_parse_callback`
- """
- options.add_parse_callback(callback)
-
-
-# Default options
-define_logging_options(options)
+#!/usr/bin/env python
+#
+# Copyright 2009 Facebook
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""A command line parsing module that lets modules define their own options.
+
+Each module defines its own options which are added to the global
+option namespace, e.g.::
+
+ from tornado.options import define, options
+
+ define("mysql_host", default="127.0.0.1:3306", help="Main user DB")
+ define("memcache_hosts", default="127.0.0.1:11011", multiple=True,
+ help="Main user memcache servers")
+
+ def connect():
+ db = database.Connection(options.mysql_host)
+ ...
+
+The ``main()`` method of your application does not need to be aware of all of
+the options used throughout your program; they are all automatically loaded
+when the modules are loaded. However, all modules that define options
+must have been imported before the command line is parsed.
+
+Your ``main()`` method can parse the command line or parse a config file with
+either::
+
+ tornado.options.parse_command_line()
+ # or
+ tornado.options.parse_config_file("/etc/server.conf")
+
+.. note::
+
+ When using tornado.options.parse_command_line or
+ tornado.options.parse_config_file, the only options that are set are
+ ones that were previously defined with tornado.options.define.
+
+Command line formats are what you would expect (``--myoption=myvalue``).
+Config files are just Python files. Global names become options, e.g.::
+
+ myoption = "myvalue"
+ myotheroption = "myothervalue"
+
+We support `datetimes <datetime.datetime>`, `timedeltas
+<datetime.timedelta>`, ints, and floats (just pass a ``type`` kwarg to
+`define`). We also accept multi-value options. See the documentation for
+`define()` below.
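+
+For example, one might define a timedelta-valued and a multi-valued
+option like this (an illustrative sketch; these names are not options
+defined by Tornado itself)::
+
+    import datetime
+
+    define("request_timeout", type=datetime.timedelta, default=None,
+           help="e.g. --request-timeout=5s or --request-timeout=500ms")
+    define("ports", type=int, multiple=True,
+           help="e.g. --ports=8000,8004:8006")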
+
+`tornado.options.options` is a singleton instance of `OptionParser`, and
+the top-level functions in this module (`define`, `parse_command_line`, etc)
+simply call methods on it. You may create additional `OptionParser`
+instances to define isolated sets of options, such as for subcommands.
+
+.. note::
+
+ By default, several options are defined that will configure the
+ standard `logging` module when `parse_command_line` or `parse_config_file`
+ are called. If you want Tornado to leave the logging configuration
+ alone so you can manage it yourself, either pass ``--logging=none``
+ on the command line or do the following to disable it in code::
+
+ from tornado.options import options, parse_command_line
+ options.logging = None
+ parse_command_line()
+
+.. versionchanged:: 4.3
+ Dashes and underscores are fully interchangeable in option names;
+ options can be defined, set, and read with any mix of the two.
+ Dashes are typical for command-line usage while config files require
+ underscores.
+"""
+
+from __future__ import absolute_import, division, print_function
+
+import datetime
+import numbers
+import re
+import sys
+import os
+import textwrap
+
+from tornado.escape import _unicode, native_str
+from tornado.log import define_logging_options
+from tornado import stack_context
+from tornado.util import basestring_type, exec_in
+
+
+class Error(Exception):
+ """Exception raised by errors in the options module."""
+ pass
+
+
+class OptionParser(object):
+ """A collection of options, a dictionary with object-like access.
+
+ Normally accessed via static functions in the `tornado.options` module,
+ which reference a global instance.
+ """
+ def __init__(self):
+ # we have to use self.__dict__ because we override setattr.
+ self.__dict__['_options'] = {}
+ self.__dict__['_parse_callbacks'] = []
+ self.define("help", type=bool, help="show this help information",
+ callback=self._help_callback)
+
+ def _normalize_name(self, name):
+ return name.replace('_', '-')
+
+ def __getattr__(self, name):
+ name = self._normalize_name(name)
+ if isinstance(self._options.get(name), _Option):
+ return self._options[name].value()
+ raise AttributeError("Unrecognized option %r" % name)
+
+ def __setattr__(self, name, value):
+ name = self._normalize_name(name)
+ if isinstance(self._options.get(name), _Option):
+ return self._options[name].set(value)
+ raise AttributeError("Unrecognized option %r" % name)
+
+ def __iter__(self):
+ return (opt.name for opt in self._options.values())
+
+ def __contains__(self, name):
+ name = self._normalize_name(name)
+ return name in self._options
+
+ def __getitem__(self, name):
+ return self.__getattr__(name)
+
+ def __setitem__(self, name, value):
+ return self.__setattr__(name, value)
+
+ def items(self):
+ """A sequence of (name, value) pairs.
+
+ .. versionadded:: 3.1
+ """
+ return [(opt.name, opt.value()) for name, opt in self._options.items()]
+
+ def groups(self):
+ """The set of option-groups created by ``define``.
+
+ .. versionadded:: 3.1
+ """
+ return set(opt.group_name for opt in self._options.values())
+
+ def group_dict(self, group):
+ """The names and values of options in a group.
+
+ Useful for copying options into Application settings::
+
+ from tornado.options import define, parse_command_line, options
+
+ define('template_path', group='application')
+ define('static_path', group='application')
+
+ parse_command_line()
+
+ application = Application(
+ handlers, **options.group_dict('application'))
+
+ .. versionadded:: 3.1
+ """
+ return dict(
+ (opt.name, opt.value()) for name, opt in self._options.items()
+ if not group or group == opt.group_name)
+
+ def as_dict(self):
+ """The names and values of all options.
+
+ .. versionadded:: 3.1
+ """
+ return dict(
+ (opt.name, opt.value()) for name, opt in self._options.items())
+
+ def define(self, name, default=None, type=None, help=None, metavar=None,
+ multiple=False, group=None, callback=None):
+ """Defines a new command line option.
+
+ If ``type`` is given (one of str, float, int, datetime, or timedelta)
+ or can be inferred from the ``default``, we parse the command line
+ arguments based on the given type. If ``multiple`` is True, we accept
+ comma-separated values, and the option value is always a list.
+
+        For multi-value integers, we also accept the syntax ``x:y``, which
+        turns into ``range(x, y + 1)``, i.e. inclusive at both ends - very
+        useful for long integer ranges.
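+
+        For example (an illustrative sketch)::
+
+            define("ports", type=int, multiple=True)
+            # --ports=8000,8004:8006 yields [8000, 8004, 8005, 8006]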
+
+ ``help`` and ``metavar`` are used to construct the
+ automatically generated command line help string. The help
+ message is formatted like::
+
+ --name=METAVAR help string
+
+ ``group`` is used to group the defined options in logical
+ groups. By default, command line options are grouped by the
+ file in which they are defined.
+
+ Command line option names must be unique globally. They can be parsed
+ from the command line with `parse_command_line` or parsed from a
+ config file with `parse_config_file`.
+
+ If a ``callback`` is given, it will be run with the new value whenever
+ the option is changed. This can be used to combine command-line
+ and file-based options::
+
+ define("config", type=str, help="path to config file",
+ callback=lambda path: parse_config_file(path, final=False))
+
+ With this definition, options in the file specified by ``--config`` will
+ override options set earlier on the command line, but can be overridden
+ by later flags.
+ """
+ normalized = self._normalize_name(name)
+ if normalized in self._options:
+ raise Error("Option %r already defined in %s" %
+ (normalized, self._options[normalized].file_name))
+ frame = sys._getframe(0)
+ options_file = frame.f_code.co_filename
+
+ # Can be called directly, or through top level define() fn, in which
+ # case, step up above that frame to look for real caller.
+ if (frame.f_back.f_code.co_filename == options_file and
+ frame.f_back.f_code.co_name == 'define'):
+ frame = frame.f_back
+
+ file_name = frame.f_back.f_code.co_filename
+ if file_name == options_file:
+ file_name = ""
+ if type is None:
+ if not multiple and default is not None:
+ type = default.__class__
+ else:
+ type = str
+ if group:
+ group_name = group
+ else:
+ group_name = file_name
+ option = _Option(name, file_name=file_name,
+ default=default, type=type, help=help,
+ metavar=metavar, multiple=multiple,
+ group_name=group_name,
+ callback=callback)
+ self._options[normalized] = option
+
+ def parse_command_line(self, args=None, final=True):
+ """Parses all options given on the command line (defaults to
+ `sys.argv`).
+
+ Note that ``args[0]`` is ignored since it is the program name
+ in `sys.argv`.
+
+ We return a list of all arguments that are not parsed as options.
+
+ If ``final`` is ``False``, parse callbacks will not be run.
+ This is useful for applications that wish to combine configurations
+ from multiple sources.
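+
+        A minimal sketch (the ``port`` option is illustrative)::
+
+            define("port", type=int, default=8888)
+            remaining = options.parse_command_line(
+                ["prog", "--port=80", "input.txt"])
+            # options.port == 80; remaining == ["input.txt"]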
+ """
+ if args is None:
+ args = sys.argv
+ remaining = []
+ for i in range(1, len(args)):
+ # All things after the last option are command line arguments
+ if not args[i].startswith("-"):
+ remaining = args[i:]
+ break
+ if args[i] == "--":
+ remaining = args[i + 1:]
+ break
+ arg = args[i].lstrip("-")
+ name, equals, value = arg.partition("=")
+ name = self._normalize_name(name)
+ if name not in self._options:
+ self.print_help()
+ raise Error('Unrecognized command line option: %r' % name)
+ option = self._options[name]
+ if not equals:
+ if option.type == bool:
+ value = "true"
+ else:
+ raise Error('Option %r requires a value' % name)
+ option.parse(value)
+
+ if final:
+ self.run_parse_callbacks()
+
+ return remaining
+
+ def parse_config_file(self, path, final=True):
+ """Parses and loads the Python config file at the given path.
+
+ If ``final`` is ``False``, parse callbacks will not be run.
+ This is useful for applications that wish to combine configurations
+ from multiple sources.
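+
+        For example, given a config file like this (a sketch; ``port``
+        and ``mysql_host`` must already have been defined)::
+
+            # server.conf -- plain Python; global names become options
+            port = 80
+            mysql_host = "db.example.com:3306"
+
+        ``parse_config_file("server.conf")`` then sets ``options.port``
+        and ``options.mysql_host``.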
+
+ .. versionchanged:: 4.1
+ Config files are now always interpreted as utf-8 instead of
+ the system default encoding.
+
+ .. versionchanged:: 4.4
+ The special variable ``__file__`` is available inside config
+ files, specifying the absolute path to the config file itself.
+ """
+ config = {'__file__': os.path.abspath(path)}
+ with open(path, 'rb') as f:
+ exec_in(native_str(f.read()), config, config)
+ for name in config:
+ normalized = self._normalize_name(name)
+ if normalized in self._options:
+ self._options[normalized].set(config[name])
+
+ if final:
+ self.run_parse_callbacks()
+
+ def print_help(self, file=None):
+ """Prints all the command line options to stderr (or another file)."""
+ if file is None:
+ file = sys.stderr
+ print("Usage: %s [OPTIONS]" % sys.argv[0], file=file)
+ print("\nOptions:\n", file=file)
+ by_group = {}
+ for option in self._options.values():
+ by_group.setdefault(option.group_name, []).append(option)
+
+ for filename, o in sorted(by_group.items()):
+ if filename:
+ print("\n%s options:\n" % os.path.normpath(filename), file=file)
+ o.sort(key=lambda option: option.name)
+ for option in o:
+ # Always print names with dashes in a CLI context.
+ prefix = self._normalize_name(option.name)
+ if option.metavar:
+ prefix += "=" + option.metavar
+ description = option.help or ""
+ if option.default is not None and option.default != '':
+ description += " (default %s)" % option.default
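+                # Wrap the help text to fit beside the 35-column option
+                # prefix, assuming a conventional 79-column terminal.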
+ lines = textwrap.wrap(description, 79 - 35)
+ if len(prefix) > 30 or len(lines) == 0:
+ lines.insert(0, '')
+ print(" --%-30s %s" % (prefix, lines[0]), file=file)
+ for line in lines[1:]:
+ print("%-34s %s" % (' ', line), file=file)
+ print(file=file)
+
+ def _help_callback(self, value):
+ if value:
+ self.print_help()
+ sys.exit(0)
+
+ def add_parse_callback(self, callback):
+ """Adds a parse callback, to be invoked when option parsing is done."""
+ self._parse_callbacks.append(stack_context.wrap(callback))
+
+ def run_parse_callbacks(self):
+ for callback in self._parse_callbacks:
+ callback()
+
+ def mockable(self):
+ """Returns a wrapper around self that is compatible with
+ `mock.patch <unittest.mock.patch>`.
+
+ The `mock.patch <unittest.mock.patch>` function (included in
+ the standard library `unittest.mock` package since Python 3.3,
+ or in the third-party ``mock`` package for older versions of
+ Python) is incompatible with objects like ``options`` that
+ override ``__getattr__`` and ``__setattr__``. This function
+ returns an object that can be used with `mock.patch.object
+ <unittest.mock.patch.object>` to modify option values::
+
+ with mock.patch.object(options.mockable(), 'name', value):
+ assert options.name == value
+ """
+ return _Mockable(self)
+
+
+class _Mockable(object):
+ """`mock.patch` compatible wrapper for `OptionParser`.
+
+ As of ``mock`` version 1.0.1, when an object uses ``__getattr__``
+ hooks instead of ``__dict__``, ``patch.__exit__`` tries to delete
+ the attribute it set instead of setting a new one (assuming that
+    the object does not capture ``__setattr__``, so the patch
+ created a new attribute in ``__dict__``).
+
+ _Mockable's getattr and setattr pass through to the underlying
+ OptionParser, and delattr undoes the effect of a previous setattr.
+ """
+ def __init__(self, options):
+ # Modify __dict__ directly to bypass __setattr__
+ self.__dict__['_options'] = options
+ self.__dict__['_originals'] = {}
+
+ def __getattr__(self, name):
+ return getattr(self._options, name)
+
+ def __setattr__(self, name, value):
+ assert name not in self._originals, "don't reuse mockable objects"
+ self._originals[name] = getattr(self._options, name)
+ setattr(self._options, name, value)
+
+ def __delattr__(self, name):
+ setattr(self._options, name, self._originals.pop(name))
+
+
+class _Option(object):
+ UNSET = object()
+
+ def __init__(self, name, default=None, type=basestring_type, help=None,
+ metavar=None, multiple=False, file_name=None, group_name=None,
+ callback=None):
+ if default is None and multiple:
+ default = []
+ self.name = name
+ self.type = type
+ self.help = help
+ self.metavar = metavar
+ self.multiple = multiple
+ self.file_name = file_name
+ self.group_name = group_name
+ self.callback = callback
+ self.default = default
+ self._value = _Option.UNSET
+
+ def value(self):
+ return self.default if self._value is _Option.UNSET else self._value
+
+ def parse(self, value):
+ _parse = {
+ datetime.datetime: self._parse_datetime,
+ datetime.timedelta: self._parse_timedelta,
+ bool: self._parse_bool,
+ basestring_type: self._parse_string,
+ }.get(self.type, self.type)
+ if self.multiple:
+ self._value = []
+ for part in value.split(","):
+ if issubclass(self.type, numbers.Integral):
+ # allow ranges of the form X:Y (inclusive at both ends)
+ lo, _, hi = part.partition(":")
+ lo = _parse(lo)
+ hi = _parse(hi) if hi else lo
+ self._value.extend(range(lo, hi + 1))
+ else:
+ self._value.append(_parse(part))
+ else:
+ self._value = _parse(value)
+ if self.callback is not None:
+ self.callback(self._value)
+ return self.value()
+
+ def set(self, value):
+ if self.multiple:
+ if not isinstance(value, list):
+ raise Error("Option %r is required to be a list of %s" %
+ (self.name, self.type.__name__))
+ for item in value:
+ if item is not None and not isinstance(item, self.type):
+ raise Error("Option %r is required to be a list of %s" %
+ (self.name, self.type.__name__))
+ else:
+ if value is not None and not isinstance(value, self.type):
+ raise Error("Option %r is required to be a %s (%s given)" %
+ (self.name, self.type.__name__, type(value)))
+ self._value = value
+ if self.callback is not None:
+ self.callback(self._value)
+
+ # Supported date/time formats in our options
+ _DATETIME_FORMATS = [
+ "%a %b %d %H:%M:%S %Y",
+ "%Y-%m-%d %H:%M:%S",
+ "%Y-%m-%d %H:%M",
+ "%Y-%m-%dT%H:%M",
+ "%Y%m%d %H:%M:%S",
+ "%Y%m%d %H:%M",
+ "%Y-%m-%d",
+ "%Y%m%d",
+ "%H:%M:%S",
+ "%H:%M",
+ ]
+
+ def _parse_datetime(self, value):
+ for format in self._DATETIME_FORMATS:
+ try:
+ return datetime.datetime.strptime(value, format)
+ except ValueError:
+ pass
+ raise Error('Unrecognized date/time format: %r' % value)
+
+ _TIMEDELTA_ABBREV_DICT = {
+ 'h': 'hours',
+ 'm': 'minutes',
+ 'min': 'minutes',
+ 's': 'seconds',
+ 'sec': 'seconds',
+ 'ms': 'milliseconds',
+ 'us': 'microseconds',
+ 'd': 'days',
+ 'w': 'weeks',
+ }
+
+ _FLOAT_PATTERN = r'[-+]?(?:\d+(?:\.\d*)?|\.\d+)(?:[eE][-+]?\d+)?'
+
+ _TIMEDELTA_PATTERN = re.compile(
+ r'\s*(%s)\s*(\w*)\s*' % _FLOAT_PATTERN, re.IGNORECASE)
+
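+    # A timedelta value is one or more "<float><unit>" terms, e.g.
+    # "1h30m" -> timedelta(hours=1, minutes=30); a bare number such
+    # as "45" is interpreted as seconds.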
+ def _parse_timedelta(self, value):
+ try:
+ sum = datetime.timedelta()
+ start = 0
+ while start < len(value):
+ m = self._TIMEDELTA_PATTERN.match(value, start)
+ if not m:
+ raise Exception()
+ num = float(m.group(1))
+ units = m.group(2) or 'seconds'
+ units = self._TIMEDELTA_ABBREV_DICT.get(units, units)
+ sum += datetime.timedelta(**{units: num})
+ start = m.end()
+ return sum
+ except Exception:
+ raise
+
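+    # Anything other than "false", "0", or "f" (case-insensitive)
+    # parses as true.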
+ def _parse_bool(self, value):
+ return value.lower() not in ("false", "0", "f")
+
+ def _parse_string(self, value):
+ return _unicode(value)
+
+
+options = OptionParser()
+"""Global options object.
+
+All defined options are available as attributes on this object.
+"""
+
+
+def define(name, default=None, type=None, help=None, metavar=None,
+ multiple=False, group=None, callback=None):
+ """Defines an option in the global namespace.
+
+ See `OptionParser.define`.
+ """
+ return options.define(name, default=default, type=type, help=help,
+ metavar=metavar, multiple=multiple, group=group,
+ callback=callback)
+
+
+def parse_command_line(args=None, final=True):
+ """Parses global options from the command line.
+
+ See `OptionParser.parse_command_line`.
+ """
+ return options.parse_command_line(args, final=final)
+
+
+def parse_config_file(path, final=True):
+ """Parses global options from a config file.
+
+ See `OptionParser.parse_config_file`.
+ """
+ return options.parse_config_file(path, final=final)
+
+
+def print_help(file=None):
+ """Prints all the command line options to stderr (or another file).
+
+ See `OptionParser.print_help`.
+ """
+ return options.print_help(file)
+
+
+def add_parse_callback(callback):
+ """Adds a parse callback, to be invoked when option parsing is done.
+
+ See `OptionParser.add_parse_callback`
+ """
+ options.add_parse_callback(callback)
+
+
+# Default options
+define_logging_options(options)
diff --git a/contrib/python/tornado/tornado-4/tornado/platform/asyncio.py b/contrib/python/tornado/tornado-4/tornado/platform/asyncio.py
index 830ee1f3b1..e30277225f 100644
--- a/contrib/python/tornado/tornado-4/tornado/platform/asyncio.py
+++ b/contrib/python/tornado/tornado-4/tornado/platform/asyncio.py
@@ -1,222 +1,222 @@
-"""Bridges between the `asyncio` module and Tornado IOLoop.
-
-.. versionadded:: 3.2
-
-This module integrates Tornado with the ``asyncio`` module introduced
-in Python 3.4 (and available `as a separate download
-<https://pypi.python.org/pypi/asyncio>`_ for Python 3.3). This makes
-it possible to combine the two libraries on the same event loop.
-
-Most applications should use `AsyncIOMainLoop` to run Tornado on the
-default ``asyncio`` event loop. Applications that need to run event
-loops on multiple threads may use `AsyncIOLoop` to create multiple
-loops.
-
-.. note::
-
- Tornado requires the `~asyncio.AbstractEventLoop.add_reader` family of
- methods, so it is not compatible with the `~asyncio.ProactorEventLoop` on
- Windows. Use the `~asyncio.SelectorEventLoop` instead.
-"""
-
-from __future__ import absolute_import, division, print_function
-import functools
-
-import tornado.concurrent
-from tornado.gen import convert_yielded
-from tornado.ioloop import IOLoop
-from tornado import stack_context
-
-try:
- # Import the real asyncio module for py33+ first. Older versions of the
- # trollius backport also use this name.
- import asyncio # type: ignore
-except ImportError as e:
- # Asyncio itself isn't available; see if trollius is (backport to py26+).
- try:
- import trollius as asyncio # type: ignore
- except ImportError:
- # Re-raise the original asyncio error, not the trollius one.
- raise e
-
-
-class BaseAsyncIOLoop(IOLoop):
- def initialize(self, asyncio_loop, close_loop=False, **kwargs):
- super(BaseAsyncIOLoop, self).initialize(**kwargs)
- self.asyncio_loop = asyncio_loop
- self.close_loop = close_loop
- # Maps fd to (fileobj, handler function) pair (as in IOLoop.add_handler)
- self.handlers = {}
- # Set of fds listening for reads/writes
- self.readers = set()
- self.writers = set()
- self.closing = False
-
- def close(self, all_fds=False):
- self.closing = True
- for fd in list(self.handlers):
- fileobj, handler_func = self.handlers[fd]
- self.remove_handler(fd)
- if all_fds:
- self.close_fd(fileobj)
- if self.close_loop:
- self.asyncio_loop.close()
-
- def add_handler(self, fd, handler, events):
- fd, fileobj = self.split_fd(fd)
- if fd in self.handlers:
- raise ValueError("fd %s added twice" % fd)
- self.handlers[fd] = (fileobj, stack_context.wrap(handler))
- if events & IOLoop.READ:
- self.asyncio_loop.add_reader(
- fd, self._handle_events, fd, IOLoop.READ)
- self.readers.add(fd)
- if events & IOLoop.WRITE:
- self.asyncio_loop.add_writer(
- fd, self._handle_events, fd, IOLoop.WRITE)
- self.writers.add(fd)
-
- def update_handler(self, fd, events):
- fd, fileobj = self.split_fd(fd)
- if events & IOLoop.READ:
- if fd not in self.readers:
- self.asyncio_loop.add_reader(
- fd, self._handle_events, fd, IOLoop.READ)
- self.readers.add(fd)
- else:
- if fd in self.readers:
- self.asyncio_loop.remove_reader(fd)
- self.readers.remove(fd)
- if events & IOLoop.WRITE:
- if fd not in self.writers:
- self.asyncio_loop.add_writer(
- fd, self._handle_events, fd, IOLoop.WRITE)
- self.writers.add(fd)
- else:
- if fd in self.writers:
- self.asyncio_loop.remove_writer(fd)
- self.writers.remove(fd)
-
- def remove_handler(self, fd):
- fd, fileobj = self.split_fd(fd)
- if fd not in self.handlers:
- return
- if fd in self.readers:
- self.asyncio_loop.remove_reader(fd)
- self.readers.remove(fd)
- if fd in self.writers:
- self.asyncio_loop.remove_writer(fd)
- self.writers.remove(fd)
- del self.handlers[fd]
-
- def _handle_events(self, fd, events):
- fileobj, handler_func = self.handlers[fd]
- handler_func(fileobj, events)
-
- def start(self):
- old_current = IOLoop.current(instance=False)
- try:
- self._setup_logging()
- self.make_current()
- self.asyncio_loop.run_forever()
- finally:
- if old_current is None:
- IOLoop.clear_current()
- else:
- old_current.make_current()
-
- def stop(self):
- self.asyncio_loop.stop()
-
- def call_at(self, when, callback, *args, **kwargs):
- # asyncio.call_at supports *args but not **kwargs, so bind them here.
- # We do not synchronize self.time and asyncio_loop.time, so
- # convert from absolute to relative.
- return self.asyncio_loop.call_later(
- max(0, when - self.time()), self._run_callback,
- functools.partial(stack_context.wrap(callback), *args, **kwargs))
-
- def remove_timeout(self, timeout):
- timeout.cancel()
-
- def add_callback(self, callback, *args, **kwargs):
- if self.closing:
- # TODO: this is racy; we need a lock to ensure that the
- # loop isn't closed during call_soon_threadsafe.
- raise RuntimeError("IOLoop is closing")
- self.asyncio_loop.call_soon_threadsafe(
- self._run_callback,
- functools.partial(stack_context.wrap(callback), *args, **kwargs))
-
- add_callback_from_signal = add_callback
-
-
-class AsyncIOMainLoop(BaseAsyncIOLoop):
- """``AsyncIOMainLoop`` creates an `.IOLoop` that corresponds to the
- current ``asyncio`` event loop (i.e. the one returned by
- ``asyncio.get_event_loop()``). Recommended usage::
-
- from tornado.platform.asyncio import AsyncIOMainLoop
- import asyncio
- AsyncIOMainLoop().install()
- asyncio.get_event_loop().run_forever()
-
- See also :meth:`tornado.ioloop.IOLoop.install` for general notes on
- installing alternative IOLoops.
- """
- def initialize(self, **kwargs):
- super(AsyncIOMainLoop, self).initialize(asyncio.get_event_loop(),
- close_loop=False, **kwargs)
-
-
-class AsyncIOLoop(BaseAsyncIOLoop):
- """``AsyncIOLoop`` is an `.IOLoop` that runs on an ``asyncio`` event loop.
- This class follows the usual Tornado semantics for creating new
- ``IOLoops``; these loops are not necessarily related to the
- ``asyncio`` default event loop. Recommended usage::
-
- from tornado.ioloop import IOLoop
- IOLoop.configure('tornado.platform.asyncio.AsyncIOLoop')
- IOLoop.current().start()
-
-    Each ``AsyncIOLoop`` creates a new ``asyncio`` event loop; this object
- can be accessed with the ``asyncio_loop`` attribute.
- """
- def initialize(self, **kwargs):
- loop = asyncio.new_event_loop()
- try:
- super(AsyncIOLoop, self).initialize(loop, close_loop=True, **kwargs)
- except Exception:
- # If initialize() does not succeed (taking ownership of the loop),
- # we have to close it.
- loop.close()
- raise
-
-
-def to_tornado_future(asyncio_future):
- """Convert an `asyncio.Future` to a `tornado.concurrent.Future`.
-
- .. versionadded:: 4.1
- """
- tf = tornado.concurrent.Future()
- tornado.concurrent.chain_future(asyncio_future, tf)
- return tf
-
-
-def to_asyncio_future(tornado_future):
- """Convert a Tornado yieldable object to an `asyncio.Future`.
-
- .. versionadded:: 4.1
-
- .. versionchanged:: 4.3
- Now accepts any yieldable object, not just
- `tornado.concurrent.Future`.
- """
- tornado_future = convert_yielded(tornado_future)
- af = asyncio.Future()
- tornado.concurrent.chain_future(tornado_future, af)
- return af
-
-
-if hasattr(convert_yielded, 'register'):
- convert_yielded.register(asyncio.Future, to_tornado_future) # type: ignore
+"""Bridges between the `asyncio` module and Tornado IOLoop.
+
+.. versionadded:: 3.2
+
+This module integrates Tornado with the ``asyncio`` module introduced
+in Python 3.4 (and available `as a separate download
+<https://pypi.python.org/pypi/asyncio>`_ for Python 3.3). This makes
+it possible to combine the two libraries on the same event loop.
+
+Most applications should use `AsyncIOMainLoop` to run Tornado on the
+default ``asyncio`` event loop. Applications that need to run event
+loops on multiple threads may use `AsyncIOLoop` to create multiple
+loops.
+
+.. note::
+
+ Tornado requires the `~asyncio.AbstractEventLoop.add_reader` family of
+ methods, so it is not compatible with the `~asyncio.ProactorEventLoop` on
+ Windows. Use the `~asyncio.SelectorEventLoop` instead.
+"""
+
+from __future__ import absolute_import, division, print_function
+import functools
+
+import tornado.concurrent
+from tornado.gen import convert_yielded
+from tornado.ioloop import IOLoop
+from tornado import stack_context
+
+try:
+ # Import the real asyncio module for py33+ first. Older versions of the
+ # trollius backport also use this name.
+ import asyncio # type: ignore
+except ImportError as e:
+ # Asyncio itself isn't available; see if trollius is (backport to py26+).
+ try:
+ import trollius as asyncio # type: ignore
+ except ImportError:
+ # Re-raise the original asyncio error, not the trollius one.
+ raise e
+
+
+class BaseAsyncIOLoop(IOLoop):
+ def initialize(self, asyncio_loop, close_loop=False, **kwargs):
+ super(BaseAsyncIOLoop, self).initialize(**kwargs)
+ self.asyncio_loop = asyncio_loop
+ self.close_loop = close_loop
+ # Maps fd to (fileobj, handler function) pair (as in IOLoop.add_handler)
+ self.handlers = {}
+ # Set of fds listening for reads/writes
+ self.readers = set()
+ self.writers = set()
+ self.closing = False
+
+ def close(self, all_fds=False):
+ self.closing = True
+ for fd in list(self.handlers):
+ fileobj, handler_func = self.handlers[fd]
+ self.remove_handler(fd)
+ if all_fds:
+ self.close_fd(fileobj)
+ if self.close_loop:
+ self.asyncio_loop.close()
+
+ def add_handler(self, fd, handler, events):
+ fd, fileobj = self.split_fd(fd)
+ if fd in self.handlers:
+ raise ValueError("fd %s added twice" % fd)
+ self.handlers[fd] = (fileobj, stack_context.wrap(handler))
+ if events & IOLoop.READ:
+ self.asyncio_loop.add_reader(
+ fd, self._handle_events, fd, IOLoop.READ)
+ self.readers.add(fd)
+ if events & IOLoop.WRITE:
+ self.asyncio_loop.add_writer(
+ fd, self._handle_events, fd, IOLoop.WRITE)
+ self.writers.add(fd)
+
+ def update_handler(self, fd, events):
+ fd, fileobj = self.split_fd(fd)
+ if events & IOLoop.READ:
+ if fd not in self.readers:
+ self.asyncio_loop.add_reader(
+ fd, self._handle_events, fd, IOLoop.READ)
+ self.readers.add(fd)
+ else:
+ if fd in self.readers:
+ self.asyncio_loop.remove_reader(fd)
+ self.readers.remove(fd)
+ if events & IOLoop.WRITE:
+ if fd not in self.writers:
+ self.asyncio_loop.add_writer(
+ fd, self._handle_events, fd, IOLoop.WRITE)
+ self.writers.add(fd)
+ else:
+ if fd in self.writers:
+ self.asyncio_loop.remove_writer(fd)
+ self.writers.remove(fd)
+
+ def remove_handler(self, fd):
+ fd, fileobj = self.split_fd(fd)
+ if fd not in self.handlers:
+ return
+ if fd in self.readers:
+ self.asyncio_loop.remove_reader(fd)
+ self.readers.remove(fd)
+ if fd in self.writers:
+ self.asyncio_loop.remove_writer(fd)
+ self.writers.remove(fd)
+ del self.handlers[fd]
+
+ def _handle_events(self, fd, events):
+ fileobj, handler_func = self.handlers[fd]
+ handler_func(fileobj, events)
+
+ def start(self):
+ old_current = IOLoop.current(instance=False)
+ try:
+ self._setup_logging()
+ self.make_current()
+ self.asyncio_loop.run_forever()
+ finally:
+ if old_current is None:
+ IOLoop.clear_current()
+ else:
+ old_current.make_current()
+
+ def stop(self):
+ self.asyncio_loop.stop()
+
+ def call_at(self, when, callback, *args, **kwargs):
+ # asyncio.call_at supports *args but not **kwargs, so bind them here.
+ # We do not synchronize self.time and asyncio_loop.time, so
+ # convert from absolute to relative.
+ return self.asyncio_loop.call_later(
+ max(0, when - self.time()), self._run_callback,
+ functools.partial(stack_context.wrap(callback), *args, **kwargs))
+
+ def remove_timeout(self, timeout):
+ timeout.cancel()
+
+ def add_callback(self, callback, *args, **kwargs):
+ if self.closing:
+ # TODO: this is racy; we need a lock to ensure that the
+ # loop isn't closed during call_soon_threadsafe.
+ raise RuntimeError("IOLoop is closing")
+ self.asyncio_loop.call_soon_threadsafe(
+ self._run_callback,
+ functools.partial(stack_context.wrap(callback), *args, **kwargs))
+
+ add_callback_from_signal = add_callback
+
+
+class AsyncIOMainLoop(BaseAsyncIOLoop):
+ """``AsyncIOMainLoop`` creates an `.IOLoop` that corresponds to the
+ current ``asyncio`` event loop (i.e. the one returned by
+ ``asyncio.get_event_loop()``). Recommended usage::
+
+ from tornado.platform.asyncio import AsyncIOMainLoop
+ import asyncio
+ AsyncIOMainLoop().install()
+ asyncio.get_event_loop().run_forever()
+
+ See also :meth:`tornado.ioloop.IOLoop.install` for general notes on
+ installing alternative IOLoops.
+ """
+ def initialize(self, **kwargs):
+ super(AsyncIOMainLoop, self).initialize(asyncio.get_event_loop(),
+ close_loop=False, **kwargs)
+
+
+class AsyncIOLoop(BaseAsyncIOLoop):
+ """``AsyncIOLoop`` is an `.IOLoop` that runs on an ``asyncio`` event loop.
+ This class follows the usual Tornado semantics for creating new
+ ``IOLoops``; these loops are not necessarily related to the
+ ``asyncio`` default event loop. Recommended usage::
+
+ from tornado.ioloop import IOLoop
+ IOLoop.configure('tornado.platform.asyncio.AsyncIOLoop')
+ IOLoop.current().start()
+
+    Each ``AsyncIOLoop`` creates a new ``asyncio`` event loop; this object
+ can be accessed with the ``asyncio_loop`` attribute.
+ """
+ def initialize(self, **kwargs):
+ loop = asyncio.new_event_loop()
+ try:
+ super(AsyncIOLoop, self).initialize(loop, close_loop=True, **kwargs)
+ except Exception:
+ # If initialize() does not succeed (taking ownership of the loop),
+ # we have to close it.
+ loop.close()
+ raise
+
+
+def to_tornado_future(asyncio_future):
+ """Convert an `asyncio.Future` to a `tornado.concurrent.Future`.
+
+ .. versionadded:: 4.1
+ """
+ tf = tornado.concurrent.Future()
+ tornado.concurrent.chain_future(asyncio_future, tf)
+ return tf
+
+
+def to_asyncio_future(tornado_future):
+ """Convert a Tornado yieldable object to an `asyncio.Future`.
+
+ .. versionadded:: 4.1
+
+ .. versionchanged:: 4.3
+ Now accepts any yieldable object, not just
+ `tornado.concurrent.Future`.
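+
+    A minimal usage sketch (``fetch`` stands in for any Tornado
+    coroutine or yieldable)::
+
+        # assumes AsyncIOMainLoop().install() has been called so that
+        # Tornado and asyncio share the same event loop
+        loop = asyncio.get_event_loop()
+        result = loop.run_until_complete(to_asyncio_future(fetch()))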
+ """
+ tornado_future = convert_yielded(tornado_future)
+ af = asyncio.Future()
+ tornado.concurrent.chain_future(tornado_future, af)
+ return af
+
+
+if hasattr(convert_yielded, 'register'):
+ convert_yielded.register(asyncio.Future, to_tornado_future) # type: ignore
diff --git a/contrib/python/tornado/tornado-4/tornado/platform/auto.py b/contrib/python/tornado/tornado-4/tornado/platform/auto.py
index 1f4d700193..6a1a2d8fa4 100644
--- a/contrib/python/tornado/tornado-4/tornado/platform/auto.py
+++ b/contrib/python/tornado/tornado-4/tornado/platform/auto.py
@@ -1,59 +1,59 @@
-#!/usr/bin/env python
-#
-# Copyright 2011 Facebook
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Implementation of platform-specific functionality.
-
-For each function or class described in `tornado.platform.interface`,
-the appropriate platform-specific implementation exists in this module.
-Most code that needs access to this functionality should do e.g.::
-
- from tornado.platform.auto import set_close_exec
-"""
-
-from __future__ import absolute_import, division, print_function
-
-import os
-
-if 'APPENGINE_RUNTIME' in os.environ:
- from tornado.platform.common import Waker
-
- def set_close_exec(fd):
- pass
-elif os.name == 'nt':
- from tornado.platform.common import Waker
- from tornado.platform.windows import set_close_exec
-else:
- from tornado.platform.posix import set_close_exec, Waker
-
-try:
- # monotime monkey-patches the time module to have a monotonic function
- # in versions of python before 3.3.
- import monotime
- # Silence pyflakes warning about this unused import
- monotime
-except ImportError:
- pass
-try:
- # monotonic can provide a monotonic function in versions of python before
- # 3.3, too.
- from monotonic import monotonic as monotonic_time
-except ImportError:
- try:
- from time import monotonic as monotonic_time
- except ImportError:
- monotonic_time = None
-
-__all__ = ['Waker', 'set_close_exec', 'monotonic_time']
+#!/usr/bin/env python
+#
+# Copyright 2011 Facebook
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Implementation of platform-specific functionality.
+
+For each function or class described in `tornado.platform.interface`,
+the appropriate platform-specific implementation exists in this module.
+Most code that needs access to this functionality should do e.g.::
+
+ from tornado.platform.auto import set_close_exec
+"""
+
+from __future__ import absolute_import, division, print_function
+
+import os
+
+if 'APPENGINE_RUNTIME' in os.environ:
+ from tornado.platform.common import Waker
+
+ def set_close_exec(fd):
+ pass
+elif os.name == 'nt':
+ from tornado.platform.common import Waker
+ from tornado.platform.windows import set_close_exec
+else:
+ from tornado.platform.posix import set_close_exec, Waker
+
+try:
+ # monotime monkey-patches the time module to have a monotonic function
+ # in versions of python before 3.3.
+ import monotime
+ # Silence pyflakes warning about this unused import
+ monotime
+except ImportError:
+ pass
+try:
+ # monotonic can provide a monotonic function in versions of python before
+ # 3.3, too.
+ from monotonic import monotonic as monotonic_time
+except ImportError:
+ try:
+ from time import monotonic as monotonic_time
+ except ImportError:
+ monotonic_time = None
+
+__all__ = ['Waker', 'set_close_exec', 'monotonic_time']
diff --git a/contrib/python/tornado/tornado-4/tornado/platform/caresresolver.py b/contrib/python/tornado/tornado-4/tornado/platform/caresresolver.py
index fd6e9d2748..3732ca9194 100644
--- a/contrib/python/tornado/tornado-4/tornado/platform/caresresolver.py
+++ b/contrib/python/tornado/tornado-4/tornado/platform/caresresolver.py
@@ -1,79 +1,79 @@
-from __future__ import absolute_import, division, print_function
-import pycares # type: ignore
-import socket
-
-from tornado import gen
-from tornado.ioloop import IOLoop
-from tornado.netutil import Resolver, is_valid_ip
-
-
-class CaresResolver(Resolver):
- """Name resolver based on the c-ares library.
-
- This is a non-blocking and non-threaded resolver. It may not produce
- the same results as the system resolver, but can be used for non-blocking
- resolution when threads cannot be used.
-
- c-ares fails to resolve some names when ``family`` is ``AF_UNSPEC``,
- so it is only recommended for use in ``AF_INET`` (i.e. IPv4). This is
- the default for ``tornado.simple_httpclient``, but other libraries
- may default to ``AF_UNSPEC``.
-
- .. versionchanged:: 4.1
- The ``io_loop`` argument is deprecated.
- """
- def initialize(self, io_loop=None):
- self.io_loop = io_loop or IOLoop.current()
- self.channel = pycares.Channel(sock_state_cb=self._sock_state_cb)
- self.fds = {}
-
- def _sock_state_cb(self, fd, readable, writable):
- state = ((IOLoop.READ if readable else 0) |
- (IOLoop.WRITE if writable else 0))
- if not state:
- self.io_loop.remove_handler(fd)
- del self.fds[fd]
- elif fd in self.fds:
- self.io_loop.update_handler(fd, state)
- self.fds[fd] = state
- else:
- self.io_loop.add_handler(fd, self._handle_events, state)
- self.fds[fd] = state
-
- def _handle_events(self, fd, events):
- read_fd = pycares.ARES_SOCKET_BAD
- write_fd = pycares.ARES_SOCKET_BAD
- if events & IOLoop.READ:
- read_fd = fd
- if events & IOLoop.WRITE:
- write_fd = fd
- self.channel.process_fd(read_fd, write_fd)
-
- @gen.coroutine
- def resolve(self, host, port, family=0):
- if is_valid_ip(host):
- addresses = [host]
- else:
- # gethostbyname doesn't take callback as a kwarg
- self.channel.gethostbyname(host, family, (yield gen.Callback(1)))
- callback_args = yield gen.Wait(1)
- assert isinstance(callback_args, gen.Arguments)
- assert not callback_args.kwargs
- result, error = callback_args.args
- if error:
- raise IOError('C-Ares returned error %s: %s while resolving %s' %
- (error, pycares.errno.strerror(error), host))
- addresses = result.addresses
- addrinfo = []
- for address in addresses:
- if '.' in address:
- address_family = socket.AF_INET
- elif ':' in address:
- address_family = socket.AF_INET6
- else:
- address_family = socket.AF_UNSPEC
- if family != socket.AF_UNSPEC and family != address_family:
- raise IOError('Requested socket family %d but got %d' %
- (family, address_family))
- addrinfo.append((address_family, (address, port)))
- raise gen.Return(addrinfo)
+from __future__ import absolute_import, division, print_function
+import pycares # type: ignore
+import socket
+
+from tornado import gen
+from tornado.ioloop import IOLoop
+from tornado.netutil import Resolver, is_valid_ip
+
+
+class CaresResolver(Resolver):
+ """Name resolver based on the c-ares library.
+
+ This is a non-blocking and non-threaded resolver. It may not produce
+ the same results as the system resolver, but can be used for non-blocking
+ resolution when threads cannot be used.
+
+ c-ares fails to resolve some names when ``family`` is ``AF_UNSPEC``,
+ so it is only recommended for use in ``AF_INET`` (i.e. IPv4). This is
+ the default for ``tornado.simple_httpclient``, but other libraries
+ may default to ``AF_UNSPEC``.
+
+ .. versionchanged:: 4.1
+ The ``io_loop`` argument is deprecated.
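+
+    A minimal usage sketch (inside a ``@gen.coroutine``)::
+
+        resolver = CaresResolver()
+        addrinfo = yield resolver.resolve('example.com', 80,
+                                          family=socket.AF_INET)
+        # e.g. [(socket.AF_INET, ('93.184.216.34', 80))]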
+ """
+ def initialize(self, io_loop=None):
+ self.io_loop = io_loop or IOLoop.current()
+ self.channel = pycares.Channel(sock_state_cb=self._sock_state_cb)
+ self.fds = {}
+
+ def _sock_state_cb(self, fd, readable, writable):
+ state = ((IOLoop.READ if readable else 0) |
+ (IOLoop.WRITE if writable else 0))
+ if not state:
+ self.io_loop.remove_handler(fd)
+ del self.fds[fd]
+ elif fd in self.fds:
+ self.io_loop.update_handler(fd, state)
+ self.fds[fd] = state
+ else:
+ self.io_loop.add_handler(fd, self._handle_events, state)
+ self.fds[fd] = state
+
+ def _handle_events(self, fd, events):
+ read_fd = pycares.ARES_SOCKET_BAD
+ write_fd = pycares.ARES_SOCKET_BAD
+ if events & IOLoop.READ:
+ read_fd = fd
+ if events & IOLoop.WRITE:
+ write_fd = fd
+ self.channel.process_fd(read_fd, write_fd)
+
+ @gen.coroutine
+ def resolve(self, host, port, family=0):
+ if is_valid_ip(host):
+ addresses = [host]
+ else:
+ # gethostbyname doesn't take callback as a kwarg
+ self.channel.gethostbyname(host, family, (yield gen.Callback(1)))
+ callback_args = yield gen.Wait(1)
+ assert isinstance(callback_args, gen.Arguments)
+ assert not callback_args.kwargs
+ result, error = callback_args.args
+ if error:
+ raise IOError('C-Ares returned error %s: %s while resolving %s' %
+ (error, pycares.errno.strerror(error), host))
+ addresses = result.addresses
+ addrinfo = []
+ for address in addresses:
+ if '.' in address:
+ address_family = socket.AF_INET
+ elif ':' in address:
+ address_family = socket.AF_INET6
+ else:
+ address_family = socket.AF_UNSPEC
+ if family != socket.AF_UNSPEC and family != address_family:
+ raise IOError('Requested socket family %d but got %d' %
+ (family, address_family))
+ addrinfo.append((address_family, (address, port)))
+ raise gen.Return(addrinfo)
diff --git a/contrib/python/tornado/tornado-4/tornado/platform/common.py b/contrib/python/tornado/tornado-4/tornado/platform/common.py
index b597748d1f..8cd6c126f5 100644
--- a/contrib/python/tornado/tornado-4/tornado/platform/common.py
+++ b/contrib/python/tornado/tornado-4/tornado/platform/common.py
@@ -1,113 +1,113 @@
-"""Lowest-common-denominator implementations of platform functionality."""
-from __future__ import absolute_import, division, print_function
-
-import errno
-import socket
-import time
-
-from tornado.platform import interface
-from tornado.util import errno_from_exception
-
-
-def try_close(f):
- # Avoid issue #875 (race condition when using the file in another
- # thread).
- for i in range(10):
- try:
- f.close()
- except IOError:
- # Yield to another thread
- time.sleep(1e-3)
- else:
- break
-    # Try one last time and let any exception propagate
- f.close()
-
-
-class Waker(interface.Waker):
- """Create an OS independent asynchronous pipe.
-
- For use on platforms that don't have os.pipe() (or where pipes cannot
- be passed to select()), but do have sockets. This includes Windows
- and Jython.
- """
- def __init__(self):
- from .auto import set_close_exec
- # Based on Zope select_trigger.py:
- # https://github.com/zopefoundation/Zope/blob/master/src/ZServer/medusa/thread/select_trigger.py
-
- self.writer = socket.socket()
- set_close_exec(self.writer.fileno())
- # Disable buffering -- pulling the trigger sends 1 byte,
- # and we want that sent immediately, to wake up ASAP.
- self.writer.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
-
- count = 0
- while 1:
- count += 1
- # Bind to a local port; for efficiency, let the OS pick
- # a free port for us.
- # Unfortunately, stress tests showed that we may not
- # be able to connect to that port ("Address already in
- # use") despite that the OS picked it. This appears
- # to be a race bug in the Windows socket implementation.
- # So we loop until a connect() succeeds (almost always
- # on the first try). See the long thread at
- # http://mail.zope.org/pipermail/zope/2005-July/160433.html
- # for hideous details.
- a = socket.socket()
- set_close_exec(a.fileno())
- a.bind(("127.0.0.1", 0))
- a.listen(1)
- connect_address = a.getsockname() # assigned (host, port) pair
- try:
- self.writer.connect(connect_address)
- break # success
- except socket.error as detail:
- if (not hasattr(errno, 'WSAEADDRINUSE') or
- errno_from_exception(detail) != errno.WSAEADDRINUSE):
- # "Address already in use" is the only error
- # I've seen on two WinXP Pro SP2 boxes, under
- # Pythons 2.3.5 and 2.4.1.
- raise
- # (10048, 'Address already in use')
- # assert count <= 2 # never triggered in Tim's tests
- if count >= 10: # I've never seen it go above 2
- a.close()
- self.writer.close()
- raise socket.error("Cannot bind trigger!")
- # Close `a` and try again. Note: I originally put a short
- # sleep() here, but it didn't appear to help or hurt.
- a.close()
-
- self.reader, addr = a.accept()
- set_close_exec(self.reader.fileno())
- self.reader.setblocking(0)
- self.writer.setblocking(0)
- a.close()
- self.reader_fd = self.reader.fileno()
-
- def fileno(self):
- return self.reader.fileno()
-
- def write_fileno(self):
- return self.writer.fileno()
-
- def wake(self):
- try:
- self.writer.send(b"x")
- except (IOError, socket.error, ValueError):
- pass
-
- def consume(self):
- try:
- while True:
- result = self.reader.recv(1024)
- if not result:
- break
- except (IOError, socket.error):
- pass
-
- def close(self):
- self.reader.close()
- try_close(self.writer)
+"""Lowest-common-denominator implementations of platform functionality."""
+from __future__ import absolute_import, division, print_function
+
+import errno
+import socket
+import time
+
+from tornado.platform import interface
+from tornado.util import errno_from_exception
+
+
+def try_close(f):
+ # Avoid issue #875 (race condition when using the file in another
+ # thread).
+ for i in range(10):
+ try:
+ f.close()
+ except IOError:
+ # Yield to another thread
+ time.sleep(1e-3)
+ else:
+ break
+    # Try one last time and let any exception propagate
+ f.close()
+
+
+class Waker(interface.Waker):
+ """Create an OS independent asynchronous pipe.
+
+ For use on platforms that don't have os.pipe() (or where pipes cannot
+ be passed to select()), but do have sockets. This includes Windows
+ and Jython.
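+
+    A sketch of the protocol (the `.IOLoop` drives this internally)::
+
+        waker = Waker()
+        # selecting thread:  select.select([waker.fileno()], [], [])
+        # any other thread:  waker.wake()
+        # after waking up:   waker.consume()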
+ """
+ def __init__(self):
+ from .auto import set_close_exec
+ # Based on Zope select_trigger.py:
+ # https://github.com/zopefoundation/Zope/blob/master/src/ZServer/medusa/thread/select_trigger.py
+
+ self.writer = socket.socket()
+ set_close_exec(self.writer.fileno())
+ # Disable buffering -- pulling the trigger sends 1 byte,
+ # and we want that sent immediately, to wake up ASAP.
+ self.writer.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
+
+ count = 0
+ while 1:
+ count += 1
+ # Bind to a local port; for efficiency, let the OS pick
+ # a free port for us.
+ # Unfortunately, stress tests showed that we may not
+ # be able to connect to that port ("Address already in
+ # use") despite that the OS picked it. This appears
+ # to be a race bug in the Windows socket implementation.
+ # So we loop until a connect() succeeds (almost always
+ # on the first try). See the long thread at
+ # http://mail.zope.org/pipermail/zope/2005-July/160433.html
+ # for hideous details.
+ a = socket.socket()
+ set_close_exec(a.fileno())
+ a.bind(("127.0.0.1", 0))
+ a.listen(1)
+ connect_address = a.getsockname() # assigned (host, port) pair
+ try:
+ self.writer.connect(connect_address)
+ break # success
+ except socket.error as detail:
+ if (not hasattr(errno, 'WSAEADDRINUSE') or
+ errno_from_exception(detail) != errno.WSAEADDRINUSE):
+ # "Address already in use" is the only error
+ # I've seen on two WinXP Pro SP2 boxes, under
+ # Pythons 2.3.5 and 2.4.1.
+ raise
+ # (10048, 'Address already in use')
+ # assert count <= 2 # never triggered in Tim's tests
+ if count >= 10: # I've never seen it go above 2
+ a.close()
+ self.writer.close()
+ raise socket.error("Cannot bind trigger!")
+ # Close `a` and try again. Note: I originally put a short
+ # sleep() here, but it didn't appear to help or hurt.
+ a.close()
+
+ self.reader, addr = a.accept()
+ set_close_exec(self.reader.fileno())
+ self.reader.setblocking(0)
+ self.writer.setblocking(0)
+ a.close()
+ self.reader_fd = self.reader.fileno()
+
+ def fileno(self):
+ return self.reader.fileno()
+
+ def write_fileno(self):
+ return self.writer.fileno()
+
+ def wake(self):
+ try:
+ self.writer.send(b"x")
+ except (IOError, socket.error, ValueError):
+ pass
+
+ def consume(self):
+ try:
+ while True:
+ result = self.reader.recv(1024)
+ if not result:
+ break
+ except (IOError, socket.error):
+ pass
+
+ def close(self):
+ self.reader.close()
+ try_close(self.writer)
diff --git a/contrib/python/tornado/tornado-4/tornado/platform/epoll.py b/contrib/python/tornado/tornado-4/tornado/platform/epoll.py
index 80bfd8af4c..a5d17c6c51 100644
--- a/contrib/python/tornado/tornado-4/tornado/platform/epoll.py
+++ b/contrib/python/tornado/tornado-4/tornado/platform/epoll.py
@@ -1,26 +1,26 @@
-#!/usr/bin/env python
-#
-# Copyright 2012 Facebook
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""EPoll-based IOLoop implementation for Linux systems."""
-from __future__ import absolute_import, division, print_function
-
-import select
-
-from tornado.ioloop import PollIOLoop
-
-
-class EPollIOLoop(PollIOLoop):
- def initialize(self, **kwargs):
- super(EPollIOLoop, self).initialize(impl=select.epoll(), **kwargs)
+#!/usr/bin/env python
+#
+# Copyright 2012 Facebook
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""EPoll-based IOLoop implementation for Linux systems."""
+from __future__ import absolute_import, division, print_function
+
+import select
+
+from tornado.ioloop import PollIOLoop
+
+
+class EPollIOLoop(PollIOLoop):
+ def initialize(self, **kwargs):
+ super(EPollIOLoop, self).initialize(impl=select.epoll(), **kwargs)
diff --git a/contrib/python/tornado/tornado-4/tornado/platform/interface.py b/contrib/python/tornado/tornado-4/tornado/platform/interface.py
index c0ef2905c3..682351274b 100644
--- a/contrib/python/tornado/tornado-4/tornado/platform/interface.py
+++ b/contrib/python/tornado/tornado-4/tornado/platform/interface.py
@@ -1,67 +1,67 @@
-#!/usr/bin/env python
-#
-# Copyright 2011 Facebook
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Interfaces for platform-specific functionality.
-
-This module exists primarily for documentation purposes and as base classes
-for other tornado.platform modules. Most code should import the appropriate
-implementation from `tornado.platform.auto`.
-"""
-
-from __future__ import absolute_import, division, print_function
-
-
-def set_close_exec(fd):
-    """Sets the close-on-exec bit (``FD_CLOEXEC``) for a file descriptor."""
- raise NotImplementedError()
-
-
-class Waker(object):
- """A socket-like object that can wake another thread from ``select()``.
-
- The `~tornado.ioloop.IOLoop` will add the Waker's `fileno()` to
- its ``select`` (or ``epoll`` or ``kqueue``) calls. When another
- thread wants to wake up the loop, it calls `wake`. Once it has woken
- up, it will call `consume` to do any necessary per-wake cleanup. When
- the ``IOLoop`` is closed, it closes its waker too.
- """
- def fileno(self):
- """Returns the read file descriptor for this waker.
-
- Must be suitable for use with ``select()`` or equivalent on the
- local platform.
- """
- raise NotImplementedError()
-
- def write_fileno(self):
- """Returns the write file descriptor for this waker."""
- raise NotImplementedError()
-
- def wake(self):
- """Triggers activity on the waker's file descriptor."""
- raise NotImplementedError()
-
- def consume(self):
-        """Called after the loop has woken up to do any necessary cleanup."""
- raise NotImplementedError()
-
- def close(self):
- """Closes the waker's file descriptor(s)."""
- raise NotImplementedError()
-
-
-def monotonic_time():
- raise NotImplementedError()
+#!/usr/bin/env python
+#
+# Copyright 2011 Facebook
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Interfaces for platform-specific functionality.
+
+This module exists primarily for documentation purposes and as base classes
+for other tornado.platform modules. Most code should import the appropriate
+implementation from `tornado.platform.auto`.
+"""
+
+from __future__ import absolute_import, division, print_function
+
+
+def set_close_exec(fd):
+    """Sets the close-on-exec bit (``FD_CLOEXEC``) for a file descriptor."""
+ raise NotImplementedError()
+
+
+class Waker(object):
+ """A socket-like object that can wake another thread from ``select()``.
+
+ The `~tornado.ioloop.IOLoop` will add the Waker's `fileno()` to
+ its ``select`` (or ``epoll`` or ``kqueue``) calls. When another
+ thread wants to wake up the loop, it calls `wake`. Once it has woken
+ up, it will call `consume` to do any necessary per-wake cleanup. When
+ the ``IOLoop`` is closed, it closes its waker too.
+ """
+ def fileno(self):
+ """Returns the read file descriptor for this waker.
+
+ Must be suitable for use with ``select()`` or equivalent on the
+ local platform.
+ """
+ raise NotImplementedError()
+
+ def write_fileno(self):
+ """Returns the write file descriptor for this waker."""
+ raise NotImplementedError()
+
+ def wake(self):
+ """Triggers activity on the waker's file descriptor."""
+ raise NotImplementedError()
+
+ def consume(self):
+        """Called after the loop has woken up to do any necessary cleanup."""
+ raise NotImplementedError()
+
+ def close(self):
+ """Closes the waker's file descriptor(s)."""
+ raise NotImplementedError()
+
+
+def monotonic_time():
+ raise NotImplementedError()
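
The ``Waker`` contract above is the classic self-pipe trick. A standalone POSIX-only sketch, with a plain pipe standing in for the waker's file descriptors::

    import os
    import select

    r, w = os.pipe()

    os.write(w, b"x")                          # wake(): trigger activity
    readable, _, _ = select.select([r], [], [], 1.0)
    assert readable == [r]                     # the blocked select() returns
    os.read(r, 100)                            # consume(): drain wakeup bytes

    os.close(r)                                # close(): both descriptors
    os.close(w)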
diff --git a/contrib/python/tornado/tornado-4/tornado/platform/kqueue.py b/contrib/python/tornado/tornado-4/tornado/platform/kqueue.py
index 3a5d417429..d10b07c230 100644
--- a/contrib/python/tornado/tornado-4/tornado/platform/kqueue.py
+++ b/contrib/python/tornado/tornado-4/tornado/platform/kqueue.py
@@ -1,91 +1,91 @@
-#!/usr/bin/env python
-#
-# Copyright 2012 Facebook
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""KQueue-based IOLoop implementation for BSD/Mac systems."""
-from __future__ import absolute_import, division, print_function
-
-import select
-
-from tornado.ioloop import IOLoop, PollIOLoop
-
-assert hasattr(select, 'kqueue'), 'kqueue not supported'
-
-
-class _KQueue(object):
- """A kqueue-based event loop for BSD/Mac systems."""
- def __init__(self):
- self._kqueue = select.kqueue()
- self._active = {}
-
- def fileno(self):
- return self._kqueue.fileno()
-
- def close(self):
- self._kqueue.close()
-
- def register(self, fd, events):
- if fd in self._active:
- raise IOError("fd %s already registered" % fd)
- self._control(fd, events, select.KQ_EV_ADD)
- self._active[fd] = events
-
- def modify(self, fd, events):
- self.unregister(fd)
- self.register(fd, events)
-
- def unregister(self, fd):
- events = self._active.pop(fd)
- self._control(fd, events, select.KQ_EV_DELETE)
-
- def _control(self, fd, events, flags):
- kevents = []
- if events & IOLoop.WRITE:
- kevents.append(select.kevent(
- fd, filter=select.KQ_FILTER_WRITE, flags=flags))
- if events & IOLoop.READ:
- kevents.append(select.kevent(
- fd, filter=select.KQ_FILTER_READ, flags=flags))
- # Even though control() takes a list, it seems to return EINVAL
- # on Mac OS X (10.6) when there is more than one event in the list.
- for kevent in kevents:
- self._kqueue.control([kevent], 0)
-
- def poll(self, timeout):
- kevents = self._kqueue.control(None, 1000, timeout)
- events = {}
- for kevent in kevents:
- fd = kevent.ident
- if kevent.filter == select.KQ_FILTER_READ:
- events[fd] = events.get(fd, 0) | IOLoop.READ
- if kevent.filter == select.KQ_FILTER_WRITE:
- if kevent.flags & select.KQ_EV_EOF:
- # If an asynchronous connection is refused, kqueue
- # returns a write event with the EOF flag set.
- # Turn this into an error for consistency with the
- # other IOLoop implementations.
- # Note that for read events, EOF may be returned before
- # all data has been consumed from the socket buffer,
- # so we only check for EOF on write events.
- events[fd] = IOLoop.ERROR
- else:
- events[fd] = events.get(fd, 0) | IOLoop.WRITE
- if kevent.flags & select.KQ_EV_ERROR:
- events[fd] = events.get(fd, 0) | IOLoop.ERROR
- return events.items()
-
-
-class KQueueIOLoop(PollIOLoop):
- def initialize(self, **kwargs):
- super(KQueueIOLoop, self).initialize(impl=_KQueue(), **kwargs)
+#!/usr/bin/env python
+#
+# Copyright 2012 Facebook
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""KQueue-based IOLoop implementation for BSD/Mac systems."""
+from __future__ import absolute_import, division, print_function
+
+import select
+
+from tornado.ioloop import IOLoop, PollIOLoop
+
+assert hasattr(select, 'kqueue'), 'kqueue not supported'
+
+
+class _KQueue(object):
+ """A kqueue-based event loop for BSD/Mac systems."""
+ def __init__(self):
+ self._kqueue = select.kqueue()
+ self._active = {}
+
+ def fileno(self):
+ return self._kqueue.fileno()
+
+ def close(self):
+ self._kqueue.close()
+
+ def register(self, fd, events):
+ if fd in self._active:
+ raise IOError("fd %s already registered" % fd)
+ self._control(fd, events, select.KQ_EV_ADD)
+ self._active[fd] = events
+
+ def modify(self, fd, events):
+ self.unregister(fd)
+ self.register(fd, events)
+
+ def unregister(self, fd):
+ events = self._active.pop(fd)
+ self._control(fd, events, select.KQ_EV_DELETE)
+
+ def _control(self, fd, events, flags):
+ kevents = []
+ if events & IOLoop.WRITE:
+ kevents.append(select.kevent(
+ fd, filter=select.KQ_FILTER_WRITE, flags=flags))
+ if events & IOLoop.READ:
+ kevents.append(select.kevent(
+ fd, filter=select.KQ_FILTER_READ, flags=flags))
+ # Even though control() takes a list, it seems to return EINVAL
+ # on Mac OS X (10.6) when there is more than one event in the list.
+ for kevent in kevents:
+ self._kqueue.control([kevent], 0)
+
+ def poll(self, timeout):
+ kevents = self._kqueue.control(None, 1000, timeout)
+ events = {}
+ for kevent in kevents:
+ fd = kevent.ident
+ if kevent.filter == select.KQ_FILTER_READ:
+ events[fd] = events.get(fd, 0) | IOLoop.READ
+ if kevent.filter == select.KQ_FILTER_WRITE:
+ if kevent.flags & select.KQ_EV_EOF:
+ # If an asynchronous connection is refused, kqueue
+ # returns a write event with the EOF flag set.
+ # Turn this into an error for consistency with the
+ # other IOLoop implementations.
+ # Note that for read events, EOF may be returned before
+ # all data has been consumed from the socket buffer,
+ # so we only check for EOF on write events.
+ events[fd] = IOLoop.ERROR
+ else:
+ events[fd] = events.get(fd, 0) | IOLoop.WRITE
+ if kevent.flags & select.KQ_EV_ERROR:
+ events[fd] = events.get(fd, 0) | IOLoop.ERROR
+ return events.items()
+
+
+class KQueueIOLoop(PollIOLoop):
+ def initialize(self, **kwargs):
+ super(KQueueIOLoop, self).initialize(impl=_KQueue(), **kwargs)
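
A small BSD/macOS-only sketch of the same kevent round-trip that ``_KQueue._control`` and ``_KQueue.poll`` perform, again on a pipe::

    import os
    import select

    r, w = os.pipe()
    kq = select.kqueue()
    # _control(): one kevent per (fd, filter), added with KQ_EV_ADD
    kq.control([select.kevent(r, filter=select.KQ_FILTER_READ,
                              flags=select.KQ_EV_ADD)], 0)

    os.write(w, b"x")
    # poll(): control(None, max_events, timeout) returns the ready kevents
    for ev in kq.control(None, 1000, 1.0):
        if ev.filter == select.KQ_FILTER_READ:
            print(os.read(ev.ident, 1))        # b'x'; ident is the fd

    kq.close()
    os.close(r)
    os.close(w)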
diff --git a/contrib/python/tornado/tornado-4/tornado/platform/posix.py b/contrib/python/tornado/tornado-4/tornado/platform/posix.py
index 9bf1f18868..3ad7634ec2 100644
--- a/contrib/python/tornado/tornado-4/tornado/platform/posix.py
+++ b/contrib/python/tornado/tornado-4/tornado/platform/posix.py
@@ -1,70 +1,70 @@
-#!/usr/bin/env python
-#
-# Copyright 2011 Facebook
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Posix implementations of platform-specific functionality."""
-
-from __future__ import absolute_import, division, print_function
-
-import fcntl
-import os
-
-from tornado.platform import common, interface
-
-
-def set_close_exec(fd):
- flags = fcntl.fcntl(fd, fcntl.F_GETFD)
- fcntl.fcntl(fd, fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC)
-
-
-def _set_nonblocking(fd):
- flags = fcntl.fcntl(fd, fcntl.F_GETFL)
- fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)
-
-
-class Waker(interface.Waker):
- def __init__(self):
- r, w = os.pipe()
- _set_nonblocking(r)
- _set_nonblocking(w)
- set_close_exec(r)
- set_close_exec(w)
- self.reader = os.fdopen(r, "rb", 0)
- self.writer = os.fdopen(w, "wb", 0)
-
- def fileno(self):
- return self.reader.fileno()
-
- def write_fileno(self):
- return self.writer.fileno()
-
- def wake(self):
- try:
- self.writer.write(b"x")
- except (IOError, ValueError):
- pass
-
- def consume(self):
- try:
- while True:
- result = self.reader.read()
- if not result:
- break
- except IOError:
- pass
-
- def close(self):
- self.reader.close()
- common.try_close(self.writer)
+#!/usr/bin/env python
+#
+# Copyright 2011 Facebook
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Posix implementations of platform-specific functionality."""
+
+from __future__ import absolute_import, division, print_function
+
+import fcntl
+import os
+
+from tornado.platform import common, interface
+
+
+def set_close_exec(fd):
+ flags = fcntl.fcntl(fd, fcntl.F_GETFD)
+ fcntl.fcntl(fd, fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC)
+
+
+def _set_nonblocking(fd):
+ flags = fcntl.fcntl(fd, fcntl.F_GETFL)
+ fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)
+
+
+class Waker(interface.Waker):
+ def __init__(self):
+ r, w = os.pipe()
+ _set_nonblocking(r)
+ _set_nonblocking(w)
+ set_close_exec(r)
+ set_close_exec(w)
+ self.reader = os.fdopen(r, "rb", 0)
+ self.writer = os.fdopen(w, "wb", 0)
+
+ def fileno(self):
+ return self.reader.fileno()
+
+ def write_fileno(self):
+ return self.writer.fileno()
+
+ def wake(self):
+ try:
+ self.writer.write(b"x")
+ except (IOError, ValueError):
+ pass
+
+ def consume(self):
+ try:
+ while True:
+ result = self.reader.read()
+ if not result:
+ break
+ except IOError:
+ pass
+
+ def close(self):
+ self.reader.close()
+ common.try_close(self.writer)
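
Assuming this vendored tree is importable as ``tornado``, the pipe-based ``Waker`` can be exercised directly (a POSIX-only usage sketch)::

    import select

    from tornado.platform.posix import Waker

    waker = Waker()
    waker.wake()                               # writes b"x" into the pipe
    ready, _, _ = select.select([waker.fileno()], [], [], 1.0)
    assert ready == [waker.fileno()]           # a poller would now report it
    waker.consume()                            # drain so the next poll blocks
    waker.close()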
diff --git a/contrib/python/tornado/tornado-4/tornado/platform/select.py b/contrib/python/tornado/tornado-4/tornado/platform/select.py
index a18049f7cd..2dd66654c5 100644
--- a/contrib/python/tornado/tornado-4/tornado/platform/select.py
+++ b/contrib/python/tornado/tornado-4/tornado/platform/select.py
@@ -1,76 +1,76 @@
-#!/usr/bin/env python
-#
-# Copyright 2012 Facebook
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Select-based IOLoop implementation.
-
-Used as a fallback for systems that don't support epoll or kqueue.
-"""
-from __future__ import absolute_import, division, print_function
-
-import select
-
-from tornado.ioloop import IOLoop, PollIOLoop
-
-
-class _Select(object):
- """A simple, select()-based IOLoop implementation for non-Linux systems"""
- def __init__(self):
- self.read_fds = set()
- self.write_fds = set()
- self.error_fds = set()
- self.fd_sets = (self.read_fds, self.write_fds, self.error_fds)
-
- def close(self):
- pass
-
- def register(self, fd, events):
- if fd in self.read_fds or fd in self.write_fds or fd in self.error_fds:
- raise IOError("fd %s already registered" % fd)
- if events & IOLoop.READ:
- self.read_fds.add(fd)
- if events & IOLoop.WRITE:
- self.write_fds.add(fd)
- if events & IOLoop.ERROR:
- self.error_fds.add(fd)
- # Closed connections are reported as errors by epoll and kqueue,
- # but as zero-byte reads by select, so when errors are requested
- # we need to listen for both read and error.
- # self.read_fds.add(fd)
-
- def modify(self, fd, events):
- self.unregister(fd)
- self.register(fd, events)
-
- def unregister(self, fd):
- self.read_fds.discard(fd)
- self.write_fds.discard(fd)
- self.error_fds.discard(fd)
-
- def poll(self, timeout):
- readable, writeable, errors = select.select(
- self.read_fds, self.write_fds, self.error_fds, timeout)
- events = {}
- for fd in readable:
- events[fd] = events.get(fd, 0) | IOLoop.READ
- for fd in writeable:
- events[fd] = events.get(fd, 0) | IOLoop.WRITE
- for fd in errors:
- events[fd] = events.get(fd, 0) | IOLoop.ERROR
- return events.items()
-
-
-class SelectIOLoop(PollIOLoop):
- def initialize(self, **kwargs):
- super(SelectIOLoop, self).initialize(impl=_Select(), **kwargs)
+#!/usr/bin/env python
+#
+# Copyright 2012 Facebook
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""Select-based IOLoop implementation.
+
+Used as a fallback for systems that don't support epoll or kqueue.
+"""
+from __future__ import absolute_import, division, print_function
+
+import select
+
+from tornado.ioloop import IOLoop, PollIOLoop
+
+
+class _Select(object):
+ """A simple, select()-based IOLoop implementation for non-Linux systems"""
+ def __init__(self):
+ self.read_fds = set()
+ self.write_fds = set()
+ self.error_fds = set()
+ self.fd_sets = (self.read_fds, self.write_fds, self.error_fds)
+
+ def close(self):
+ pass
+
+ def register(self, fd, events):
+ if fd in self.read_fds or fd in self.write_fds or fd in self.error_fds:
+ raise IOError("fd %s already registered" % fd)
+ if events & IOLoop.READ:
+ self.read_fds.add(fd)
+ if events & IOLoop.WRITE:
+ self.write_fds.add(fd)
+ if events & IOLoop.ERROR:
+ self.error_fds.add(fd)
+ # Closed connections are reported as errors by epoll and kqueue,
+ # but as zero-byte reads by select, so when errors are requested
+ # we need to listen for both read and error.
+ # self.read_fds.add(fd)
+
+ def modify(self, fd, events):
+ self.unregister(fd)
+ self.register(fd, events)
+
+ def unregister(self, fd):
+ self.read_fds.discard(fd)
+ self.write_fds.discard(fd)
+ self.error_fds.discard(fd)
+
+ def poll(self, timeout):
+ readable, writeable, errors = select.select(
+ self.read_fds, self.write_fds, self.error_fds, timeout)
+ events = {}
+ for fd in readable:
+ events[fd] = events.get(fd, 0) | IOLoop.READ
+ for fd in writeable:
+ events[fd] = events.get(fd, 0) | IOLoop.WRITE
+ for fd in errors:
+ events[fd] = events.get(fd, 0) | IOLoop.ERROR
+ return events.items()
+
+
+class SelectIOLoop(PollIOLoop):
+ def initialize(self, **kwargs):
+ super(SelectIOLoop, self).initialize(impl=_Select(), **kwargs)
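
``_Select`` is private, but its mapping from ``select.select`` results back onto ``IOLoop`` event masks can be observed with a pipe (an illustrative sketch against this tree, not public API)::

    import os

    from tornado.ioloop import IOLoop
    from tornado.platform.select import _Select

    r, w = os.pipe()
    sel = _Select()
    sel.register(r, IOLoop.READ)
    sel.register(w, IOLoop.WRITE)

    os.write(w, b"x")
    for fd, events in sel.poll(1.0):
        if events & IOLoop.READ:
            print("readable:", fd)             # the pipe's read end
        if events & IOLoop.WRITE:
            print("writable:", fd)             # the pipe's write end

    sel.unregister(r)
    sel.unregister(w)
    os.close(r)
    os.close(w)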
diff --git a/contrib/python/tornado/tornado-4/tornado/platform/twisted.py b/contrib/python/tornado/tornado-4/tornado/platform/twisted.py
index 0f9787e84d..7e1b18d84c 100644
--- a/contrib/python/tornado/tornado-4/tornado/platform/twisted.py
+++ b/contrib/python/tornado/tornado-4/tornado/platform/twisted.py
@@ -1,591 +1,591 @@
-# Author: Ovidiu Predescu
-# Date: July 2011
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Bridges between the Twisted reactor and Tornado IOLoop.
-
-This module lets you run applications and libraries written for
-Twisted in a Tornado application. It can be used in two modes,
-depending on which library's underlying event loop you want to use.
-
-This module has been tested with Twisted versions 11.0.0 and newer.
-"""
-
-from __future__ import absolute_import, division, print_function
-
-import datetime
-import functools
-import numbers
-import socket
-import sys
-
-import twisted.internet.abstract # type: ignore
-from twisted.internet.defer import Deferred # type: ignore
-from twisted.internet.posixbase import PosixReactorBase # type: ignore
-from twisted.internet.interfaces import IReactorFDSet, IDelayedCall, IReactorTime, IReadDescriptor, IWriteDescriptor # type: ignore
-from twisted.python import failure, log # type: ignore
-from twisted.internet import error # type: ignore
-import twisted.names.cache # type: ignore
-import twisted.names.client # type: ignore
-import twisted.names.hosts # type: ignore
-import twisted.names.resolve # type: ignore
-
-from zope.interface import implementer # type: ignore
-
-from tornado.concurrent import Future
-from tornado.escape import utf8
-from tornado import gen
-import tornado.ioloop
-from tornado.log import app_log
-from tornado.netutil import Resolver
-from tornado.stack_context import NullContext, wrap
-from tornado.ioloop import IOLoop
-from tornado.util import timedelta_to_seconds
-
-
-@implementer(IDelayedCall)
-class TornadoDelayedCall(object):
- """DelayedCall object for Tornado."""
- def __init__(self, reactor, seconds, f, *args, **kw):
- self._reactor = reactor
- self._func = functools.partial(f, *args, **kw)
- self._time = self._reactor.seconds() + seconds
- self._timeout = self._reactor._io_loop.add_timeout(self._time,
- self._called)
- self._active = True
-
- def _called(self):
- self._active = False
- self._reactor._removeDelayedCall(self)
- try:
- self._func()
- except:
- app_log.error("_called caught exception", exc_info=True)
-
- def getTime(self):
- return self._time
-
- def cancel(self):
- self._active = False
- self._reactor._io_loop.remove_timeout(self._timeout)
- self._reactor._removeDelayedCall(self)
-
- def delay(self, seconds):
- self._reactor._io_loop.remove_timeout(self._timeout)
- self._time += seconds
- self._timeout = self._reactor._io_loop.add_timeout(self._time,
- self._called)
-
- def reset(self, seconds):
- self._reactor._io_loop.remove_timeout(self._timeout)
- self._time = self._reactor.seconds() + seconds
- self._timeout = self._reactor._io_loop.add_timeout(self._time,
- self._called)
-
- def active(self):
- return self._active
-
-
-@implementer(IReactorTime, IReactorFDSet)
-class TornadoReactor(PosixReactorBase):
- """Twisted reactor built on the Tornado IOLoop.
-
- `TornadoReactor` implements the Twisted reactor interface on top of
- the Tornado IOLoop. To use it, simply call `install` at the beginning
- of the application::
-
- import tornado.platform.twisted
- tornado.platform.twisted.install()
- from twisted.internet import reactor
-
- When the app is ready to start, call ``IOLoop.current().start()``
- instead of ``reactor.run()``.
-
- It is also possible to create a non-global reactor by calling
- ``tornado.platform.twisted.TornadoReactor(io_loop)``. However, if
- the `.IOLoop` and reactor are to be short-lived (such as those used in
- unit tests), additional cleanup may be required. Specifically, it is
- recommended to call::
-
- reactor.fireSystemEvent('shutdown')
- reactor.disconnectAll()
-
- before closing the `.IOLoop`.
-
- .. versionchanged:: 4.1
- The ``io_loop`` argument is deprecated.
- """
- def __init__(self, io_loop=None):
- if not io_loop:
- io_loop = tornado.ioloop.IOLoop.current()
- self._io_loop = io_loop
- self._readers = {} # map of reader objects to fd
- self._writers = {} # map of writer objects to fd
- self._fds = {} # a map of fd to a (reader, writer) tuple
- self._delayedCalls = {}
- PosixReactorBase.__init__(self)
- self.addSystemEventTrigger('during', 'shutdown', self.crash)
-
- # IOLoop.start() bypasses some of the reactor initialization.
- # Fire off the necessary events if they weren't already triggered
- # by reactor.run().
- def start_if_necessary():
- if not self._started:
- self.fireSystemEvent('startup')
- self._io_loop.add_callback(start_if_necessary)
-
- # IReactorTime
- def seconds(self):
- return self._io_loop.time()
-
- def callLater(self, seconds, f, *args, **kw):
- dc = TornadoDelayedCall(self, seconds, f, *args, **kw)
- self._delayedCalls[dc] = True
- return dc
-
- def getDelayedCalls(self):
- return [x for x in self._delayedCalls if x._active]
-
- def _removeDelayedCall(self, dc):
- if dc in self._delayedCalls:
- del self._delayedCalls[dc]
-
- # IReactorThreads
- def callFromThread(self, f, *args, **kw):
- assert callable(f), "%s is not callable" % f
- with NullContext():
- # This NullContext is mainly for an edge case when running
- # TwistedIOLoop on top of a TornadoReactor.
- # TwistedIOLoop.add_callback uses reactor.callFromThread and
- # should not pick up additional StackContexts along the way.
- self._io_loop.add_callback(f, *args, **kw)
-
- # We don't need the waker code from the super class, Tornado uses
- # its own waker.
- def installWaker(self):
- pass
-
- def wakeUp(self):
- pass
-
- # IReactorFDSet
- def _invoke_callback(self, fd, events):
- if fd not in self._fds:
- return
- (reader, writer) = self._fds[fd]
- if reader:
- err = None
- if reader.fileno() == -1:
- err = error.ConnectionLost()
- elif events & IOLoop.READ:
- err = log.callWithLogger(reader, reader.doRead)
- if err is None and events & IOLoop.ERROR:
- err = error.ConnectionLost()
- if err is not None:
- self.removeReader(reader)
- reader.readConnectionLost(failure.Failure(err))
- if writer:
- err = None
- if writer.fileno() == -1:
- err = error.ConnectionLost()
- elif events & IOLoop.WRITE:
- err = log.callWithLogger(writer, writer.doWrite)
- if err is None and events & IOLoop.ERROR:
- err = error.ConnectionLost()
- if err is not None:
- self.removeWriter(writer)
- writer.writeConnectionLost(failure.Failure(err))
-
- def addReader(self, reader):
- if reader in self._readers:
- # Don't add the reader if it's already there
- return
- fd = reader.fileno()
- self._readers[reader] = fd
- if fd in self._fds:
- (_, writer) = self._fds[fd]
- self._fds[fd] = (reader, writer)
- if writer:
- # We already registered this fd for write events,
- # update it for read events as well.
- self._io_loop.update_handler(fd, IOLoop.READ | IOLoop.WRITE)
- else:
- with NullContext():
- self._fds[fd] = (reader, None)
- self._io_loop.add_handler(fd, self._invoke_callback,
- IOLoop.READ)
-
- def addWriter(self, writer):
- if writer in self._writers:
- return
- fd = writer.fileno()
- self._writers[writer] = fd
- if fd in self._fds:
- (reader, _) = self._fds[fd]
- self._fds[fd] = (reader, writer)
- if reader:
- # We already registered this fd for read events,
- # update it for write events as well.
- self._io_loop.update_handler(fd, IOLoop.READ | IOLoop.WRITE)
- else:
- with NullContext():
- self._fds[fd] = (None, writer)
- self._io_loop.add_handler(fd, self._invoke_callback,
- IOLoop.WRITE)
-
- def removeReader(self, reader):
- if reader in self._readers:
- fd = self._readers.pop(reader)
- (_, writer) = self._fds[fd]
- if writer:
- # We have a writer so we need to update the IOLoop for
- # write events only.
- self._fds[fd] = (None, writer)
- self._io_loop.update_handler(fd, IOLoop.WRITE)
- else:
- # Since we have no writer registered, we remove the
- # entry from _fds and unregister the handler from the
- # IOLoop
- del self._fds[fd]
- self._io_loop.remove_handler(fd)
-
- def removeWriter(self, writer):
- if writer in self._writers:
- fd = self._writers.pop(writer)
- (reader, _) = self._fds[fd]
- if reader:
- # We have a reader so we need to update the IOLoop for
- # read events only.
- self._fds[fd] = (reader, None)
- self._io_loop.update_handler(fd, IOLoop.READ)
- else:
- # Since we have no reader registered, we remove the
- # entry from the _fds and unregister the handler from
- # the IOLoop.
- del self._fds[fd]
- self._io_loop.remove_handler(fd)
-
- def removeAll(self):
- return self._removeAll(self._readers, self._writers)
-
- def getReaders(self):
- return self._readers.keys()
-
- def getWriters(self):
- return self._writers.keys()
-
- # The following functions are mainly used in twisted-style test cases;
- # it is expected that most users of the TornadoReactor will call
- # IOLoop.start() instead of Reactor.run().
- def stop(self):
- PosixReactorBase.stop(self)
- fire_shutdown = functools.partial(self.fireSystemEvent, "shutdown")
- self._io_loop.add_callback(fire_shutdown)
-
- def crash(self):
- PosixReactorBase.crash(self)
- self._io_loop.stop()
-
- def doIteration(self, delay):
- raise NotImplementedError("doIteration")
-
- def mainLoop(self):
- # Since this class is intended to be used in applications
- # where the top-level event loop is ``io_loop.start()`` rather
- # than ``reactor.run()``, it is implemented a little
- # differently than other Twisted reactors. We override
- # ``mainLoop`` instead of ``doIteration`` and must implement
- # timed call functionality on top of `.IOLoop.add_timeout`
- # rather than using the implementation in
- # ``PosixReactorBase``.
- self._io_loop.start()
-
-
-class _TestReactor(TornadoReactor):
- """Subclass of TornadoReactor for use in unittests.
-
- This can't go in the test.py file because of import-order dependencies
- with the Twisted reactor test builder.
- """
- def __init__(self):
- # always use a new ioloop
- super(_TestReactor, self).__init__(IOLoop())
-
- def listenTCP(self, port, factory, backlog=50, interface=''):
- # default to localhost to avoid firewall prompts on the mac
- if not interface:
- interface = '127.0.0.1'
- return super(_TestReactor, self).listenTCP(
- port, factory, backlog=backlog, interface=interface)
-
- def listenUDP(self, port, protocol, interface='', maxPacketSize=8192):
- if not interface:
- interface = '127.0.0.1'
- return super(_TestReactor, self).listenUDP(
- port, protocol, interface=interface, maxPacketSize=maxPacketSize)
-
-
-def install(io_loop=None):
- """Install this package as the default Twisted reactor.
-
- ``install()`` must be called very early in the startup process,
- before most other twisted-related imports. Conversely, because it
- initializes the `.IOLoop`, it cannot be called before
- `.fork_processes` or multi-process `~.TCPServer.start`. These
- conflicting requirements make it difficult to use `.TornadoReactor`
- in multi-process mode, and an external process manager such as
- ``supervisord`` is recommended instead.
-
- .. versionchanged:: 4.1
- The ``io_loop`` argument is deprecated.
-
- """
- if not io_loop:
- io_loop = tornado.ioloop.IOLoop.current()
- reactor = TornadoReactor(io_loop)
- from twisted.internet.main import installReactor # type: ignore
- installReactor(reactor)
- return reactor
-
-
-@implementer(IReadDescriptor, IWriteDescriptor)
-class _FD(object):
- def __init__(self, fd, fileobj, handler):
- self.fd = fd
- self.fileobj = fileobj
- self.handler = handler
- self.reading = False
- self.writing = False
- self.lost = False
-
- def fileno(self):
- return self.fd
-
- def doRead(self):
- if not self.lost:
- self.handler(self.fileobj, tornado.ioloop.IOLoop.READ)
-
- def doWrite(self):
- if not self.lost:
- self.handler(self.fileobj, tornado.ioloop.IOLoop.WRITE)
-
- def connectionLost(self, reason):
- if not self.lost:
- self.handler(self.fileobj, tornado.ioloop.IOLoop.ERROR)
- self.lost = True
-
- def logPrefix(self):
- return ''
-
-
-class TwistedIOLoop(tornado.ioloop.IOLoop):
- """IOLoop implementation that runs on Twisted.
-
- `TwistedIOLoop` implements the Tornado IOLoop interface on top of
- the Twisted reactor. Recommended usage::
-
- from tornado.platform.twisted import TwistedIOLoop
- from twisted.internet import reactor
- TwistedIOLoop().install()
- # Set up your tornado application as usual using `IOLoop.instance`
- reactor.run()
-
- Uses the global Twisted reactor by default. To create multiple
- ``TwistedIOLoops`` in the same process, you must pass a unique reactor
- when constructing each one.
-
- Not compatible with `tornado.process.Subprocess.set_exit_callback`
- because the ``SIGCHLD`` handlers used by Tornado and Twisted conflict
- with each other.
-
- See also :meth:`tornado.ioloop.IOLoop.install` for general notes on
- installing alternative IOLoops.
- """
- def initialize(self, reactor=None, **kwargs):
- super(TwistedIOLoop, self).initialize(**kwargs)
- if reactor is None:
- import twisted.internet.reactor # type: ignore
- reactor = twisted.internet.reactor
- self.reactor = reactor
- self.fds = {}
-
- def close(self, all_fds=False):
- fds = self.fds
- self.reactor.removeAll()
- for c in self.reactor.getDelayedCalls():
- c.cancel()
- if all_fds:
- for fd in fds.values():
- self.close_fd(fd.fileobj)
-
- def add_handler(self, fd, handler, events):
- if fd in self.fds:
- raise ValueError('fd %s added twice' % fd)
- fd, fileobj = self.split_fd(fd)
- self.fds[fd] = _FD(fd, fileobj, wrap(handler))
- if events & tornado.ioloop.IOLoop.READ:
- self.fds[fd].reading = True
- self.reactor.addReader(self.fds[fd])
- if events & tornado.ioloop.IOLoop.WRITE:
- self.fds[fd].writing = True
- self.reactor.addWriter(self.fds[fd])
-
- def update_handler(self, fd, events):
- fd, fileobj = self.split_fd(fd)
- if events & tornado.ioloop.IOLoop.READ:
- if not self.fds[fd].reading:
- self.fds[fd].reading = True
- self.reactor.addReader(self.fds[fd])
- else:
- if self.fds[fd].reading:
- self.fds[fd].reading = False
- self.reactor.removeReader(self.fds[fd])
- if events & tornado.ioloop.IOLoop.WRITE:
- if not self.fds[fd].writing:
- self.fds[fd].writing = True
- self.reactor.addWriter(self.fds[fd])
- else:
- if self.fds[fd].writing:
- self.fds[fd].writing = False
- self.reactor.removeWriter(self.fds[fd])
-
- def remove_handler(self, fd):
- fd, fileobj = self.split_fd(fd)
- if fd not in self.fds:
- return
- self.fds[fd].lost = True
- if self.fds[fd].reading:
- self.reactor.removeReader(self.fds[fd])
- if self.fds[fd].writing:
- self.reactor.removeWriter(self.fds[fd])
- del self.fds[fd]
-
- def start(self):
- old_current = IOLoop.current(instance=False)
- try:
- self._setup_logging()
- self.make_current()
- self.reactor.run()
- finally:
- if old_current is None:
- IOLoop.clear_current()
- else:
- old_current.make_current()
-
- def stop(self):
- self.reactor.crash()
-
- def add_timeout(self, deadline, callback, *args, **kwargs):
- # This method could be simplified (since tornado 4.0) by
- # overriding call_at instead of add_timeout, but we leave it
- # for now as a test of backwards-compatibility.
- if isinstance(deadline, numbers.Real):
- delay = max(deadline - self.time(), 0)
- elif isinstance(deadline, datetime.timedelta):
- delay = timedelta_to_seconds(deadline)
- else:
-            raise TypeError("Unsupported deadline %r" % (deadline,))
- return self.reactor.callLater(
- delay, self._run_callback,
- functools.partial(wrap(callback), *args, **kwargs))
-
- def remove_timeout(self, timeout):
- if timeout.active():
- timeout.cancel()
-
- def add_callback(self, callback, *args, **kwargs):
- self.reactor.callFromThread(
- self._run_callback,
- functools.partial(wrap(callback), *args, **kwargs))
-
- def add_callback_from_signal(self, callback, *args, **kwargs):
- self.add_callback(callback, *args, **kwargs)
-
-
-class TwistedResolver(Resolver):
- """Twisted-based asynchronous resolver.
-
- This is a non-blocking and non-threaded resolver. It is
- recommended only when threads cannot be used, since it has
- limitations compared to the standard ``getaddrinfo``-based
- `~tornado.netutil.Resolver` and
- `~tornado.netutil.ThreadedResolver`. Specifically, it returns at
- most one result, and arguments other than ``host`` and ``family``
- are ignored. It may fail to resolve when ``family`` is not
- ``socket.AF_UNSPEC``.
-
- Requires Twisted 12.1 or newer.
-
- .. versionchanged:: 4.1
- The ``io_loop`` argument is deprecated.
- """
- def initialize(self, io_loop=None):
- self.io_loop = io_loop or IOLoop.current()
- # partial copy of twisted.names.client.createResolver, which doesn't
- # allow for a reactor to be passed in.
- self.reactor = tornado.platform.twisted.TornadoReactor(io_loop)
-
- host_resolver = twisted.names.hosts.Resolver('/etc/hosts')
- cache_resolver = twisted.names.cache.CacheResolver(reactor=self.reactor)
- real_resolver = twisted.names.client.Resolver('/etc/resolv.conf',
- reactor=self.reactor)
- self.resolver = twisted.names.resolve.ResolverChain(
- [host_resolver, cache_resolver, real_resolver])
-
- @gen.coroutine
- def resolve(self, host, port, family=0):
- # getHostByName doesn't accept IP addresses, so if the input
- # looks like an IP address just return it immediately.
- if twisted.internet.abstract.isIPAddress(host):
- resolved = host
- resolved_family = socket.AF_INET
- elif twisted.internet.abstract.isIPv6Address(host):
- resolved = host
- resolved_family = socket.AF_INET6
- else:
- deferred = self.resolver.getHostByName(utf8(host))
- resolved = yield gen.Task(deferred.addBoth)
- if isinstance(resolved, failure.Failure):
- try:
- resolved.raiseException()
- except twisted.names.error.DomainError as e:
- raise IOError(e)
- elif twisted.internet.abstract.isIPAddress(resolved):
- resolved_family = socket.AF_INET
- elif twisted.internet.abstract.isIPv6Address(resolved):
- resolved_family = socket.AF_INET6
- else:
- resolved_family = socket.AF_UNSPEC
- if family != socket.AF_UNSPEC and family != resolved_family:
- raise Exception('Requested socket family %d but got %d' %
- (family, resolved_family))
- result = [
- (resolved_family, (resolved, port)),
- ]
- raise gen.Return(result)
-
-
-if hasattr(gen.convert_yielded, 'register'):
- @gen.convert_yielded.register(Deferred) # type: ignore
- def _(d):
- f = Future()
-
- def errback(failure):
- try:
- failure.raiseException()
- # Should never happen, but just in case
- raise Exception("errback called without error")
- except:
- f.set_exc_info(sys.exc_info())
- d.addCallbacks(f.set_result, errback)
- return f
+# Author: Ovidiu Predescu
+# Date: July 2011
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""Bridges between the Twisted reactor and Tornado IOLoop.
+
+This module lets you run applications and libraries written for
+Twisted in a Tornado application. It can be used in two modes,
+depending on which library's underlying event loop you want to use.
+
+This module has been tested with Twisted versions 11.0.0 and newer.
+"""
+
+from __future__ import absolute_import, division, print_function
+
+import datetime
+import functools
+import numbers
+import socket
+import sys
+
+import twisted.internet.abstract # type: ignore
+from twisted.internet.defer import Deferred # type: ignore
+from twisted.internet.posixbase import PosixReactorBase # type: ignore
+from twisted.internet.interfaces import IReactorFDSet, IDelayedCall, IReactorTime, IReadDescriptor, IWriteDescriptor # type: ignore
+from twisted.python import failure, log # type: ignore
+from twisted.internet import error # type: ignore
+import twisted.names.cache # type: ignore
+import twisted.names.client # type: ignore
+import twisted.names.hosts # type: ignore
+import twisted.names.resolve # type: ignore
+
+from zope.interface import implementer # type: ignore
+
+from tornado.concurrent import Future
+from tornado.escape import utf8
+from tornado import gen
+import tornado.ioloop
+from tornado.log import app_log
+from tornado.netutil import Resolver
+from tornado.stack_context import NullContext, wrap
+from tornado.ioloop import IOLoop
+from tornado.util import timedelta_to_seconds
+
+
+@implementer(IDelayedCall)
+class TornadoDelayedCall(object):
+ """DelayedCall object for Tornado."""
+ def __init__(self, reactor, seconds, f, *args, **kw):
+ self._reactor = reactor
+ self._func = functools.partial(f, *args, **kw)
+ self._time = self._reactor.seconds() + seconds
+ self._timeout = self._reactor._io_loop.add_timeout(self._time,
+ self._called)
+ self._active = True
+
+ def _called(self):
+ self._active = False
+ self._reactor._removeDelayedCall(self)
+ try:
+ self._func()
+ except:
+ app_log.error("_called caught exception", exc_info=True)
+
+ def getTime(self):
+ return self._time
+
+ def cancel(self):
+ self._active = False
+ self._reactor._io_loop.remove_timeout(self._timeout)
+ self._reactor._removeDelayedCall(self)
+
+ def delay(self, seconds):
+ self._reactor._io_loop.remove_timeout(self._timeout)
+ self._time += seconds
+ self._timeout = self._reactor._io_loop.add_timeout(self._time,
+ self._called)
+
+ def reset(self, seconds):
+ self._reactor._io_loop.remove_timeout(self._timeout)
+ self._time = self._reactor.seconds() + seconds
+ self._timeout = self._reactor._io_loop.add_timeout(self._time,
+ self._called)
+
+ def active(self):
+ return self._active
+
+
+@implementer(IReactorTime, IReactorFDSet)
+class TornadoReactor(PosixReactorBase):
+ """Twisted reactor built on the Tornado IOLoop.
+
+ `TornadoReactor` implements the Twisted reactor interface on top of
+ the Tornado IOLoop. To use it, simply call `install` at the beginning
+ of the application::
+
+ import tornado.platform.twisted
+ tornado.platform.twisted.install()
+ from twisted.internet import reactor
+
+ When the app is ready to start, call ``IOLoop.current().start()``
+ instead of ``reactor.run()``.
+
+ It is also possible to create a non-global reactor by calling
+ ``tornado.platform.twisted.TornadoReactor(io_loop)``. However, if
+ the `.IOLoop` and reactor are to be short-lived (such as those used in
+ unit tests), additional cleanup may be required. Specifically, it is
+ recommended to call::
+
+ reactor.fireSystemEvent('shutdown')
+ reactor.disconnectAll()
+
+ before closing the `.IOLoop`.
+
+ .. versionchanged:: 4.1
+ The ``io_loop`` argument is deprecated.
+ """
+ def __init__(self, io_loop=None):
+ if not io_loop:
+ io_loop = tornado.ioloop.IOLoop.current()
+ self._io_loop = io_loop
+ self._readers = {} # map of reader objects to fd
+ self._writers = {} # map of writer objects to fd
+ self._fds = {} # a map of fd to a (reader, writer) tuple
+ self._delayedCalls = {}
+ PosixReactorBase.__init__(self)
+ self.addSystemEventTrigger('during', 'shutdown', self.crash)
+
+ # IOLoop.start() bypasses some of the reactor initialization.
+ # Fire off the necessary events if they weren't already triggered
+ # by reactor.run().
+ def start_if_necessary():
+ if not self._started:
+ self.fireSystemEvent('startup')
+ self._io_loop.add_callback(start_if_necessary)
+
+ # IReactorTime
+ def seconds(self):
+ return self._io_loop.time()
+
+ def callLater(self, seconds, f, *args, **kw):
+ dc = TornadoDelayedCall(self, seconds, f, *args, **kw)
+ self._delayedCalls[dc] = True
+ return dc
+
+ def getDelayedCalls(self):
+ return [x for x in self._delayedCalls if x._active]
+
+ def _removeDelayedCall(self, dc):
+ if dc in self._delayedCalls:
+ del self._delayedCalls[dc]
+
+ # IReactorThreads
+ def callFromThread(self, f, *args, **kw):
+ assert callable(f), "%s is not callable" % f
+ with NullContext():
+ # This NullContext is mainly for an edge case when running
+ # TwistedIOLoop on top of a TornadoReactor.
+ # TwistedIOLoop.add_callback uses reactor.callFromThread and
+ # should not pick up additional StackContexts along the way.
+ self._io_loop.add_callback(f, *args, **kw)
+
+ # We don't need the waker code from the super class, Tornado uses
+ # its own waker.
+ def installWaker(self):
+ pass
+
+ def wakeUp(self):
+ pass
+
+ # IReactorFDSet
+ def _invoke_callback(self, fd, events):
+ if fd not in self._fds:
+ return
+ (reader, writer) = self._fds[fd]
+ if reader:
+ err = None
+ if reader.fileno() == -1:
+ err = error.ConnectionLost()
+ elif events & IOLoop.READ:
+ err = log.callWithLogger(reader, reader.doRead)
+ if err is None and events & IOLoop.ERROR:
+ err = error.ConnectionLost()
+ if err is not None:
+ self.removeReader(reader)
+ reader.readConnectionLost(failure.Failure(err))
+ if writer:
+ err = None
+ if writer.fileno() == -1:
+ err = error.ConnectionLost()
+ elif events & IOLoop.WRITE:
+ err = log.callWithLogger(writer, writer.doWrite)
+ if err is None and events & IOLoop.ERROR:
+ err = error.ConnectionLost()
+ if err is not None:
+ self.removeWriter(writer)
+ writer.writeConnectionLost(failure.Failure(err))
+
+ def addReader(self, reader):
+ if reader in self._readers:
+ # Don't add the reader if it's already there
+ return
+ fd = reader.fileno()
+ self._readers[reader] = fd
+ if fd in self._fds:
+ (_, writer) = self._fds[fd]
+ self._fds[fd] = (reader, writer)
+ if writer:
+ # We already registered this fd for write events,
+ # update it for read events as well.
+ self._io_loop.update_handler(fd, IOLoop.READ | IOLoop.WRITE)
+ else:
+ with NullContext():
+ self._fds[fd] = (reader, None)
+ self._io_loop.add_handler(fd, self._invoke_callback,
+ IOLoop.READ)
+
+ def addWriter(self, writer):
+ if writer in self._writers:
+ return
+ fd = writer.fileno()
+ self._writers[writer] = fd
+ if fd in self._fds:
+ (reader, _) = self._fds[fd]
+ self._fds[fd] = (reader, writer)
+ if reader:
+ # We already registered this fd for read events,
+ # update it for write events as well.
+ self._io_loop.update_handler(fd, IOLoop.READ | IOLoop.WRITE)
+ else:
+ with NullContext():
+ self._fds[fd] = (None, writer)
+ self._io_loop.add_handler(fd, self._invoke_callback,
+ IOLoop.WRITE)
+
+ def removeReader(self, reader):
+ if reader in self._readers:
+ fd = self._readers.pop(reader)
+ (_, writer) = self._fds[fd]
+ if writer:
+ # We have a writer so we need to update the IOLoop for
+ # write events only.
+ self._fds[fd] = (None, writer)
+ self._io_loop.update_handler(fd, IOLoop.WRITE)
+ else:
+ # Since we have no writer registered, we remove the
+ # entry from _fds and unregister the handler from the
+ # IOLoop
+ del self._fds[fd]
+ self._io_loop.remove_handler(fd)
+
+ def removeWriter(self, writer):
+ if writer in self._writers:
+ fd = self._writers.pop(writer)
+ (reader, _) = self._fds[fd]
+ if reader:
+ # We have a reader so we need to update the IOLoop for
+ # read events only.
+ self._fds[fd] = (reader, None)
+ self._io_loop.update_handler(fd, IOLoop.READ)
+ else:
+ # Since we have no reader registered, we remove the
+ # entry from the _fds and unregister the handler from
+ # the IOLoop.
+ del self._fds[fd]
+ self._io_loop.remove_handler(fd)
+
+ def removeAll(self):
+ return self._removeAll(self._readers, self._writers)
+
+ def getReaders(self):
+ return self._readers.keys()
+
+ def getWriters(self):
+ return self._writers.keys()
+
+ # The following functions are mainly used in twisted-style test cases;
+ # it is expected that most users of the TornadoReactor will call
+ # IOLoop.start() instead of Reactor.run().
+ def stop(self):
+ PosixReactorBase.stop(self)
+ fire_shutdown = functools.partial(self.fireSystemEvent, "shutdown")
+ self._io_loop.add_callback(fire_shutdown)
+
+ def crash(self):
+ PosixReactorBase.crash(self)
+ self._io_loop.stop()
+
+ def doIteration(self, delay):
+ raise NotImplementedError("doIteration")
+
+ def mainLoop(self):
+ # Since this class is intended to be used in applications
+ # where the top-level event loop is ``io_loop.start()`` rather
+ # than ``reactor.run()``, it is implemented a little
+ # differently than other Twisted reactors. We override
+ # ``mainLoop`` instead of ``doIteration`` and must implement
+ # timed call functionality on top of `.IOLoop.add_timeout`
+ # rather than using the implementation in
+ # ``PosixReactorBase``.
+ self._io_loop.start()
+
+
+class _TestReactor(TornadoReactor):
+ """Subclass of TornadoReactor for use in unittests.
+
+ This can't go in the test.py file because of import-order dependencies
+ with the Twisted reactor test builder.
+ """
+ def __init__(self):
+ # always use a new ioloop
+ super(_TestReactor, self).__init__(IOLoop())
+
+ def listenTCP(self, port, factory, backlog=50, interface=''):
+ # default to localhost to avoid firewall prompts on the mac
+ if not interface:
+ interface = '127.0.0.1'
+ return super(_TestReactor, self).listenTCP(
+ port, factory, backlog=backlog, interface=interface)
+
+ def listenUDP(self, port, protocol, interface='', maxPacketSize=8192):
+ if not interface:
+ interface = '127.0.0.1'
+ return super(_TestReactor, self).listenUDP(
+ port, protocol, interface=interface, maxPacketSize=maxPacketSize)
+
+
+def install(io_loop=None):
+ """Install this package as the default Twisted reactor.
+
+ ``install()`` must be called very early in the startup process,
+ before most other twisted-related imports. Conversely, because it
+ initializes the `.IOLoop`, it cannot be called before
+ `.fork_processes` or multi-process `~.TCPServer.start`. These
+ conflicting requirements make it difficult to use `.TornadoReactor`
+ in multi-process mode, and an external process manager such as
+ ``supervisord`` is recommended instead.
+
+ .. versionchanged:: 4.1
+ The ``io_loop`` argument is deprecated.
+
+ """
+ if not io_loop:
+ io_loop = tornado.ioloop.IOLoop.current()
+ reactor = TornadoReactor(io_loop)
+ from twisted.internet.main import installReactor # type: ignore
+ installReactor(reactor)
+ return reactor
+
+
+@implementer(IReadDescriptor, IWriteDescriptor)
+class _FD(object):
+ def __init__(self, fd, fileobj, handler):
+ self.fd = fd
+ self.fileobj = fileobj
+ self.handler = handler
+ self.reading = False
+ self.writing = False
+ self.lost = False
+
+ def fileno(self):
+ return self.fd
+
+ def doRead(self):
+ if not self.lost:
+ self.handler(self.fileobj, tornado.ioloop.IOLoop.READ)
+
+ def doWrite(self):
+ if not self.lost:
+ self.handler(self.fileobj, tornado.ioloop.IOLoop.WRITE)
+
+ def connectionLost(self, reason):
+ if not self.lost:
+ self.handler(self.fileobj, tornado.ioloop.IOLoop.ERROR)
+ self.lost = True
+
+ def logPrefix(self):
+ return ''
+
+
+class TwistedIOLoop(tornado.ioloop.IOLoop):
+ """IOLoop implementation that runs on Twisted.
+
+ `TwistedIOLoop` implements the Tornado IOLoop interface on top of
+ the Twisted reactor. Recommended usage::
+
+ from tornado.platform.twisted import TwistedIOLoop
+ from twisted.internet import reactor
+ TwistedIOLoop().install()
+ # Set up your tornado application as usual using `IOLoop.instance`
+ reactor.run()
+
+ Uses the global Twisted reactor by default. To create multiple
+ ``TwistedIOLoops`` in the same process, you must pass a unique reactor
+ when constructing each one.
+
+ Not compatible with `tornado.process.Subprocess.set_exit_callback`
+ because the ``SIGCHLD`` handlers used by Tornado and Twisted conflict
+ with each other.
+
+ See also :meth:`tornado.ioloop.IOLoop.install` for general notes on
+ installing alternative IOLoops.
+ """
+ def initialize(self, reactor=None, **kwargs):
+ super(TwistedIOLoop, self).initialize(**kwargs)
+ if reactor is None:
+ import twisted.internet.reactor # type: ignore
+ reactor = twisted.internet.reactor
+ self.reactor = reactor
+ self.fds = {}
+
+ def close(self, all_fds=False):
+ fds = self.fds
+ self.reactor.removeAll()
+ for c in self.reactor.getDelayedCalls():
+ c.cancel()
+ if all_fds:
+ for fd in fds.values():
+ self.close_fd(fd.fileobj)
+
+ def add_handler(self, fd, handler, events):
+ if fd in self.fds:
+ raise ValueError('fd %s added twice' % fd)
+ fd, fileobj = self.split_fd(fd)
+ self.fds[fd] = _FD(fd, fileobj, wrap(handler))
+ if events & tornado.ioloop.IOLoop.READ:
+ self.fds[fd].reading = True
+ self.reactor.addReader(self.fds[fd])
+ if events & tornado.ioloop.IOLoop.WRITE:
+ self.fds[fd].writing = True
+ self.reactor.addWriter(self.fds[fd])
+
+ def update_handler(self, fd, events):
+ fd, fileobj = self.split_fd(fd)
+ if events & tornado.ioloop.IOLoop.READ:
+ if not self.fds[fd].reading:
+ self.fds[fd].reading = True
+ self.reactor.addReader(self.fds[fd])
+ else:
+ if self.fds[fd].reading:
+ self.fds[fd].reading = False
+ self.reactor.removeReader(self.fds[fd])
+ if events & tornado.ioloop.IOLoop.WRITE:
+ if not self.fds[fd].writing:
+ self.fds[fd].writing = True
+ self.reactor.addWriter(self.fds[fd])
+ else:
+ if self.fds[fd].writing:
+ self.fds[fd].writing = False
+ self.reactor.removeWriter(self.fds[fd])
+
+ def remove_handler(self, fd):
+ fd, fileobj = self.split_fd(fd)
+ if fd not in self.fds:
+ return
+ self.fds[fd].lost = True
+ if self.fds[fd].reading:
+ self.reactor.removeReader(self.fds[fd])
+ if self.fds[fd].writing:
+ self.reactor.removeWriter(self.fds[fd])
+ del self.fds[fd]
+
+ def start(self):
+ old_current = IOLoop.current(instance=False)
+ try:
+ self._setup_logging()
+ self.make_current()
+ self.reactor.run()
+ finally:
+ if old_current is None:
+ IOLoop.clear_current()
+ else:
+ old_current.make_current()
+
+ def stop(self):
+ self.reactor.crash()
+
+ def add_timeout(self, deadline, callback, *args, **kwargs):
+ # This method could be simplified (since tornado 4.0) by
+ # overriding call_at instead of add_timeout, but we leave it
+ # for now as a test of backwards-compatibility.
+ if isinstance(deadline, numbers.Real):
+ delay = max(deadline - self.time(), 0)
+ elif isinstance(deadline, datetime.timedelta):
+ delay = timedelta_to_seconds(deadline)
+ else:
+            raise TypeError("Unsupported deadline %r" % (deadline,))
+ return self.reactor.callLater(
+ delay, self._run_callback,
+ functools.partial(wrap(callback), *args, **kwargs))
+
+ def remove_timeout(self, timeout):
+ if timeout.active():
+ timeout.cancel()
+
+ def add_callback(self, callback, *args, **kwargs):
+ self.reactor.callFromThread(
+ self._run_callback,
+ functools.partial(wrap(callback), *args, **kwargs))
+
+ def add_callback_from_signal(self, callback, *args, **kwargs):
+ self.add_callback(callback, *args, **kwargs)
+
+
+class TwistedResolver(Resolver):
+ """Twisted-based asynchronous resolver.
+
+ This is a non-blocking and non-threaded resolver. It is
+ recommended only when threads cannot be used, since it has
+ limitations compared to the standard ``getaddrinfo``-based
+ `~tornado.netutil.Resolver` and
+ `~tornado.netutil.ThreadedResolver`. Specifically, it returns at
+ most one result, and arguments other than ``host`` and ``family``
+ are ignored. It may fail to resolve when ``family`` is not
+ ``socket.AF_UNSPEC``.
+
+ Requires Twisted 12.1 or newer.
+
+ .. versionchanged:: 4.1
+ The ``io_loop`` argument is deprecated.
+ """
+ def initialize(self, io_loop=None):
+ self.io_loop = io_loop or IOLoop.current()
+ # partial copy of twisted.names.client.createResolver, which doesn't
+ # allow for a reactor to be passed in.
+ self.reactor = tornado.platform.twisted.TornadoReactor(io_loop)
+
+ host_resolver = twisted.names.hosts.Resolver('/etc/hosts')
+ cache_resolver = twisted.names.cache.CacheResolver(reactor=self.reactor)
+ real_resolver = twisted.names.client.Resolver('/etc/resolv.conf',
+ reactor=self.reactor)
+ self.resolver = twisted.names.resolve.ResolverChain(
+ [host_resolver, cache_resolver, real_resolver])
+
+ @gen.coroutine
+ def resolve(self, host, port, family=0):
+ # getHostByName doesn't accept IP addresses, so if the input
+ # looks like an IP address just return it immediately.
+ if twisted.internet.abstract.isIPAddress(host):
+ resolved = host
+ resolved_family = socket.AF_INET
+ elif twisted.internet.abstract.isIPv6Address(host):
+ resolved = host
+ resolved_family = socket.AF_INET6
+ else:
+ deferred = self.resolver.getHostByName(utf8(host))
+ resolved = yield gen.Task(deferred.addBoth)
+ if isinstance(resolved, failure.Failure):
+ try:
+ resolved.raiseException()
+ except twisted.names.error.DomainError as e:
+ raise IOError(e)
+ elif twisted.internet.abstract.isIPAddress(resolved):
+ resolved_family = socket.AF_INET
+ elif twisted.internet.abstract.isIPv6Address(resolved):
+ resolved_family = socket.AF_INET6
+ else:
+ resolved_family = socket.AF_UNSPEC
+ if family != socket.AF_UNSPEC and family != resolved_family:
+ raise Exception('Requested socket family %d but got %d' %
+ (family, resolved_family))
+ result = [
+ (resolved_family, (resolved, port)),
+ ]
+ raise gen.Return(result)
+
+
+if hasattr(gen.convert_yielded, 'register'):
+ @gen.convert_yielded.register(Deferred) # type: ignore
+ def _(d):
+ f = Future()
+
+ def errback(failure):
+ try:
+ failure.raiseException()
+ # Should never happen, but just in case
+ raise Exception("errback called without error")
+ except:
+ f.set_exc_info(sys.exc_info())
+ d.addCallbacks(f.set_result, errback)
+ return f
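
The ``convert_yielded`` registration at the end means Tornado coroutines can yield Twisted ``Deferred`` objects directly when ``singledispatch`` is available (always true on Python 3). A hedged end-to-end sketch, assuming Twisted is installed::

    from twisted.internet.defer import Deferred

    from tornado import gen
    from tornado.ioloop import IOLoop

    @gen.coroutine
    def main():
        d = Deferred()
        # Fire the Deferred shortly after the loop starts running.
        IOLoop.current().call_later(0.1, d.callback, "hello")
        result = yield d        # routed through the converter above
        print(result)           # hello

    IOLoop.current().run_sync(main)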
diff --git a/contrib/python/tornado/tornado-4/tornado/platform/windows.py b/contrib/python/tornado/tornado-4/tornado/platform/windows.py
index e94a0cf13d..5e223c8191 100644
--- a/contrib/python/tornado/tornado-4/tornado/platform/windows.py
+++ b/contrib/python/tornado/tornado-4/tornado/platform/windows.py
@@ -1,20 +1,20 @@
-# NOTE: win32 support is currently experimental, and not recommended
-# for production use.
-
-
-from __future__ import absolute_import, division, print_function
-import ctypes # type: ignore
-import ctypes.wintypes # type: ignore
-
-# See: http://msdn.microsoft.com/en-us/library/ms724935(VS.85).aspx
-SetHandleInformation = ctypes.windll.kernel32.SetHandleInformation
-SetHandleInformation.argtypes = (ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD, ctypes.wintypes.DWORD)
-SetHandleInformation.restype = ctypes.wintypes.BOOL
-
-HANDLE_FLAG_INHERIT = 0x00000001
-
-
-def set_close_exec(fd):
- success = SetHandleInformation(fd, HANDLE_FLAG_INHERIT, 0)
- if not success:
- raise ctypes.WinError()
+# NOTE: win32 support is currently experimental, and not recommended
+# for production use.
+
+
+from __future__ import absolute_import, division, print_function
+import ctypes # type: ignore
+import ctypes.wintypes # type: ignore
+
+# See: http://msdn.microsoft.com/en-us/library/ms724935(VS.85).aspx
+SetHandleInformation = ctypes.windll.kernel32.SetHandleInformation
+SetHandleInformation.argtypes = (ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD, ctypes.wintypes.DWORD)
+SetHandleInformation.restype = ctypes.wintypes.BOOL
+
+HANDLE_FLAG_INHERIT = 0x00000001
+
+
+def set_close_exec(fd):
+ success = SetHandleInformation(fd, HANDLE_FLAG_INHERIT, 0)
+ if not success:
+ raise ctypes.WinError()
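
A side note, offered as an assumption for illustration rather than something this file states: on Windows a socket's ``fileno()`` is an OS handle rather than a CRT descriptor, so it can be passed straight to ``set_close_exec`` to keep spawned children from inheriting it:

    import socket

    from tornado.platform.auto import set_close_exec  # resolves to this module on win32

    sock = socket.socket()
    # Clears HANDLE_FLAG_INHERIT on the handle so child processes
    # do not inherit this socket.
    set_close_exec(sock.fileno())
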
diff --git a/contrib/python/tornado/tornado-4/tornado/platform/ya.make b/contrib/python/tornado/tornado-4/tornado/platform/ya.make
index 195c1fad93..c960eea835 100644
--- a/contrib/python/tornado/tornado-4/tornado/platform/ya.make
+++ b/contrib/python/tornado/tornado-4/tornado/platform/ya.make
@@ -1 +1 @@
-OWNER(g:python-contrib)
+OWNER(g:python-contrib)
diff --git a/contrib/python/tornado/tornado-4/tornado/process.py b/contrib/python/tornado/tornado-4/tornado/process.py
index fae94f3c13..8bf8818c1a 100644
--- a/contrib/python/tornado/tornado-4/tornado/process.py
+++ b/contrib/python/tornado/tornado-4/tornado/process.py
@@ -1,365 +1,365 @@
-#!/usr/bin/env python
-#
-# Copyright 2011 Facebook
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Utilities for working with multiple processes, including both forking
-the server into multiple processes and managing subprocesses.
-"""
-
-from __future__ import absolute_import, division, print_function
-
-import errno
-import os
-import signal
-import subprocess
-import sys
-import time
-
-from binascii import hexlify
-
-from tornado.concurrent import Future
-from tornado import ioloop
-from tornado.iostream import PipeIOStream
-from tornado.log import gen_log
-from tornado.platform.auto import set_close_exec
-from tornado import stack_context
-from tornado.util import errno_from_exception, PY3
-
-try:
- import multiprocessing
-except ImportError:
- # Multiprocessing is not available on Google App Engine.
- multiprocessing = None
-
-if PY3:
- long = int
-
-# Re-export this exception for convenience.
-try:
- CalledProcessError = subprocess.CalledProcessError
-except AttributeError:
- # The subprocess module exists in Google App Engine, but is empty.
- # This module isn't very useful in that case, but it should
- # at least be importable.
- if 'APPENGINE_RUNTIME' not in os.environ:
- raise
-
-
-def cpu_count():
- """Returns the number of processors on this machine."""
- if multiprocessing is None:
- return 1
- try:
- return multiprocessing.cpu_count()
- except NotImplementedError:
- pass
- try:
- return os.sysconf("SC_NPROCESSORS_CONF")
- except (AttributeError, ValueError):
- pass
- gen_log.error("Could not detect number of processors; assuming 1")
- return 1
-
-
-def _reseed_random():
- if 'random' not in sys.modules:
- return
- import random
- # If os.urandom is available, this method does the same thing as
- # random.seed (at least as of python 2.6). If os.urandom is not
- # available, we mix in the pid in addition to a timestamp.
- try:
- seed = long(hexlify(os.urandom(16)), 16)
- except NotImplementedError:
- seed = int(time.time() * 1000) ^ os.getpid()
- random.seed(seed)
-
-
-def _pipe_cloexec():
- r, w = os.pipe()
- set_close_exec(r)
- set_close_exec(w)
- return r, w
-
-
-_task_id = None
-
-
-def fork_processes(num_processes, max_restarts=100):
- """Starts multiple worker processes.
-
- If ``num_processes`` is None or <= 0, we detect the number of cores
- available on this machine and fork that number of child
- processes. If ``num_processes`` is given and > 0, we fork that
- specific number of sub-processes.
-
- Since we use processes and not threads, there is no shared memory
-    between the server processes.
-
- Note that multiple processes are not compatible with the autoreload
- module (or the ``autoreload=True`` option to `tornado.web.Application`
- which defaults to True when ``debug=True``).
- When using multiple processes, no IOLoops can be created or
- referenced until after the call to ``fork_processes``.
-
- In each child process, ``fork_processes`` returns its *task id*, a
-    number between 0 and ``num_processes - 1``. Processes that exit
- abnormally (due to a signal or non-zero exit status) are restarted
- with the same id (up to ``max_restarts`` times). In the parent
- process, ``fork_processes`` returns None if all child processes
- have exited normally, but will otherwise only exit by throwing an
- exception.
- """
- global _task_id
- assert _task_id is None
- if num_processes is None or num_processes <= 0:
- num_processes = cpu_count()
- if ioloop.IOLoop.initialized():
- raise RuntimeError("Cannot run in multiple processes: IOLoop instance "
- "has already been initialized. You cannot call "
-                           "IOLoop.instance() before calling fork_processes()")
- gen_log.info("Starting %d processes", num_processes)
- children = {}
-
- def start_child(i):
- pid = os.fork()
- if pid == 0:
- # child process
- _reseed_random()
- global _task_id
- _task_id = i
- return i
- else:
- children[pid] = i
- return None
-
- for i in range(num_processes):
- id = start_child(i)
- if id is not None:
- return id
- num_restarts = 0
- while children:
- try:
- pid, status = os.wait()
- except OSError as e:
- if errno_from_exception(e) == errno.EINTR:
- continue
- raise
- if pid not in children:
- continue
- id = children.pop(pid)
- if os.WIFSIGNALED(status):
- gen_log.warning("child %d (pid %d) killed by signal %d, restarting",
- id, pid, os.WTERMSIG(status))
- elif os.WEXITSTATUS(status) != 0:
- gen_log.warning("child %d (pid %d) exited with status %d, restarting",
- id, pid, os.WEXITSTATUS(status))
- else:
- gen_log.info("child %d (pid %d) exited normally", id, pid)
- continue
- num_restarts += 1
- if num_restarts > max_restarts:
- raise RuntimeError("Too many child restarts, giving up")
- new_id = start_child(id)
- if new_id is not None:
- return new_id
- # All child processes exited cleanly, so exit the master process
- # instead of just returning to right after the call to
- # fork_processes (which will probably just start up another IOLoop
- # unless the caller checks the return value).
- sys.exit(0)
-
-
-def task_id():
- """Returns the current task id, if any.
-
- Returns None if this process was not created by `fork_processes`.
- """
- global _task_id
- return _task_id
-
-
-class Subprocess(object):
- """Wraps ``subprocess.Popen`` with IOStream support.
-
- The constructor is the same as ``subprocess.Popen`` with the following
- additions:
-
- * ``stdin``, ``stdout``, and ``stderr`` may have the value
- ``tornado.process.Subprocess.STREAM``, which will make the corresponding
- attribute of the resulting Subprocess a `.PipeIOStream`.
- * A new keyword argument ``io_loop`` may be used to pass in an IOLoop.
-
- The ``Subprocess.STREAM`` option and the ``set_exit_callback`` and
- ``wait_for_exit`` methods do not work on Windows. There is
- therefore no reason to use this class instead of
- ``subprocess.Popen`` on that platform.
-
- .. versionchanged:: 4.1
- The ``io_loop`` argument is deprecated.
-
- """
- STREAM = object()
-
- _initialized = False
- _waiting = {} # type: ignore
-
- def __init__(self, *args, **kwargs):
- self.io_loop = kwargs.pop('io_loop', None) or ioloop.IOLoop.current()
- # All FDs we create should be closed on error; those in to_close
- # should be closed in the parent process on success.
- pipe_fds = []
- to_close = []
- if kwargs.get('stdin') is Subprocess.STREAM:
- in_r, in_w = _pipe_cloexec()
- kwargs['stdin'] = in_r
- pipe_fds.extend((in_r, in_w))
- to_close.append(in_r)
- self.stdin = PipeIOStream(in_w, io_loop=self.io_loop)
- if kwargs.get('stdout') is Subprocess.STREAM:
- out_r, out_w = _pipe_cloexec()
- kwargs['stdout'] = out_w
- pipe_fds.extend((out_r, out_w))
- to_close.append(out_w)
- self.stdout = PipeIOStream(out_r, io_loop=self.io_loop)
- if kwargs.get('stderr') is Subprocess.STREAM:
- err_r, err_w = _pipe_cloexec()
- kwargs['stderr'] = err_w
- pipe_fds.extend((err_r, err_w))
- to_close.append(err_w)
- self.stderr = PipeIOStream(err_r, io_loop=self.io_loop)
- try:
- self.proc = subprocess.Popen(*args, **kwargs)
- except:
- for fd in pipe_fds:
- os.close(fd)
- raise
- for fd in to_close:
- os.close(fd)
- for attr in ['stdin', 'stdout', 'stderr', 'pid']:
- if not hasattr(self, attr): # don't clobber streams set above
- setattr(self, attr, getattr(self.proc, attr))
- self._exit_callback = None
- self.returncode = None
-
- def set_exit_callback(self, callback):
- """Runs ``callback`` when this process exits.
-
- The callback takes one argument, the return code of the process.
-
- This method uses a ``SIGCHLD`` handler, which is a global setting
- and may conflict if you have other libraries trying to handle the
- same signal. If you are using more than one ``IOLoop`` it may
- be necessary to call `Subprocess.initialize` first to designate
- one ``IOLoop`` to run the signal handlers.
-
- In many cases a close callback on the stdout or stderr streams
- can be used as an alternative to an exit callback if the
- signal handler is causing a problem.
- """
- self._exit_callback = stack_context.wrap(callback)
- Subprocess.initialize(self.io_loop)
- Subprocess._waiting[self.pid] = self
- Subprocess._try_cleanup_process(self.pid)
-
- def wait_for_exit(self, raise_error=True):
- """Returns a `.Future` which resolves when the process exits.
-
- Usage::
-
- ret = yield proc.wait_for_exit()
-
- This is a coroutine-friendly alternative to `set_exit_callback`
- (and a replacement for the blocking `subprocess.Popen.wait`).
-
- By default, raises `subprocess.CalledProcessError` if the process
- has a non-zero exit status. Use ``wait_for_exit(raise_error=False)``
- to suppress this behavior and return the exit status without raising.
-
- .. versionadded:: 4.2
- """
- future = Future()
-
- def callback(ret):
- if ret != 0 and raise_error:
- # Unfortunately we don't have the original args any more.
- future.set_exception(CalledProcessError(ret, None))
- else:
- future.set_result(ret)
- self.set_exit_callback(callback)
- return future
-
- @classmethod
- def initialize(cls, io_loop=None):
- """Initializes the ``SIGCHLD`` handler.
-
- The signal handler is run on an `.IOLoop` to avoid locking issues.
- Note that the `.IOLoop` used for signal handling need not be the
- same one used by individual Subprocess objects (as long as the
- ``IOLoops`` are each running in separate threads).
-
- .. versionchanged:: 4.1
- The ``io_loop`` argument is deprecated.
- """
- if cls._initialized:
- return
- if io_loop is None:
- io_loop = ioloop.IOLoop.current()
- cls._old_sigchld = signal.signal(
- signal.SIGCHLD,
- lambda sig, frame: io_loop.add_callback_from_signal(cls._cleanup))
- cls._initialized = True
-
- @classmethod
- def uninitialize(cls):
- """Removes the ``SIGCHLD`` handler."""
- if not cls._initialized:
- return
- signal.signal(signal.SIGCHLD, cls._old_sigchld)
- cls._initialized = False
-
- @classmethod
- def _cleanup(cls):
- for pid in list(cls._waiting.keys()): # make a copy
- cls._try_cleanup_process(pid)
-
- @classmethod
- def _try_cleanup_process(cls, pid):
- try:
- ret_pid, status = os.waitpid(pid, os.WNOHANG)
- except OSError as e:
- if errno_from_exception(e) == errno.ECHILD:
- return
- if ret_pid == 0:
- return
- assert ret_pid == pid
- subproc = cls._waiting.pop(pid)
- subproc.io_loop.add_callback_from_signal(
- subproc._set_returncode, status)
-
- def _set_returncode(self, status):
- if os.WIFSIGNALED(status):
- self.returncode = -os.WTERMSIG(status)
- else:
- assert os.WIFEXITED(status)
- self.returncode = os.WEXITSTATUS(status)
- # We've taken over wait() duty from the subprocess.Popen
- # object. If we don't inform it of the process's return code,
- # it will log a warning at destruction in python 3.6+.
- self.proc.returncode = self.returncode
- if self._exit_callback:
- callback = self._exit_callback
- self._exit_callback = None
- callback(self.returncode)
+#!/usr/bin/env python
+#
+# Copyright 2011 Facebook
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Utilities for working with multiple processes, including both forking
+the server into multiple processes and managing subprocesses.
+"""
+
+from __future__ import absolute_import, division, print_function
+
+import errno
+import os
+import signal
+import subprocess
+import sys
+import time
+
+from binascii import hexlify
+
+from tornado.concurrent import Future
+from tornado import ioloop
+from tornado.iostream import PipeIOStream
+from tornado.log import gen_log
+from tornado.platform.auto import set_close_exec
+from tornado import stack_context
+from tornado.util import errno_from_exception, PY3
+
+try:
+ import multiprocessing
+except ImportError:
+ # Multiprocessing is not available on Google App Engine.
+ multiprocessing = None
+
+if PY3:
+ long = int
+
+# Re-export this exception for convenience.
+try:
+ CalledProcessError = subprocess.CalledProcessError
+except AttributeError:
+ # The subprocess module exists in Google App Engine, but is empty.
+ # This module isn't very useful in that case, but it should
+ # at least be importable.
+ if 'APPENGINE_RUNTIME' not in os.environ:
+ raise
+
+
+def cpu_count():
+ """Returns the number of processors on this machine."""
+ if multiprocessing is None:
+ return 1
+ try:
+ return multiprocessing.cpu_count()
+ except NotImplementedError:
+ pass
+ try:
+ return os.sysconf("SC_NPROCESSORS_CONF")
+ except (AttributeError, ValueError):
+ pass
+ gen_log.error("Could not detect number of processors; assuming 1")
+ return 1
+
+
+def _reseed_random():
+ if 'random' not in sys.modules:
+ return
+ import random
+ # If os.urandom is available, this method does the same thing as
+ # random.seed (at least as of python 2.6). If os.urandom is not
+ # available, we mix in the pid in addition to a timestamp.
+ try:
+ seed = long(hexlify(os.urandom(16)), 16)
+ except NotImplementedError:
+ seed = int(time.time() * 1000) ^ os.getpid()
+ random.seed(seed)
+
+
+def _pipe_cloexec():
+ r, w = os.pipe()
+ set_close_exec(r)
+ set_close_exec(w)
+ return r, w
+
+
+_task_id = None
+
+
+def fork_processes(num_processes, max_restarts=100):
+ """Starts multiple worker processes.
+
+ If ``num_processes`` is None or <= 0, we detect the number of cores
+ available on this machine and fork that number of child
+ processes. If ``num_processes`` is given and > 0, we fork that
+ specific number of sub-processes.
+
+ Since we use processes and not threads, there is no shared memory
+    between the server processes.
+
+ Note that multiple processes are not compatible with the autoreload
+ module (or the ``autoreload=True`` option to `tornado.web.Application`
+ which defaults to True when ``debug=True``).
+ When using multiple processes, no IOLoops can be created or
+ referenced until after the call to ``fork_processes``.
+
+ In each child process, ``fork_processes`` returns its *task id*, a
+    number between 0 and ``num_processes - 1``. Processes that exit
+ abnormally (due to a signal or non-zero exit status) are restarted
+ with the same id (up to ``max_restarts`` times). In the parent
+ process, ``fork_processes`` returns None if all child processes
+ have exited normally, but will otherwise only exit by throwing an
+ exception.
+ """
+ global _task_id
+ assert _task_id is None
+ if num_processes is None or num_processes <= 0:
+ num_processes = cpu_count()
+ if ioloop.IOLoop.initialized():
+ raise RuntimeError("Cannot run in multiple processes: IOLoop instance "
+ "has already been initialized. You cannot call "
+                           "IOLoop.instance() before calling fork_processes()")
+ gen_log.info("Starting %d processes", num_processes)
+ children = {}
+
+ def start_child(i):
+ pid = os.fork()
+ if pid == 0:
+ # child process
+ _reseed_random()
+ global _task_id
+ _task_id = i
+ return i
+ else:
+ children[pid] = i
+ return None
+
+ for i in range(num_processes):
+ id = start_child(i)
+ if id is not None:
+ return id
+ num_restarts = 0
+ while children:
+ try:
+ pid, status = os.wait()
+ except OSError as e:
+ if errno_from_exception(e) == errno.EINTR:
+ continue
+ raise
+ if pid not in children:
+ continue
+ id = children.pop(pid)
+ if os.WIFSIGNALED(status):
+ gen_log.warning("child %d (pid %d) killed by signal %d, restarting",
+ id, pid, os.WTERMSIG(status))
+ elif os.WEXITSTATUS(status) != 0:
+ gen_log.warning("child %d (pid %d) exited with status %d, restarting",
+ id, pid, os.WEXITSTATUS(status))
+ else:
+ gen_log.info("child %d (pid %d) exited normally", id, pid)
+ continue
+ num_restarts += 1
+ if num_restarts > max_restarts:
+ raise RuntimeError("Too many child restarts, giving up")
+ new_id = start_child(id)
+ if new_id is not None:
+ return new_id
+ # All child processes exited cleanly, so exit the master process
+ # instead of just returning to right after the call to
+ # fork_processes (which will probably just start up another IOLoop
+ # unless the caller checks the return value).
+ sys.exit(0)
+
+
+def task_id():
+ """Returns the current task id, if any.
+
+ Returns None if this process was not created by `fork_processes`.
+ """
+ global _task_id
+ return _task_id
+
+
+class Subprocess(object):
+ """Wraps ``subprocess.Popen`` with IOStream support.
+
+ The constructor is the same as ``subprocess.Popen`` with the following
+ additions:
+
+ * ``stdin``, ``stdout``, and ``stderr`` may have the value
+ ``tornado.process.Subprocess.STREAM``, which will make the corresponding
+ attribute of the resulting Subprocess a `.PipeIOStream`.
+ * A new keyword argument ``io_loop`` may be used to pass in an IOLoop.
+
+ The ``Subprocess.STREAM`` option and the ``set_exit_callback`` and
+ ``wait_for_exit`` methods do not work on Windows. There is
+ therefore no reason to use this class instead of
+ ``subprocess.Popen`` on that platform.
+
+ .. versionchanged:: 4.1
+ The ``io_loop`` argument is deprecated.
+
+ """
+ STREAM = object()
+
+ _initialized = False
+ _waiting = {} # type: ignore
+
+ def __init__(self, *args, **kwargs):
+ self.io_loop = kwargs.pop('io_loop', None) or ioloop.IOLoop.current()
+ # All FDs we create should be closed on error; those in to_close
+ # should be closed in the parent process on success.
+ pipe_fds = []
+ to_close = []
+ if kwargs.get('stdin') is Subprocess.STREAM:
+ in_r, in_w = _pipe_cloexec()
+ kwargs['stdin'] = in_r
+ pipe_fds.extend((in_r, in_w))
+ to_close.append(in_r)
+ self.stdin = PipeIOStream(in_w, io_loop=self.io_loop)
+ if kwargs.get('stdout') is Subprocess.STREAM:
+ out_r, out_w = _pipe_cloexec()
+ kwargs['stdout'] = out_w
+ pipe_fds.extend((out_r, out_w))
+ to_close.append(out_w)
+ self.stdout = PipeIOStream(out_r, io_loop=self.io_loop)
+ if kwargs.get('stderr') is Subprocess.STREAM:
+ err_r, err_w = _pipe_cloexec()
+ kwargs['stderr'] = err_w
+ pipe_fds.extend((err_r, err_w))
+ to_close.append(err_w)
+ self.stderr = PipeIOStream(err_r, io_loop=self.io_loop)
+ try:
+ self.proc = subprocess.Popen(*args, **kwargs)
+ except:
+ for fd in pipe_fds:
+ os.close(fd)
+ raise
+ for fd in to_close:
+ os.close(fd)
+ for attr in ['stdin', 'stdout', 'stderr', 'pid']:
+ if not hasattr(self, attr): # don't clobber streams set above
+ setattr(self, attr, getattr(self.proc, attr))
+ self._exit_callback = None
+ self.returncode = None
+
+ def set_exit_callback(self, callback):
+ """Runs ``callback`` when this process exits.
+
+ The callback takes one argument, the return code of the process.
+
+ This method uses a ``SIGCHLD`` handler, which is a global setting
+ and may conflict if you have other libraries trying to handle the
+ same signal. If you are using more than one ``IOLoop`` it may
+ be necessary to call `Subprocess.initialize` first to designate
+ one ``IOLoop`` to run the signal handlers.
+
+ In many cases a close callback on the stdout or stderr streams
+ can be used as an alternative to an exit callback if the
+ signal handler is causing a problem.
+ """
+ self._exit_callback = stack_context.wrap(callback)
+ Subprocess.initialize(self.io_loop)
+ Subprocess._waiting[self.pid] = self
+ Subprocess._try_cleanup_process(self.pid)
+
+ def wait_for_exit(self, raise_error=True):
+ """Returns a `.Future` which resolves when the process exits.
+
+ Usage::
+
+ ret = yield proc.wait_for_exit()
+
+ This is a coroutine-friendly alternative to `set_exit_callback`
+ (and a replacement for the blocking `subprocess.Popen.wait`).
+
+ By default, raises `subprocess.CalledProcessError` if the process
+ has a non-zero exit status. Use ``wait_for_exit(raise_error=False)``
+ to suppress this behavior and return the exit status without raising.
+
+ .. versionadded:: 4.2
+ """
+ future = Future()
+
+ def callback(ret):
+ if ret != 0 and raise_error:
+ # Unfortunately we don't have the original args any more.
+ future.set_exception(CalledProcessError(ret, None))
+ else:
+ future.set_result(ret)
+ self.set_exit_callback(callback)
+ return future
+
+ @classmethod
+ def initialize(cls, io_loop=None):
+ """Initializes the ``SIGCHLD`` handler.
+
+ The signal handler is run on an `.IOLoop` to avoid locking issues.
+ Note that the `.IOLoop` used for signal handling need not be the
+ same one used by individual Subprocess objects (as long as the
+ ``IOLoops`` are each running in separate threads).
+
+ .. versionchanged:: 4.1
+ The ``io_loop`` argument is deprecated.
+ """
+ if cls._initialized:
+ return
+ if io_loop is None:
+ io_loop = ioloop.IOLoop.current()
+ cls._old_sigchld = signal.signal(
+ signal.SIGCHLD,
+ lambda sig, frame: io_loop.add_callback_from_signal(cls._cleanup))
+ cls._initialized = True
+
+ @classmethod
+ def uninitialize(cls):
+ """Removes the ``SIGCHLD`` handler."""
+ if not cls._initialized:
+ return
+ signal.signal(signal.SIGCHLD, cls._old_sigchld)
+ cls._initialized = False
+
+ @classmethod
+ def _cleanup(cls):
+ for pid in list(cls._waiting.keys()): # make a copy
+ cls._try_cleanup_process(pid)
+
+ @classmethod
+ def _try_cleanup_process(cls, pid):
+ try:
+ ret_pid, status = os.waitpid(pid, os.WNOHANG)
+ except OSError as e:
+ if errno_from_exception(e) == errno.ECHILD:
+ return
+ if ret_pid == 0:
+ return
+ assert ret_pid == pid
+ subproc = cls._waiting.pop(pid)
+ subproc.io_loop.add_callback_from_signal(
+ subproc._set_returncode, status)
+
+ def _set_returncode(self, status):
+ if os.WIFSIGNALED(status):
+ self.returncode = -os.WTERMSIG(status)
+ else:
+ assert os.WIFEXITED(status)
+ self.returncode = os.WEXITSTATUS(status)
+ # We've taken over wait() duty from the subprocess.Popen
+ # object. If we don't inform it of the process's return code,
+ # it will log a warning at destruction in python 3.6+.
+ self.proc.returncode = self.returncode
+ if self._exit_callback:
+ callback = self._exit_callback
+ self._exit_callback = None
+ callback(self.returncode)
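
Two brief sketches of the patterns the docstrings above describe (not taken from the patch; ``application`` and the port number are placeholders). First, the pre-fork pattern: bind the listening sockets in the parent, call ``fork_processes``, and only then start one ``IOLoop`` per child:

    from tornado import process
    from tornado.httpserver import HTTPServer
    from tornado.ioloop import IOLoop
    from tornado.netutil import bind_sockets

    sockets = bind_sockets(8888)           # parent binds before forking
    task_id = process.fork_processes(0)    # 0 means one child per CPU core
    # Only child processes reach this point; task_id is this child's id.
    server = HTTPServer(application)       # `application` defined elsewhere
    server.add_sockets(sockets)
    IOLoop.current().start()

Second, ``Subprocess.STREAM`` combined with the coroutine-friendly ``wait_for_exit``:

    from tornado import gen
    from tornado.process import Subprocess

    @gen.coroutine
    def run_ls():
        proc = Subprocess(['ls', '-l'], stdout=Subprocess.STREAM)
        output = yield proc.stdout.read_until_close()
        ret = yield proc.wait_for_exit(raise_error=False)
        raise gen.Return((ret, output))
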
diff --git a/contrib/python/tornado/tornado-4/tornado/queues.py b/contrib/python/tornado/tornado-4/tornado/queues.py
index 0041a80086..76c4843b8b 100644
--- a/contrib/python/tornado/tornado-4/tornado/queues.py
+++ b/contrib/python/tornado/tornado-4/tornado/queues.py
@@ -1,367 +1,367 @@
-# Copyright 2015 The Tornado Authors
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Asynchronous queues for coroutines.
-
-.. warning::
-
- Unlike the standard library's `queue` module, the classes defined here
- are *not* thread-safe. To use these queues from another thread,
- use `.IOLoop.add_callback` to transfer control to the `.IOLoop` thread
- before calling any queue methods.
-"""
-
-from __future__ import absolute_import, division, print_function
-
-import collections
-import heapq
-
-from tornado import gen, ioloop
-from tornado.concurrent import Future
-from tornado.locks import Event
-
-__all__ = ['Queue', 'PriorityQueue', 'LifoQueue', 'QueueFull', 'QueueEmpty']
-
-
-class QueueEmpty(Exception):
- """Raised by `.Queue.get_nowait` when the queue has no items."""
- pass
-
-
-class QueueFull(Exception):
- """Raised by `.Queue.put_nowait` when a queue is at its maximum size."""
- pass
-
-
-def _set_timeout(future, timeout):
- if timeout:
- def on_timeout():
- future.set_exception(gen.TimeoutError())
- io_loop = ioloop.IOLoop.current()
- timeout_handle = io_loop.add_timeout(timeout, on_timeout)
- future.add_done_callback(
- lambda _: io_loop.remove_timeout(timeout_handle))
-
-
-class _QueueIterator(object):
- def __init__(self, q):
- self.q = q
-
- def __anext__(self):
- return self.q.get()
-
-
-class Queue(object):
- """Coordinate producer and consumer coroutines.
-
- If maxsize is 0 (the default) the queue size is unbounded.
-
- .. testcode::
-
- from tornado import gen
- from tornado.ioloop import IOLoop
- from tornado.queues import Queue
-
- q = Queue(maxsize=2)
-
- @gen.coroutine
- def consumer():
- while True:
- item = yield q.get()
- try:
- print('Doing work on %s' % item)
- yield gen.sleep(0.01)
- finally:
- q.task_done()
-
- @gen.coroutine
- def producer():
- for item in range(5):
- yield q.put(item)
- print('Put %s' % item)
-
- @gen.coroutine
- def main():
- # Start consumer without waiting (since it never finishes).
- IOLoop.current().spawn_callback(consumer)
- yield producer() # Wait for producer to put all tasks.
- yield q.join() # Wait for consumer to finish all tasks.
- print('Done')
-
- IOLoop.current().run_sync(main)
-
- .. testoutput::
-
- Put 0
- Put 1
- Doing work on 0
- Put 2
- Doing work on 1
- Put 3
- Doing work on 2
- Put 4
- Doing work on 3
- Doing work on 4
- Done
-
- In Python 3.5, `Queue` implements the async iterator protocol, so
- ``consumer()`` could be rewritten as::
-
- async def consumer():
- async for item in q:
- try:
- print('Doing work on %s' % item)
-                    await gen.sleep(0.01)
- finally:
- q.task_done()
-
- .. versionchanged:: 4.3
- Added ``async for`` support in Python 3.5.
-
- """
- def __init__(self, maxsize=0):
- if maxsize is None:
- raise TypeError("maxsize can't be None")
-
- if maxsize < 0:
- raise ValueError("maxsize can't be negative")
-
- self._maxsize = maxsize
- self._init()
- self._getters = collections.deque([]) # Futures.
- self._putters = collections.deque([]) # Pairs of (item, Future).
- self._unfinished_tasks = 0
- self._finished = Event()
- self._finished.set()
-
- @property
- def maxsize(self):
- """Number of items allowed in the queue."""
- return self._maxsize
-
- def qsize(self):
- """Number of items in the queue."""
- return len(self._queue)
-
- def empty(self):
- return not self._queue
-
- def full(self):
- if self.maxsize == 0:
- return False
- else:
- return self.qsize() >= self.maxsize
-
- def put(self, item, timeout=None):
- """Put an item into the queue, perhaps waiting until there is room.
-
- Returns a Future, which raises `tornado.gen.TimeoutError` after a
- timeout.
- """
- try:
- self.put_nowait(item)
- except QueueFull:
- future = Future()
- self._putters.append((item, future))
- _set_timeout(future, timeout)
- return future
- else:
- return gen._null_future
-
- def put_nowait(self, item):
- """Put an item into the queue without blocking.
-
- If no free slot is immediately available, raise `QueueFull`.
- """
- self._consume_expired()
- if self._getters:
- assert self.empty(), "queue non-empty, why are getters waiting?"
- getter = self._getters.popleft()
- self.__put_internal(item)
- getter.set_result(self._get())
- elif self.full():
- raise QueueFull
- else:
- self.__put_internal(item)
-
- def get(self, timeout=None):
- """Remove and return an item from the queue.
-
- Returns a Future which resolves once an item is available, or raises
- `tornado.gen.TimeoutError` after a timeout.
- """
- future = Future()
- try:
- future.set_result(self.get_nowait())
- except QueueEmpty:
- self._getters.append(future)
- _set_timeout(future, timeout)
- return future
-
- def get_nowait(self):
- """Remove and return an item from the queue without blocking.
-
- Return an item if one is immediately available, else raise
- `QueueEmpty`.
- """
- self._consume_expired()
- if self._putters:
- assert self.full(), "queue not full, why are putters waiting?"
- item, putter = self._putters.popleft()
- self.__put_internal(item)
- putter.set_result(None)
- return self._get()
- elif self.qsize():
- return self._get()
- else:
- raise QueueEmpty
-
- def task_done(self):
- """Indicate that a formerly enqueued task is complete.
-
- Used by queue consumers. For each `.get` used to fetch a task, a
- subsequent call to `.task_done` tells the queue that the processing
- on the task is complete.
-
- If a `.join` is blocking, it resumes when all items have been
- processed; that is, when every `.put` is matched by a `.task_done`.
-
- Raises `ValueError` if called more times than `.put`.
- """
- if self._unfinished_tasks <= 0:
- raise ValueError('task_done() called too many times')
- self._unfinished_tasks -= 1
- if self._unfinished_tasks == 0:
- self._finished.set()
-
- def join(self, timeout=None):
- """Block until all items in the queue are processed.
-
- Returns a Future, which raises `tornado.gen.TimeoutError` after a
- timeout.
- """
- return self._finished.wait(timeout)
-
- @gen.coroutine
- def __aiter__(self):
- return _QueueIterator(self)
-
- # These three are overridable in subclasses.
- def _init(self):
- self._queue = collections.deque()
-
- def _get(self):
- return self._queue.popleft()
-
- def _put(self, item):
- self._queue.append(item)
- # End of the overridable methods.
-
- def __put_internal(self, item):
- self._unfinished_tasks += 1
- self._finished.clear()
- self._put(item)
-
- def _consume_expired(self):
- # Remove timed-out waiters.
- while self._putters and self._putters[0][1].done():
- self._putters.popleft()
-
- while self._getters and self._getters[0].done():
- self._getters.popleft()
-
- def __repr__(self):
- return '<%s at %s %s>' % (
- type(self).__name__, hex(id(self)), self._format())
-
- def __str__(self):
- return '<%s %s>' % (type(self).__name__, self._format())
-
- def _format(self):
- result = 'maxsize=%r' % (self.maxsize, )
- if getattr(self, '_queue', None):
- result += ' queue=%r' % self._queue
- if self._getters:
- result += ' getters[%s]' % len(self._getters)
- if self._putters:
- result += ' putters[%s]' % len(self._putters)
- if self._unfinished_tasks:
- result += ' tasks=%s' % self._unfinished_tasks
- return result
-
-
-class PriorityQueue(Queue):
- """A `.Queue` that retrieves entries in priority order, lowest first.
-
- Entries are typically tuples like ``(priority number, data)``.
-
- .. testcode::
-
- from tornado.queues import PriorityQueue
-
- q = PriorityQueue()
- q.put((1, 'medium-priority item'))
- q.put((0, 'high-priority item'))
- q.put((10, 'low-priority item'))
-
- print(q.get_nowait())
- print(q.get_nowait())
- print(q.get_nowait())
-
- .. testoutput::
-
- (0, 'high-priority item')
- (1, 'medium-priority item')
- (10, 'low-priority item')
- """
- def _init(self):
- self._queue = []
-
- def _put(self, item):
- heapq.heappush(self._queue, item)
-
- def _get(self):
- return heapq.heappop(self._queue)
-
-
-class LifoQueue(Queue):
- """A `.Queue` that retrieves the most recently put items first.
-
- .. testcode::
-
- from tornado.queues import LifoQueue
-
- q = LifoQueue()
- q.put(3)
- q.put(2)
- q.put(1)
-
- print(q.get_nowait())
- print(q.get_nowait())
- print(q.get_nowait())
-
- .. testoutput::
-
- 1
- 2
- 3
- """
- def _init(self):
- self._queue = []
-
- def _put(self, item):
- self._queue.append(item)
-
- def _get(self):
- return self._queue.pop()
+# Copyright 2015 The Tornado Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Asynchronous queues for coroutines.
+
+.. warning::
+
+ Unlike the standard library's `queue` module, the classes defined here
+ are *not* thread-safe. To use these queues from another thread,
+ use `.IOLoop.add_callback` to transfer control to the `.IOLoop` thread
+ before calling any queue methods.
+"""
+
+from __future__ import absolute_import, division, print_function
+
+import collections
+import heapq
+
+from tornado import gen, ioloop
+from tornado.concurrent import Future
+from tornado.locks import Event
+
+__all__ = ['Queue', 'PriorityQueue', 'LifoQueue', 'QueueFull', 'QueueEmpty']
+
+
+class QueueEmpty(Exception):
+ """Raised by `.Queue.get_nowait` when the queue has no items."""
+ pass
+
+
+class QueueFull(Exception):
+ """Raised by `.Queue.put_nowait` when a queue is at its maximum size."""
+ pass
+
+
+def _set_timeout(future, timeout):
+ if timeout:
+ def on_timeout():
+ future.set_exception(gen.TimeoutError())
+ io_loop = ioloop.IOLoop.current()
+ timeout_handle = io_loop.add_timeout(timeout, on_timeout)
+ future.add_done_callback(
+ lambda _: io_loop.remove_timeout(timeout_handle))
+
+
+class _QueueIterator(object):
+ def __init__(self, q):
+ self.q = q
+
+ def __anext__(self):
+ return self.q.get()
+
+
+class Queue(object):
+ """Coordinate producer and consumer coroutines.
+
+ If maxsize is 0 (the default) the queue size is unbounded.
+
+ .. testcode::
+
+ from tornado import gen
+ from tornado.ioloop import IOLoop
+ from tornado.queues import Queue
+
+ q = Queue(maxsize=2)
+
+ @gen.coroutine
+ def consumer():
+ while True:
+ item = yield q.get()
+ try:
+ print('Doing work on %s' % item)
+ yield gen.sleep(0.01)
+ finally:
+ q.task_done()
+
+ @gen.coroutine
+ def producer():
+ for item in range(5):
+ yield q.put(item)
+ print('Put %s' % item)
+
+ @gen.coroutine
+ def main():
+ # Start consumer without waiting (since it never finishes).
+ IOLoop.current().spawn_callback(consumer)
+ yield producer() # Wait for producer to put all tasks.
+ yield q.join() # Wait for consumer to finish all tasks.
+ print('Done')
+
+ IOLoop.current().run_sync(main)
+
+ .. testoutput::
+
+ Put 0
+ Put 1
+ Doing work on 0
+ Put 2
+ Doing work on 1
+ Put 3
+ Doing work on 2
+ Put 4
+ Doing work on 3
+ Doing work on 4
+ Done
+
+ In Python 3.5, `Queue` implements the async iterator protocol, so
+ ``consumer()`` could be rewritten as::
+
+ async def consumer():
+ async for item in q:
+ try:
+ print('Doing work on %s' % item)
+                    await gen.sleep(0.01)
+ finally:
+ q.task_done()
+
+ .. versionchanged:: 4.3
+ Added ``async for`` support in Python 3.5.
+
+ """
+ def __init__(self, maxsize=0):
+ if maxsize is None:
+ raise TypeError("maxsize can't be None")
+
+ if maxsize < 0:
+ raise ValueError("maxsize can't be negative")
+
+ self._maxsize = maxsize
+ self._init()
+ self._getters = collections.deque([]) # Futures.
+ self._putters = collections.deque([]) # Pairs of (item, Future).
+ self._unfinished_tasks = 0
+ self._finished = Event()
+ self._finished.set()
+
+ @property
+ def maxsize(self):
+ """Number of items allowed in the queue."""
+ return self._maxsize
+
+ def qsize(self):
+ """Number of items in the queue."""
+ return len(self._queue)
+
+ def empty(self):
+ return not self._queue
+
+ def full(self):
+ if self.maxsize == 0:
+ return False
+ else:
+ return self.qsize() >= self.maxsize
+
+ def put(self, item, timeout=None):
+ """Put an item into the queue, perhaps waiting until there is room.
+
+ Returns a Future, which raises `tornado.gen.TimeoutError` after a
+ timeout.
+ """
+ try:
+ self.put_nowait(item)
+ except QueueFull:
+ future = Future()
+ self._putters.append((item, future))
+ _set_timeout(future, timeout)
+ return future
+ else:
+ return gen._null_future
+
+ def put_nowait(self, item):
+ """Put an item into the queue without blocking.
+
+ If no free slot is immediately available, raise `QueueFull`.
+ """
+ self._consume_expired()
+ if self._getters:
+ assert self.empty(), "queue non-empty, why are getters waiting?"
+ getter = self._getters.popleft()
+ self.__put_internal(item)
+ getter.set_result(self._get())
+ elif self.full():
+ raise QueueFull
+ else:
+ self.__put_internal(item)
+
+ def get(self, timeout=None):
+ """Remove and return an item from the queue.
+
+ Returns a Future which resolves once an item is available, or raises
+ `tornado.gen.TimeoutError` after a timeout.
+ """
+ future = Future()
+ try:
+ future.set_result(self.get_nowait())
+ except QueueEmpty:
+ self._getters.append(future)
+ _set_timeout(future, timeout)
+ return future
+
+ def get_nowait(self):
+ """Remove and return an item from the queue without blocking.
+
+ Return an item if one is immediately available, else raise
+ `QueueEmpty`.
+ """
+ self._consume_expired()
+ if self._putters:
+ assert self.full(), "queue not full, why are putters waiting?"
+ item, putter = self._putters.popleft()
+ self.__put_internal(item)
+ putter.set_result(None)
+ return self._get()
+ elif self.qsize():
+ return self._get()
+ else:
+ raise QueueEmpty
+
+ def task_done(self):
+ """Indicate that a formerly enqueued task is complete.
+
+ Used by queue consumers. For each `.get` used to fetch a task, a
+ subsequent call to `.task_done` tells the queue that the processing
+ on the task is complete.
+
+ If a `.join` is blocking, it resumes when all items have been
+ processed; that is, when every `.put` is matched by a `.task_done`.
+
+ Raises `ValueError` if called more times than `.put`.
+ """
+ if self._unfinished_tasks <= 0:
+ raise ValueError('task_done() called too many times')
+ self._unfinished_tasks -= 1
+ if self._unfinished_tasks == 0:
+ self._finished.set()
+
+ def join(self, timeout=None):
+ """Block until all items in the queue are processed.
+
+ Returns a Future, which raises `tornado.gen.TimeoutError` after a
+ timeout.
+ """
+ return self._finished.wait(timeout)
+
+ @gen.coroutine
+ def __aiter__(self):
+ return _QueueIterator(self)
+
+ # These three are overridable in subclasses.
+ def _init(self):
+ self._queue = collections.deque()
+
+ def _get(self):
+ return self._queue.popleft()
+
+ def _put(self, item):
+ self._queue.append(item)
+ # End of the overridable methods.
+
+ def __put_internal(self, item):
+ self._unfinished_tasks += 1
+ self._finished.clear()
+ self._put(item)
+
+ def _consume_expired(self):
+ # Remove timed-out waiters.
+ while self._putters and self._putters[0][1].done():
+ self._putters.popleft()
+
+ while self._getters and self._getters[0].done():
+ self._getters.popleft()
+
+ def __repr__(self):
+ return '<%s at %s %s>' % (
+ type(self).__name__, hex(id(self)), self._format())
+
+ def __str__(self):
+ return '<%s %s>' % (type(self).__name__, self._format())
+
+ def _format(self):
+ result = 'maxsize=%r' % (self.maxsize, )
+ if getattr(self, '_queue', None):
+ result += ' queue=%r' % self._queue
+ if self._getters:
+ result += ' getters[%s]' % len(self._getters)
+ if self._putters:
+ result += ' putters[%s]' % len(self._putters)
+ if self._unfinished_tasks:
+ result += ' tasks=%s' % self._unfinished_tasks
+ return result
+
+
+class PriorityQueue(Queue):
+ """A `.Queue` that retrieves entries in priority order, lowest first.
+
+ Entries are typically tuples like ``(priority number, data)``.
+
+ .. testcode::
+
+ from tornado.queues import PriorityQueue
+
+ q = PriorityQueue()
+ q.put((1, 'medium-priority item'))
+ q.put((0, 'high-priority item'))
+ q.put((10, 'low-priority item'))
+
+ print(q.get_nowait())
+ print(q.get_nowait())
+ print(q.get_nowait())
+
+ .. testoutput::
+
+ (0, 'high-priority item')
+ (1, 'medium-priority item')
+ (10, 'low-priority item')
+ """
+ def _init(self):
+ self._queue = []
+
+ def _put(self, item):
+ heapq.heappush(self._queue, item)
+
+ def _get(self):
+ return heapq.heappop(self._queue)
+
+
+class LifoQueue(Queue):
+ """A `.Queue` that retrieves the most recently put items first.
+
+ .. testcode::
+
+ from tornado.queues import LifoQueue
+
+ q = LifoQueue()
+ q.put(3)
+ q.put(2)
+ q.put(1)
+
+ print(q.get_nowait())
+ print(q.get_nowait())
+ print(q.get_nowait())
+
+ .. testoutput::
+
+ 1
+ 2
+ 3
+ """
+ def _init(self):
+ self._queue = []
+
+ def _put(self, item):
+ self._queue.append(item)
+
+ def _get(self):
+ return self._queue.pop()
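
One detail worth a sketch (not from the patch): the ``timeout`` argument to ``Queue.get`` and ``Queue.put`` is a deadline in the ``IOLoop.add_timeout`` sense, for example a ``datetime.timedelta`` relative to now, and the pending Future raises ``tornado.gen.TimeoutError`` once it expires:

    from datetime import timedelta

    from tornado import gen
    from tornado.ioloop import IOLoop
    from tornado.queues import Queue

    @gen.coroutine
    def main():
        q = Queue()
        try:
            # Nothing is ever put, so this times out after 100ms.
            yield q.get(timeout=timedelta(seconds=0.1))
        except gen.TimeoutError:
            print('no item within 100ms')

    IOLoop.current().run_sync(main)
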
diff --git a/contrib/python/tornado/tornado-4/tornado/routing.py b/contrib/python/tornado/tornado-4/tornado/routing.py
index 6762dc05bc..2f7e036e1d 100644
--- a/contrib/python/tornado/tornado-4/tornado/routing.py
+++ b/contrib/python/tornado/tornado-4/tornado/routing.py
@@ -1,625 +1,625 @@
-# Copyright 2015 The Tornado Authors
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Flexible routing implementation.
-
-Tornado routes HTTP requests to appropriate handlers using `Router`
-class implementations. The `tornado.web.Application` class is a
-`Router` implementation and may be used directly, or the classes in
-this module may be used for additional flexibility. The `RuleRouter`
-class can match on more criteria than `.Application`, or the `Router`
-interface can be subclassed for maximum customization.
-
-The `Router` interface extends `~.httputil.HTTPServerConnectionDelegate`
-to provide additional routing capabilities. This also means that any
-`Router` implementation can be used directly as a ``request_callback``
-for the `~.httpserver.HTTPServer` constructor.
-
-A `Router` subclass must implement a ``find_handler`` method to provide
-a suitable `~.httputil.HTTPMessageDelegate` instance to handle the
-request:
-
-.. code-block:: python
-
- class CustomRouter(Router):
- def find_handler(self, request, **kwargs):
- # some routing logic providing a suitable HTTPMessageDelegate instance
- return MessageDelegate(request.connection)
-
- class MessageDelegate(HTTPMessageDelegate):
- def __init__(self, connection):
- self.connection = connection
-
- def finish(self):
- self.connection.write_headers(
- ResponseStartLine("HTTP/1.1", 200, "OK"),
- HTTPHeaders({"Content-Length": "2"}),
- b"OK")
- self.connection.finish()
-
- router = CustomRouter()
- server = HTTPServer(router)
-
-The main responsibility of a `Router` implementation is to provide a
-mapping from a request to a `~.httputil.HTTPMessageDelegate` instance
-that will handle the request. In the example above we can see that
-routing is possible even without instantiating an `~.web.Application`.
-
-For routing to `~.web.RequestHandler` implementations we need an
-`~.web.Application` instance. `~.web.Application.get_handler_delegate`
-provides a convenient way to create `~.httputil.HTTPMessageDelegate`
-for a given request and `~.web.RequestHandler`.
-
-Here is a simple example of how we can route to
-`~.web.RequestHandler` subclasses by HTTP method:
-
-.. code-block:: python
-
- resources = {}
-
- class GetResource(RequestHandler):
- def get(self, path):
- if path not in resources:
- raise HTTPError(404)
-
- self.finish(resources[path])
-
- class PostResource(RequestHandler):
- def post(self, path):
- resources[path] = self.request.body
-
- class HTTPMethodRouter(Router):
- def __init__(self, app):
- self.app = app
-
- def find_handler(self, request, **kwargs):
- handler = GetResource if request.method == "GET" else PostResource
- return self.app.get_handler_delegate(request, handler, path_args=[request.path])
-
- router = HTTPMethodRouter(Application())
- server = HTTPServer(router)
-
-The `ReversibleRouter` interface adds the ability to distinguish between
-the routes and reverse them to the original urls using the route's name
-and additional arguments. `~.web.Application` is itself an
-implementation of the `ReversibleRouter` class.
-
-`RuleRouter` and `ReversibleRuleRouter` are implementations of
-`Router` and `ReversibleRouter` interfaces and can be used for
-creating rule-based routing configurations.
-
-Rules are instances of the `Rule` class. They contain a `Matcher`, which
-provides the logic for determining whether the rule is a match for a
-particular request, and a target, which can be one of the following.
-
-1) An instance of `~.httputil.HTTPServerConnectionDelegate`:
-
-.. code-block:: python
-
- router = RuleRouter([
- Rule(PathMatches("/handler"), ConnectionDelegate()),
- # ... more rules
- ])
-
- class ConnectionDelegate(HTTPServerConnectionDelegate):
- def start_request(self, server_conn, request_conn):
- return MessageDelegate(request_conn)
-
-2) A callable accepting a single argument of `~.httputil.HTTPServerRequest` type:
-
-.. code-block:: python
-
- router = RuleRouter([
- Rule(PathMatches("/callable"), request_callable)
- ])
-
- def request_callable(request):
- request.write(b"HTTP/1.1 200 OK\\r\\nContent-Length: 2\\r\\n\\r\\nOK")
- request.finish()
-
-3) Another `Router` instance:
-
-.. code-block:: python
-
- router = RuleRouter([
- Rule(PathMatches("/router.*"), CustomRouter())
- ])
-
-Of course a nested `RuleRouter` or a `~.web.Application` is allowed:
-
-.. code-block:: python
-
- router = RuleRouter([
- Rule(HostMatches("example.com"), RuleRouter([
-            Rule(PathMatches("/app1/.*"), Application([(r"/app1/handler", Handler)])),
- ]))
- ])
-
- server = HTTPServer(router)
-
-In the example below `RuleRouter` is used to route between applications:
-
-.. code-block:: python
-
- app1 = Application([
- (r"/app1/handler", Handler1),
- # other handlers ...
- ])
-
- app2 = Application([
- (r"/app2/handler", Handler2),
- # other handlers ...
- ])
-
- router = RuleRouter([
- Rule(PathMatches("/app1.*"), app1),
- Rule(PathMatches("/app2.*"), app2)
- ])
-
- server = HTTPServer(router)
-
-For more information on application-level routing see docs for `~.web.Application`.
-
-.. versionadded:: 4.5
-
-"""
-
-from __future__ import absolute_import, division, print_function
-
-import re
-from functools import partial
-
-from tornado import httputil
-from tornado.httpserver import _CallableAdapter
-from tornado.escape import url_escape, url_unescape, utf8
-from tornado.log import app_log
-from tornado.util import basestring_type, import_object, re_unescape, unicode_type
-
-try:
- import typing # noqa
-except ImportError:
- pass
-
-
-class Router(httputil.HTTPServerConnectionDelegate):
- """Abstract router interface."""
-
- def find_handler(self, request, **kwargs):
- # type: (httputil.HTTPServerRequest, typing.Any)->httputil.HTTPMessageDelegate
- """Must be implemented to return an appropriate instance of `~.httputil.HTTPMessageDelegate`
- that can serve the request.
- Routing implementations may pass additional kwargs to extend the routing logic.
-
- :arg httputil.HTTPServerRequest request: current HTTP request.
- :arg kwargs: additional keyword arguments passed by routing implementation.
- :returns: an instance of `~.httputil.HTTPMessageDelegate` that will be used to
- process the request.
- """
- raise NotImplementedError()
-
- def start_request(self, server_conn, request_conn):
- return _RoutingDelegate(self, server_conn, request_conn)
-
-
-class ReversibleRouter(Router):
- """Abstract router interface for routers that can handle named routes
- and support reversing them to original urls.
- """
-
- def reverse_url(self, name, *args):
- """Returns url string for a given route name and arguments
- or ``None`` if no match is found.
-
- :arg str name: route name.
- :arg args: url parameters.
- :returns: parametrized url string for a given route name (or ``None``).
- """
- raise NotImplementedError()
-
-
-class _RoutingDelegate(httputil.HTTPMessageDelegate):
- def __init__(self, router, server_conn, request_conn):
- self.server_conn = server_conn
- self.request_conn = request_conn
- self.delegate = None
- self.router = router # type: Router
-
- def headers_received(self, start_line, headers):
- request = httputil.HTTPServerRequest(
- connection=self.request_conn,
- server_connection=self.server_conn,
- start_line=start_line, headers=headers)
-
- self.delegate = self.router.find_handler(request)
- return self.delegate.headers_received(start_line, headers)
-
- def data_received(self, chunk):
- return self.delegate.data_received(chunk)
-
- def finish(self):
- self.delegate.finish()
-
- def on_connection_close(self):
- self.delegate.on_connection_close()
-
-
-class RuleRouter(Router):
- """Rule-based router implementation."""
-
- def __init__(self, rules=None):
- """Constructs a router from an ordered list of rules::
-
- RuleRouter([
- Rule(PathMatches("/handler"), Target),
- # ... more rules
- ])
-
-        You can also omit the explicit `Rule` constructor and use tuples of arguments::
-
- RuleRouter([
- (PathMatches("/handler"), Target),
- ])
-
-        `PathMatches` is the default matcher, so the example above can be simplified::
-
- RuleRouter([
- ("/handler", Target),
- ])
-
- In the examples above, ``Target`` can be a nested `Router` instance, an instance of
- `~.httputil.HTTPServerConnectionDelegate` or an old-style callable, accepting a request argument.
-
- :arg rules: a list of `Rule` instances or tuples of `Rule`
- constructor arguments.
- """
- self.rules = [] # type: typing.List[Rule]
- if rules:
- self.add_rules(rules)
-
- def add_rules(self, rules):
- """Appends new rules to the router.
-
- :arg rules: a list of Rule instances (or tuples of arguments, which are
- passed to Rule constructor).
- """
- for rule in rules:
- if isinstance(rule, (tuple, list)):
- assert len(rule) in (2, 3, 4)
- if isinstance(rule[0], basestring_type):
- rule = Rule(PathMatches(rule[0]), *rule[1:])
- else:
- rule = Rule(*rule)
-
- self.rules.append(self.process_rule(rule))
-
- def process_rule(self, rule):
- """Override this method for additional preprocessing of each rule.
-
- :arg Rule rule: a rule to be processed.
- :returns: the same or modified Rule instance.
- """
- return rule
-
- def find_handler(self, request, **kwargs):
- for rule in self.rules:
- target_params = rule.matcher.match(request)
- if target_params is not None:
- if rule.target_kwargs:
- target_params['target_kwargs'] = rule.target_kwargs
-
- delegate = self.get_target_delegate(
- rule.target, request, **target_params)
-
- if delegate is not None:
- return delegate
-
- return None
-
- def get_target_delegate(self, target, request, **target_params):
- """Returns an instance of `~.httputil.HTTPMessageDelegate` for a
- Rule's target. This method is called by `~.find_handler` and can be
- extended to provide additional target types.
-
- :arg target: a Rule's target.
- :arg httputil.HTTPServerRequest request: current request.
- :arg target_params: additional parameters that can be useful
- for `~.httputil.HTTPMessageDelegate` creation.
- """
- if isinstance(target, Router):
- return target.find_handler(request, **target_params)
-
- elif isinstance(target, httputil.HTTPServerConnectionDelegate):
- return target.start_request(request.server_connection, request.connection)
-
- elif callable(target):
- return _CallableAdapter(
- partial(target, **target_params), request.connection
- )
-
- return None
-
-
-class ReversibleRuleRouter(ReversibleRouter, RuleRouter):
- """A rule-based router that implements ``reverse_url`` method.
-
- Each rule added to this router may have a ``name`` attribute that can be
- used to reconstruct an original uri. The actual reconstruction takes place
- in a rule's matcher (see `Matcher.reverse`).
- """
-
- def __init__(self, rules=None):
-        self.named_rules = {}  # type: typing.Dict[str, Rule]
- super(ReversibleRuleRouter, self).__init__(rules)
-
- def process_rule(self, rule):
- rule = super(ReversibleRuleRouter, self).process_rule(rule)
-
- if rule.name:
- if rule.name in self.named_rules:
- app_log.warning(
- "Multiple handlers named %s; replacing previous value",
- rule.name)
- self.named_rules[rule.name] = rule
-
- return rule
-
- def reverse_url(self, name, *args):
- if name in self.named_rules:
- return self.named_rules[name].matcher.reverse(*args)
-
- for rule in self.rules:
- if isinstance(rule.target, ReversibleRouter):
- reversed_url = rule.target.reverse_url(name, *args)
- if reversed_url is not None:
- return reversed_url
-
- return None
-
-
-class Rule(object):
- """A routing rule."""
-
- def __init__(self, matcher, target, target_kwargs=None, name=None):
- """Constructs a Rule instance.
-
- :arg Matcher matcher: a `Matcher` instance used for determining
- whether the rule should be considered a match for a specific
- request.
- :arg target: a Rule's target (typically a ``RequestHandler`` or
- `~.httputil.HTTPServerConnectionDelegate` subclass or even a nested `Router`,
- depending on routing implementation).
- :arg dict target_kwargs: a dict of parameters that can be useful
- at the moment of target instantiation (for example, ``status_code``
- for a ``RequestHandler`` subclass). They end up in
- ``target_params['target_kwargs']`` of `RuleRouter.get_target_delegate`
- method.
- :arg str name: the name of the rule that can be used to find it
- in `ReversibleRouter.reverse_url` implementation.
- """
- if isinstance(target, str):
- # import the Module and instantiate the class
- # Must be a fully qualified name (module.ClassName)
- target = import_object(target)
-
- self.matcher = matcher # type: Matcher
- self.target = target
- self.target_kwargs = target_kwargs if target_kwargs else {}
- self.name = name
-
- def reverse(self, *args):
- return self.matcher.reverse(*args)
-
- def __repr__(self):
- return '%s(%r, %s, kwargs=%r, name=%r)' % \
- (self.__class__.__name__, self.matcher,
- self.target, self.target_kwargs, self.name)
-
-
-class Matcher(object):
- """Represents a matcher for request features."""
-
- def match(self, request):
- """Matches current instance against the request.
-
- :arg httputil.HTTPServerRequest request: current HTTP request
- :returns: a dict of parameters to be passed to the target handler
- (for example, ``handler_kwargs``, ``path_args``, ``path_kwargs``
- can be passed for proper `~.web.RequestHandler` instantiation).
- An empty dict is a valid (and common) return value to indicate a match
- when the argument-passing features are not used.
- ``None`` must be returned to indicate that there is no match."""
- raise NotImplementedError()
-
- def reverse(self, *args):
- """Reconstructs full url from matcher instance and additional arguments."""
- return None
-
-
-class AnyMatches(Matcher):
- """Matches any request."""
-
- def match(self, request):
- return {}
-
-
-class HostMatches(Matcher):
- """Matches requests from hosts specified by ``host_pattern`` regex."""
-
- def __init__(self, host_pattern):
- if isinstance(host_pattern, basestring_type):
- if not host_pattern.endswith("$"):
- host_pattern += "$"
- self.host_pattern = re.compile(host_pattern)
- else:
- self.host_pattern = host_pattern
-
- def match(self, request):
- if self.host_pattern.match(request.host_name):
- return {}
-
- return None
-
-
-class DefaultHostMatches(Matcher):
- """Matches requests from host that is equal to application's default_host.
- Always returns no match if ``X-Real-Ip`` header is present.
- """
-
- def __init__(self, application, host_pattern):
- self.application = application
- self.host_pattern = host_pattern
-
- def match(self, request):
- # Look for default host if not behind load balancer (for debugging)
- if "X-Real-Ip" not in request.headers:
- if self.host_pattern.match(self.application.default_host):
- return {}
- return None
-
-
-class PathMatches(Matcher):
- """Matches requests with paths specified by ``path_pattern`` regex."""
-
- def __init__(self, path_pattern):
- if isinstance(path_pattern, basestring_type):
- if not path_pattern.endswith('$'):
- path_pattern += '$'
- self.regex = re.compile(path_pattern)
- else:
- self.regex = path_pattern
-
- assert len(self.regex.groupindex) in (0, self.regex.groups), \
- ("groups in url regexes must either be all named or all "
- "positional: %r" % self.regex.pattern)
-
- self._path, self._group_count = self._find_groups()
-
- def match(self, request):
- match = self.regex.match(request.path)
- if match is None:
- return None
- if not self.regex.groups:
- return {}
-
- path_args, path_kwargs = [], {}
-
- # Pass matched groups to the handler. Since
- # match.groups() includes both named and
- # unnamed groups, we want to use either groups
- # or groupdict but not both.
- if self.regex.groupindex:
- path_kwargs = dict(
- (str(k), _unquote_or_none(v))
- for (k, v) in match.groupdict().items())
- else:
- path_args = [_unquote_or_none(s) for s in match.groups()]
-
- return dict(path_args=path_args, path_kwargs=path_kwargs)
-
- def reverse(self, *args):
- if self._path is None:
- raise ValueError("Cannot reverse url regex " + self.regex.pattern)
- assert len(args) == self._group_count, "required number of arguments " \
- "not found"
- if not len(args):
- return self._path
- converted_args = []
- for a in args:
- if not isinstance(a, (unicode_type, bytes)):
- a = str(a)
- converted_args.append(url_escape(utf8(a), plus=False))
- return self._path % tuple(converted_args)
-
- def _find_groups(self):
- """Returns a tuple (reverse string, group count) for a url.
-
- For example: Given the url pattern /([0-9]{4})/([a-z-]+)/, this method
- would return ('/%s/%s/', 2).
- """
- pattern = self.regex.pattern
- if pattern.startswith('^'):
- pattern = pattern[1:]
- if pattern.endswith('$'):
- pattern = pattern[:-1]
-
- if self.regex.groups != pattern.count('('):
- # The pattern is too complicated for our simplistic matching,
- # so we can't support reversing it.
- return None, None
-
- pieces = []
- for fragment in pattern.split('('):
- if ')' in fragment:
- paren_loc = fragment.index(')')
- if paren_loc >= 0:
- pieces.append('%s' + fragment[paren_loc + 1:])
- else:
- try:
- unescaped_fragment = re_unescape(fragment)
- except ValueError as exc:
- # If we can't unescape part of it, we can't
- # reverse this url.
- return (None, None)
- pieces.append(unescaped_fragment)
-
- return ''.join(pieces), self.regex.groups
-
-
-class URLSpec(Rule):
- """Specifies mappings between URLs and handlers.
-
- .. versionchanged:: 4.5
- `URLSpec` is now a subclass of `Rule` with a `PathMatches` matcher and is preserved for
- backwards compatibility.
- """
- def __init__(self, pattern, handler, kwargs=None, name=None):
- """Parameters:
-
- * ``pattern``: Regular expression to be matched. Any capturing
- groups in the regex will be passed in to the handler's
- get/post/etc methods as arguments (by keyword if named, by
- position if unnamed. Named and unnamed capturing groups may
- not be mixed in the same rule).
-
- * ``handler``: `~.web.RequestHandler` subclass to be invoked.
-
- * ``kwargs`` (optional): A dictionary of additional arguments
- to be passed to the handler's constructor.
-
- * ``name`` (optional): A name for this handler. Used by
- `~.web.Application.reverse_url`.
-
- """
- super(URLSpec, self).__init__(PathMatches(pattern), handler, kwargs, name)
-
- self.regex = self.matcher.regex
- self.handler_class = self.target
- self.kwargs = kwargs
-
- def __repr__(self):
- return '%s(%r, %s, kwargs=%r, name=%r)' % \
- (self.__class__.__name__, self.regex.pattern,
- self.handler_class, self.kwargs, self.name)
-
-
-def _unquote_or_none(s):
- """None-safe wrapper around url_unescape to handle unmatched optional
- groups correctly.
-
- Note that args are passed as bytes so the handler can decide what
- encoding to use.
- """
- if s is None:
- return s
- return url_unescape(s, encoding=None, plus=False)
+# Copyright 2015 The Tornado Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Flexible routing implementation.
+
+Tornado routes HTTP requests to appropriate handlers using `Router`
+class implementations. The `tornado.web.Application` class is a
+`Router` implementation and may be used directly, or the classes in
+this module may be used for additional flexibility. The `RuleRouter`
+class can match on more criteria than `.Application`, or the `Router`
+interface can be subclassed for maximum customization.
+
+The `Router` interface extends `~.httputil.HTTPServerConnectionDelegate`
+to provide additional routing capabilities. This also means that any
+`Router` implementation can be used directly as a ``request_callback``
+for the `~.httpserver.HTTPServer` constructor.
+
+A `Router` subclass must implement a ``find_handler`` method to provide
+a suitable `~.httputil.HTTPMessageDelegate` instance to handle the
+request:
+
+.. code-block:: python
+
+ class CustomRouter(Router):
+ def find_handler(self, request, **kwargs):
+ # some routing logic providing a suitable HTTPMessageDelegate instance
+ return MessageDelegate(request.connection)
+
+ class MessageDelegate(HTTPMessageDelegate):
+ def __init__(self, connection):
+ self.connection = connection
+
+ def finish(self):
+ self.connection.write_headers(
+ ResponseStartLine("HTTP/1.1", 200, "OK"),
+ HTTPHeaders({"Content-Length": "2"}),
+ b"OK")
+ self.connection.finish()
+
+ router = CustomRouter()
+ server = HTTPServer(router)
+
+The main responsibility of a `Router` implementation is to provide a
+mapping from a request to the `~.httputil.HTTPMessageDelegate` instance
+that will handle this request. In the example above we can see that
+routing is possible even without instantiating an `~.web.Application`.
+
+For routing to `~.web.RequestHandler` implementations we need an
+`~.web.Application` instance. `~.web.Application.get_handler_delegate`
+provides a convenient way to create `~.httputil.HTTPMessageDelegate`
+for a given request and `~.web.RequestHandler`.
+
+Here is a simple example of how we can route to
+`~.web.RequestHandler` subclasses by HTTP method:
+
+.. code-block:: python
+
+ resources = {}
+
+ class GetResource(RequestHandler):
+ def get(self, path):
+ if path not in resources:
+ raise HTTPError(404)
+
+ self.finish(resources[path])
+
+ class PostResource(RequestHandler):
+ def post(self, path):
+ resources[path] = self.request.body
+
+ class HTTPMethodRouter(Router):
+ def __init__(self, app):
+ self.app = app
+
+ def find_handler(self, request, **kwargs):
+ handler = GetResource if request.method == "GET" else PostResource
+ return self.app.get_handler_delegate(request, handler, path_args=[request.path])
+
+ router = HTTPMethodRouter(Application())
+ server = HTTPServer(router)
+
+The `ReversibleRouter` interface adds the ability to distinguish between
+routes and reverse them to their original urls using a route's name and
+additional arguments (a short sketch appears after the examples below).
+`~.web.Application` is itself an implementation of the
+`ReversibleRouter` interface.
+
+`RuleRouter` and `ReversibleRuleRouter` are implementations of
+`Router` and `ReversibleRouter` interfaces and can be used for
+creating rule-based routing configurations.
+
+Rules are instances of the `Rule` class. Each rule contains a `Matcher`,
+which provides the logic for determining whether the rule is a match for
+a particular request, and a target, which can be one of the following.
+
+1) An instance of `~.httputil.HTTPServerConnectionDelegate`:
+
+.. code-block:: python
+
+ router = RuleRouter([
+ Rule(PathMatches("/handler"), ConnectionDelegate()),
+ # ... more rules
+ ])
+
+ class ConnectionDelegate(HTTPServerConnectionDelegate):
+ def start_request(self, server_conn, request_conn):
+ return MessageDelegate(request_conn)
+
+2) A callable accepting a single argument of `~.httputil.HTTPServerRequest` type:
+
+.. code-block:: python
+
+ router = RuleRouter([
+ Rule(PathMatches("/callable"), request_callable)
+ ])
+
+ def request_callable(request):
+ request.write(b"HTTP/1.1 200 OK\\r\\nContent-Length: 2\\r\\n\\r\\nOK")
+ request.finish()
+
+3) Another `Router` instance:
+
+.. code-block:: python
+
+ router = RuleRouter([
+ Rule(PathMatches("/router.*"), CustomRouter())
+ ])
+
+Of course, a nested `RuleRouter` or a `~.web.Application` is allowed:
+
+.. code-block:: python
+
+ router = RuleRouter([
+ Rule(HostMatches("example.com"), RuleRouter([
+ Rule(PathMatches("/app1/.*"), Application([(r"/app1/handler", Handler)]))),
+ ]))
+ ])
+
+ server = HTTPServer(router)
+
+In the example below `RuleRouter` is used to route between applications:
+
+.. code-block:: python
+
+ app1 = Application([
+ (r"/app1/handler", Handler1),
+ # other handlers ...
+ ])
+
+ app2 = Application([
+ (r"/app2/handler", Handler2),
+ # other handlers ...
+ ])
+
+ router = RuleRouter([
+ Rule(PathMatches("/app1.*"), app1),
+ Rule(PathMatches("/app2.*"), app2)
+ ])
+
+ server = HTTPServer(router)
+
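+A rule with a ``name`` can later be reversed back to a concrete url
+with `ReversibleRuleRouter.reverse_url`. A minimal sketch (the handler
+``entry_callable`` and the route name ``"entry"`` are illustrative):
+
+.. code-block:: python
+
+ def entry_callable(request):
+     request.write(b"HTTP/1.1 200 OK\\r\\nContent-Length: 2\\r\\n\\r\\nOK")
+     request.finish()
+
+ router = ReversibleRuleRouter([
+     Rule(PathMatches(r"/entry/([0-9]+)"), entry_callable, name="entry"),
+ ])
+
+ router.reverse_url("entry", 42)  # returns "/entry/42"
+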
+For more information on application-level routing see docs for `~.web.Application`.
+
+.. versionadded:: 4.5
+
+"""
+
+from __future__ import absolute_import, division, print_function
+
+import re
+from functools import partial
+
+from tornado import httputil
+from tornado.httpserver import _CallableAdapter
+from tornado.escape import url_escape, url_unescape, utf8
+from tornado.log import app_log
+from tornado.util import basestring_type, import_object, re_unescape, unicode_type
+
+try:
+ import typing # noqa
+except ImportError:
+ pass
+
+
+class Router(httputil.HTTPServerConnectionDelegate):
+ """Abstract router interface."""
+
+ def find_handler(self, request, **kwargs):
+ # type: (httputil.HTTPServerRequest, typing.Any)->httputil.HTTPMessageDelegate
+ """Must be implemented to return an appropriate instance of `~.httputil.HTTPMessageDelegate`
+ that can serve the request.
+ Routing implementations may pass additional kwargs to extend the routing logic.
+
+ :arg httputil.HTTPServerRequest request: current HTTP request.
+ :arg kwargs: additional keyword arguments passed by routing implementation.
+ :returns: an instance of `~.httputil.HTTPMessageDelegate` that will be used to
+ process the request.
+ """
+ raise NotImplementedError()
+
+ def start_request(self, server_conn, request_conn):
+ return _RoutingDelegate(self, server_conn, request_conn)
+
+
+class ReversibleRouter(Router):
+ """Abstract router interface for routers that can handle named routes
+ and support reversing them to original urls.
+ """
+
+ def reverse_url(self, name, *args):
+ """Returns url string for a given route name and arguments
+ or ``None`` if no match is found.
+
+ :arg str name: route name.
+ :arg args: url parameters.
+ :returns: parametrized url string for a given route name (or ``None``).
+ """
+ raise NotImplementedError()
+
+
+class _RoutingDelegate(httputil.HTTPMessageDelegate):
+ def __init__(self, router, server_conn, request_conn):
+ self.server_conn = server_conn
+ self.request_conn = request_conn
+ self.delegate = None
+ self.router = router # type: Router
+
+ def headers_received(self, start_line, headers):
+ request = httputil.HTTPServerRequest(
+ connection=self.request_conn,
+ server_connection=self.server_conn,
+ start_line=start_line, headers=headers)
+
+ self.delegate = self.router.find_handler(request)
+ return self.delegate.headers_received(start_line, headers)
+
+ def data_received(self, chunk):
+ return self.delegate.data_received(chunk)
+
+ def finish(self):
+ self.delegate.finish()
+
+ def on_connection_close(self):
+ self.delegate.on_connection_close()
+
+
+class RuleRouter(Router):
+ """Rule-based router implementation."""
+
+ def __init__(self, rules=None):
+ """Constructs a router from an ordered list of rules::
+
+ RuleRouter([
+ Rule(PathMatches("/handler"), Target),
+ # ... more rules
+ ])
+
+ You can also omit the explicit `Rule` constructor and use tuples of arguments::
+
+ RuleRouter([
+ (PathMatches("/handler"), Target),
+ ])
+
+ `PathMatches` is the default matcher, so the example above can be simplified::
+
+ RuleRouter([
+ ("/handler", Target),
+ ])
+
+ In the examples above, ``Target`` can be a nested `Router` instance, an instance of
+ `~.httputil.HTTPServerConnectionDelegate`, or an old-style callable accepting a request argument.
+
+ :arg rules: a list of `Rule` instances or tuples of `Rule`
+ constructor arguments.
+ """
+ self.rules = [] # type: typing.List[Rule]
+ if rules:
+ self.add_rules(rules)
+
+ def add_rules(self, rules):
+ """Appends new rules to the router.
+
+ :arg rules: a list of Rule instances (or tuples of arguments, which are
+ passed to Rule constructor).
+ """
+ for rule in rules:
+ if isinstance(rule, (tuple, list)):
+ assert len(rule) in (2, 3, 4)
+ if isinstance(rule[0], basestring_type):
+ rule = Rule(PathMatches(rule[0]), *rule[1:])
+ else:
+ rule = Rule(*rule)
+
+ self.rules.append(self.process_rule(rule))
+
+ def process_rule(self, rule):
+ """Override this method for additional preprocessing of each rule.
+
+ :arg Rule rule: a rule to be processed.
+ :returns: the same or modified Rule instance.
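+
+ A minimal override sketch (``LoggingRuleRouter`` is a hypothetical
+ subclass that logs every rule as it is added)::
+
+     class LoggingRuleRouter(RuleRouter):
+         def process_rule(self, rule):
+             rule = super(LoggingRuleRouter, self).process_rule(rule)
+             # illustrative only: report the processed rule
+             app_log.info("added rule: %r", rule)
+             return rule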
+ """
+ return rule
+
+ def find_handler(self, request, **kwargs):
+ for rule in self.rules:
+ target_params = rule.matcher.match(request)
+ if target_params is not None:
+ if rule.target_kwargs:
+ target_params['target_kwargs'] = rule.target_kwargs
+
+ delegate = self.get_target_delegate(
+ rule.target, request, **target_params)
+
+ if delegate is not None:
+ return delegate
+
+ return None
+
+ def get_target_delegate(self, target, request, **target_params):
+ """Returns an instance of `~.httputil.HTTPMessageDelegate` for a
+ Rule's target. This method is called by `~.find_handler` and can be
+ extended to provide additional target types.
+
+ :arg target: a Rule's target.
+ :arg httputil.HTTPServerRequest request: current request.
+ :arg target_params: additional parameters that can be useful
+ for `~.httputil.HTTPMessageDelegate` creation.
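+
+ An override sketch that adds a custom target type (``EchoTarget`` is
+ illustrative; ``MessageDelegate`` refers to the module docstring
+ example)::
+
+     class EchoTarget(object):
+         pass
+
+     class EchoRuleRouter(RuleRouter):
+         def get_target_delegate(self, target, request, **target_params):
+             if isinstance(target, EchoTarget):
+                 return MessageDelegate(request.connection)
+             return super(EchoRuleRouter, self).get_target_delegate(
+                 target, request, **target_params)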
+ """
+ if isinstance(target, Router):
+ return target.find_handler(request, **target_params)
+
+ elif isinstance(target, httputil.HTTPServerConnectionDelegate):
+ return target.start_request(request.server_connection, request.connection)
+
+ elif callable(target):
+ return _CallableAdapter(
+ partial(target, **target_params), request.connection
+ )
+
+ return None
+
+
+class ReversibleRuleRouter(ReversibleRouter, RuleRouter):
+ """A rule-based router that implements ``reverse_url`` method.
+
+ Each rule added to this router may have a ``name`` attribute that can be
+ used to reconstruct an original uri. The actual reconstruction takes place
+ in a rule's matcher (see `Matcher.reverse`).
+ """
+
+ def __init__(self, rules=None):
+ self.named_rules = {} # type: typing.Dict[str, Rule]
+ super(ReversibleRuleRouter, self).__init__(rules)
+
+ def process_rule(self, rule):
+ rule = super(ReversibleRuleRouter, self).process_rule(rule)
+
+ if rule.name:
+ if rule.name in self.named_rules:
+ app_log.warning(
+ "Multiple handlers named %s; replacing previous value",
+ rule.name)
+ self.named_rules[rule.name] = rule
+
+ return rule
+
+ def reverse_url(self, name, *args):
+ if name in self.named_rules:
+ return self.named_rules[name].matcher.reverse(*args)
+
+ for rule in self.rules:
+ if isinstance(rule.target, ReversibleRouter):
+ reversed_url = rule.target.reverse_url(name, *args)
+ if reversed_url is not None:
+ return reversed_url
+
+ return None
+
+
+class Rule(object):
+ """A routing rule."""
+
+ def __init__(self, matcher, target, target_kwargs=None, name=None):
+ """Constructs a Rule instance.
+
+ :arg Matcher matcher: a `Matcher` instance used for determining
+ whether the rule should be considered a match for a specific
+ request.
+ :arg target: a Rule's target (typically a ``RequestHandler`` or
+ `~.httputil.HTTPServerConnectionDelegate` subclass or even a nested `Router`,
+ depending on routing implementation).
+ :arg dict target_kwargs: a dict of parameters that can be useful
+ at the moment of target instantiation (for example, ``status_code``
+ for a ``RequestHandler`` subclass). They end up in
+ ``target_params['target_kwargs']`` of `RuleRouter.get_target_delegate`
+ method.
+ :arg str name: the name of the rule that can be used to find it
+ in `ReversibleRouter.reverse_url` implementation.
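+
+ A ``target`` may also be given as an import string, which is
+ resolved with `~.util.import_object` (the dotted path below is
+ illustrative)::
+
+     Rule(PathMatches("/handler"), "myapp.handlers.MyHandler")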
+ """
+ if isinstance(target, str):
+ # import the Module and instantiate the class
+ # Must be a fully qualified name (module.ClassName)
+ target = import_object(target)
+
+ self.matcher = matcher # type: Matcher
+ self.target = target
+ self.target_kwargs = target_kwargs if target_kwargs else {}
+ self.name = name
+
+ def reverse(self, *args):
+ return self.matcher.reverse(*args)
+
+ def __repr__(self):
+ return '%s(%r, %s, kwargs=%r, name=%r)' % \
+ (self.__class__.__name__, self.matcher,
+ self.target, self.target_kwargs, self.name)
+
+
+class Matcher(object):
+ """Represents a matcher for request features."""
+
+ def match(self, request):
+ """Matches current instance against the request.
+
+ :arg httputil.HTTPServerRequest request: current HTTP request
+ :returns: a dict of parameters to be passed to the target handler
+ (for example, ``handler_kwargs``, ``path_args``, ``path_kwargs``
+ can be passed for proper `~.web.RequestHandler` instantiation).
+ An empty dict is a valid (and common) return value to indicate a match
+ when the argument-passing features are not used.
+ ``None`` must be returned to indicate that there is no match."""
+ raise NotImplementedError()
+
+ def reverse(self, *args):
+ """Reconstructs full url from matcher instance and additional arguments."""
+ return None
+
+
+class AnyMatches(Matcher):
+ """Matches any request."""
+
+ def match(self, request):
+ return {}
+
+
+class HostMatches(Matcher):
+ """Matches requests from hosts specified by ``host_pattern`` regex."""
+
+ def __init__(self, host_pattern):
+ if isinstance(host_pattern, basestring_type):
+ if not host_pattern.endswith("$"):
+ host_pattern += "$"
+ self.host_pattern = re.compile(host_pattern)
+ else:
+ self.host_pattern = host_pattern
+
+ def match(self, request):
+ if self.host_pattern.match(request.host_name):
+ return {}
+
+ return None
+
+
+class DefaultHostMatches(Matcher):
+ """Matches requests from host that is equal to application's default_host.
+ Always returns no match if ``X-Real-Ip`` header is present.
+ """
+
+ def __init__(self, application, host_pattern):
+ self.application = application
+ self.host_pattern = host_pattern
+
+ def match(self, request):
+ # Look for default host if not behind load balancer (for debugging)
+ if "X-Real-Ip" not in request.headers:
+ if self.host_pattern.match(self.application.default_host):
+ return {}
+ return None
+
+
+class PathMatches(Matcher):
+ """Matches requests with paths specified by ``path_pattern`` regex."""
+
+ def __init__(self, path_pattern):
+ if isinstance(path_pattern, basestring_type):
+ if not path_pattern.endswith('$'):
+ path_pattern += '$'
+ self.regex = re.compile(path_pattern)
+ else:
+ self.regex = path_pattern
+
+ assert len(self.regex.groupindex) in (0, self.regex.groups), \
+ ("groups in url regexes must either be all named or all "
+ "positional: %r" % self.regex.pattern)
+
+ self._path, self._group_count = self._find_groups()
+
+ def match(self, request):
+ match = self.regex.match(request.path)
+ if match is None:
+ return None
+ if not self.regex.groups:
+ return {}
+
+ path_args, path_kwargs = [], {}
+
+ # Pass matched groups to the handler. Since
+ # match.groups() includes both named and
+ # unnamed groups, we want to use either groups
+ # or groupdict but not both.
+ if self.regex.groupindex:
+ path_kwargs = dict(
+ (str(k), _unquote_or_none(v))
+ for (k, v) in match.groupdict().items())
+ else:
+ path_args = [_unquote_or_none(s) for s in match.groups()]
+
+ return dict(path_args=path_args, path_kwargs=path_kwargs)
+
+ def reverse(self, *args):
+ if self._path is None:
+ raise ValueError("Cannot reverse url regex " + self.regex.pattern)
+ assert len(args) == self._group_count, "required number of arguments " \
+ "not found"
+ if not len(args):
+ return self._path
+ converted_args = []
+ for a in args:
+ if not isinstance(a, (unicode_type, bytes)):
+ a = str(a)
+ converted_args.append(url_escape(utf8(a), plus=False))
+ return self._path % tuple(converted_args)
+
+ def _find_groups(self):
+ """Returns a tuple (reverse string, group count) for a url.
+
+ For example: Given the url pattern /([0-9]{4})/([a-z-]+)/, this method
+ would return ('/%s/%s/', 2).
+ """
+ pattern = self.regex.pattern
+ if pattern.startswith('^'):
+ pattern = pattern[1:]
+ if pattern.endswith('$'):
+ pattern = pattern[:-1]
+
+ if self.regex.groups != pattern.count('('):
+ # The pattern is too complicated for our simplistic matching,
+ # so we can't support reversing it.
+ return None, None
+
+ pieces = []
+ for fragment in pattern.split('('):
+ if ')' in fragment:
+ paren_loc = fragment.index(')')
+ if paren_loc >= 0:
+ pieces.append('%s' + fragment[paren_loc + 1:])
+ else:
+ try:
+ unescaped_fragment = re_unescape(fragment)
+ except ValueError as exc:
+ # If we can't unescape part of it, we can't
+ # reverse this url.
+ return (None, None)
+ pieces.append(unescaped_fragment)
+
+ return ''.join(pieces), self.regex.groups
+
+
+class URLSpec(Rule):
+ """Specifies mappings between URLs and handlers.
+
+ .. versionchanged:: 4.5
+ `URLSpec` is now a subclass of `Rule` with a `PathMatches` matcher and is preserved for
+ backwards compatibility.
+ """
+ def __init__(self, pattern, handler, kwargs=None, name=None):
+ """Parameters:
+
+ * ``pattern``: Regular expression to be matched. Any capturing
+ groups in the regex will be passed in to the handler's
+ get/post/etc methods as arguments (by keyword if named, by
+ position if unnamed. Named and unnamed capturing groups may
+ not be mixed in the same rule).
+
+ * ``handler``: `~.web.RequestHandler` subclass to be invoked.
+
+ * ``kwargs`` (optional): A dictionary of additional arguments
+ to be passed to the handler's constructor.
+
+ * ``name`` (optional): A name for this handler. Used by
+ `~.web.Application.reverse_url`.
+
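+ A typical sketch (``EntryHandler`` is a hypothetical
+ `~.web.RequestHandler` subclass)::
+
+     URLSpec(r"/entry/([0-9]+)", EntryHandler, name="entry")
+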
+ """
+ super(URLSpec, self).__init__(PathMatches(pattern), handler, kwargs, name)
+
+ self.regex = self.matcher.regex
+ self.handler_class = self.target
+ self.kwargs = kwargs
+
+ def __repr__(self):
+ return '%s(%r, %s, kwargs=%r, name=%r)' % \
+ (self.__class__.__name__, self.regex.pattern,
+ self.handler_class, self.kwargs, self.name)
+
+
+def _unquote_or_none(s):
+ """None-safe wrapper around url_unescape to handle unmatched optional
+ groups correctly.
+
+ Note that args are passed as bytes so the handler can decide what
+ encoding to use.
+ """
+ if s is None:
+ return s
+ return url_unescape(s, encoding=None, plus=False)
diff --git a/contrib/python/tornado/tornado-4/tornado/simple_httpclient.py b/contrib/python/tornado/tornado-4/tornado/simple_httpclient.py
index 8fb70707f9..6c7767ab3c 100644
--- a/contrib/python/tornado/tornado-4/tornado/simple_httpclient.py
+++ b/contrib/python/tornado/tornado-4/tornado/simple_httpclient.py
@@ -1,567 +1,567 @@
-#!/usr/bin/env python
-from __future__ import absolute_import, division, print_function
-
-from tornado.escape import utf8, _unicode
-from tornado import gen
-from tornado.httpclient import HTTPResponse, HTTPError, AsyncHTTPClient, main, _RequestProxy
-from tornado import httputil
-from tornado.http1connection import HTTP1Connection, HTTP1ConnectionParameters
-from tornado.iostream import StreamClosedError
-from tornado.netutil import Resolver, OverrideResolver, _client_ssl_defaults
-from tornado.log import gen_log
-from tornado import stack_context
-from tornado.tcpclient import TCPClient
-from tornado.util import PY3
-
-import base64
-import collections
-import copy
-import functools
-import re
-import socket
-import sys
-from io import BytesIO
-
-
-if PY3:
- import urllib.parse as urlparse
-else:
- import urlparse
-
-try:
- import ssl
-except ImportError:
- # ssl is not available on Google App Engine.
- ssl = None
-
-try:
- import certifi
-except ImportError:
- certifi = None
-
-
-def _default_ca_certs():
- if certifi is None:
- raise Exception("The 'certifi' package is required to use https "
- "in simple_httpclient")
- return certifi.where()
-
-
-class SimpleAsyncHTTPClient(AsyncHTTPClient):
- """Non-blocking HTTP client with no external dependencies.
-
- This class implements an HTTP 1.1 client on top of Tornado's IOStreams.
- Some features found in the curl-based AsyncHTTPClient are not yet
- supported. In particular, proxies are not supported, connections
- are not reused, and callers cannot select the network interface to be
- used.
- """
- def initialize(self, io_loop, max_clients=10,
- hostname_mapping=None, max_buffer_size=104857600,
- resolver=None, defaults=None, max_header_size=None,
- max_body_size=None):
- """Creates a AsyncHTTPClient.
-
- Only a single AsyncHTTPClient instance exists per IOLoop
- in order to provide limitations on the number of pending connections.
- ``force_instance=True`` may be used to suppress this behavior.
-
- Note that because of this implicit reuse, unless ``force_instance``
- is used, only the first call to the constructor actually uses
- its arguments. It is recommended to use the ``configure`` method
- instead of the constructor to ensure that arguments take effect.
-
- ``max_clients`` is the number of concurrent requests that can be
- in progress; when this limit is reached additional requests will be
- queued. Note that time spent waiting in this queue still counts
- against the ``request_timeout``.
-
- ``hostname_mapping`` is a dictionary mapping hostnames to IP addresses.
- It can be used to make local DNS changes when modifying system-wide
- settings like ``/etc/hosts`` is not possible or desirable (e.g. in
- unittests).
-
- ``max_buffer_size`` (default 100MB) is the number of bytes
- that can be read into memory at once. ``max_body_size``
- (defaults to ``max_buffer_size``) is the largest response body
- that the client will accept. Without a
- ``streaming_callback``, the smaller of these two limits
- applies; with a ``streaming_callback`` only ``max_body_size``
- does.
-
- .. versionchanged:: 4.2
- Added the ``max_body_size`` argument.
- """
- super(SimpleAsyncHTTPClient, self).initialize(io_loop,
- defaults=defaults)
- self.max_clients = max_clients
- self.queue = collections.deque()
- self.active = {}
- self.waiting = {}
- self.max_buffer_size = max_buffer_size
- self.max_header_size = max_header_size
- self.max_body_size = max_body_size
- # TCPClient could create a Resolver for us, but we have to do it
- # ourselves to support hostname_mapping.
- if resolver:
- self.resolver = resolver
- self.own_resolver = False
- else:
- self.resolver = Resolver(io_loop=io_loop)
- self.own_resolver = True
- if hostname_mapping is not None:
- self.resolver = OverrideResolver(resolver=self.resolver,
- mapping=hostname_mapping)
- self.tcp_client = TCPClient(resolver=self.resolver, io_loop=io_loop)
-
- def close(self):
- super(SimpleAsyncHTTPClient, self).close()
- if self.own_resolver:
- self.resolver.close()
- self.tcp_client.close()
-
- def fetch_impl(self, request, callback):
- key = object()
- self.queue.append((key, request, callback))
- if not len(self.active) < self.max_clients:
- timeout_handle = self.io_loop.add_timeout(
- self.io_loop.time() + min(request.connect_timeout,
- request.request_timeout),
- functools.partial(self._on_timeout, key, "in request queue"))
- else:
- timeout_handle = None
- self.waiting[key] = (request, callback, timeout_handle)
- self._process_queue()
- if self.queue:
- gen_log.debug("max_clients limit reached, request queued. "
- "%d active, %d queued requests." % (
- len(self.active), len(self.queue)))
-
- def _process_queue(self):
- with stack_context.NullContext():
- while self.queue and len(self.active) < self.max_clients:
- key, request, callback = self.queue.popleft()
- if key not in self.waiting:
- continue
- self._remove_timeout(key)
- self.active[key] = (request, callback)
- release_callback = functools.partial(self._release_fetch, key)
- self._handle_request(request, release_callback, callback)
-
- def _connection_class(self):
- return _HTTPConnection
-
- def _handle_request(self, request, release_callback, final_callback):
- self._connection_class()(
- self.io_loop, self, request, release_callback,
- final_callback, self.max_buffer_size, self.tcp_client,
- self.max_header_size, self.max_body_size)
-
- def _release_fetch(self, key):
- del self.active[key]
- self._process_queue()
-
- def _remove_timeout(self, key):
- if key in self.waiting:
- request, callback, timeout_handle = self.waiting[key]
- if timeout_handle is not None:
- self.io_loop.remove_timeout(timeout_handle)
- del self.waiting[key]
-
- def _on_timeout(self, key, info=None):
- """Timeout callback of request.
-
- Construct a timeout HTTPResponse when a timeout occurs.
-
- :arg object key: A simple object to mark the request.
- :arg string info: More detailed timeout information.
- """
- request, callback, timeout_handle = self.waiting[key]
- self.queue.remove((key, request, callback))
-
- error_message = "Timeout {0}".format(info) if info else "Timeout"
- timeout_response = HTTPResponse(
- request, 599, error=HTTPError(599, error_message),
- request_time=self.io_loop.time() - request.start_time)
- self.io_loop.add_callback(callback, timeout_response)
- del self.waiting[key]
-
-
-class _HTTPConnection(httputil.HTTPMessageDelegate):
- _SUPPORTED_METHODS = set(["GET", "HEAD", "POST", "PUT", "DELETE", "PATCH", "OPTIONS"])
-
- def __init__(self, io_loop, client, request, release_callback,
- final_callback, max_buffer_size, tcp_client,
- max_header_size, max_body_size):
- self.start_time = io_loop.time()
- self.io_loop = io_loop
- self.client = client
- self.request = request
- self.release_callback = release_callback
- self.final_callback = final_callback
- self.max_buffer_size = max_buffer_size
- self.tcp_client = tcp_client
- self.max_header_size = max_header_size
- self.max_body_size = max_body_size
- self.code = None
- self.headers = None
- self.chunks = []
- self._decompressor = None
- # Timeout handle returned by IOLoop.add_timeout
- self._timeout = None
- self._sockaddr = None
- with stack_context.ExceptionStackContext(self._handle_exception):
- self.parsed = urlparse.urlsplit(_unicode(self.request.url))
- if self.parsed.scheme not in ("http", "https"):
- raise ValueError("Unsupported url scheme: %s" %
- self.request.url)
- # urlsplit results have hostname and port attributes, but they
- # didn't support ipv6 literals until python 2.7.
- netloc = self.parsed.netloc
- if "@" in netloc:
- userpass, _, netloc = netloc.rpartition("@")
- host, port = httputil.split_host_and_port(netloc)
- if port is None:
- port = 443 if self.parsed.scheme == "https" else 80
- if re.match(r'^\[.*\]$', host):
- # raw ipv6 addresses in urls are enclosed in brackets
- host = host[1:-1]
- self.parsed_hostname = host # save final host for _on_connect
-
- if request.allow_ipv6 is False:
- af = socket.AF_INET
- else:
- af = socket.AF_UNSPEC
-
- ssl_options = self._get_ssl_options(self.parsed.scheme)
-
- timeout = min(self.request.connect_timeout, self.request.request_timeout)
- if timeout:
- self._timeout = self.io_loop.add_timeout(
- self.start_time + timeout,
- stack_context.wrap(functools.partial(self._on_timeout, "while connecting")))
- self.tcp_client.connect(host, port, af=af,
- ssl_options=ssl_options,
- max_buffer_size=self.max_buffer_size,
- callback=self._on_connect)
-
- def _get_ssl_options(self, scheme):
- if scheme == "https":
- if self.request.ssl_options is not None:
- return self.request.ssl_options
- # If we are using the defaults, don't construct a
- # new SSLContext.
- if (self.request.validate_cert and
- self.request.ca_certs is None and
- self.request.client_cert is None and
- self.request.client_key is None):
- return _client_ssl_defaults
- ssl_options = {}
- if self.request.validate_cert:
- ssl_options["cert_reqs"] = ssl.CERT_REQUIRED
- if self.request.ca_certs is not None:
- ssl_options["ca_certs"] = self.request.ca_certs
- elif not hasattr(ssl, 'create_default_context'):
- # When create_default_context is present,
- # we can omit the "ca_certs" parameter entirely,
- # which avoids the dependency on "certifi" for py34.
- ssl_options["ca_certs"] = _default_ca_certs()
- if self.request.client_key is not None:
- ssl_options["keyfile"] = self.request.client_key
- if self.request.client_cert is not None:
- ssl_options["certfile"] = self.request.client_cert
-
- # SSL interoperability is tricky. We want to disable
- # SSLv2 for security reasons; it wasn't disabled by default
- # until openssl 1.0. The best way to do this is to use
- # the SSL_OP_NO_SSLv2, but that wasn't exposed to python
- # until 3.2. Python 2.7 adds the ciphers argument, which
- # can also be used to disable SSLv2. As a last resort
- # on python 2.6, we set ssl_version to TLSv1. This is
- # more narrow than we'd like since it also breaks
- # compatibility with servers configured for SSLv3 only,
- # but nearly all servers support both SSLv3 and TLSv1:
- # http://blog.ivanristic.com/2011/09/ssl-survey-protocol-support.html
- if sys.version_info >= (2, 7):
- # In addition to disabling SSLv2, we also exclude certain
- # classes of insecure ciphers.
- ssl_options["ciphers"] = "DEFAULT:!SSLv2:!EXPORT:!DES"
- else:
- # This is really only necessary for pre-1.0 versions
- # of openssl, but python 2.6 doesn't expose version
- # information.
- ssl_options["ssl_version"] = ssl.PROTOCOL_TLSv1
- return ssl_options
- return None
-
- def _on_timeout(self, info=None):
- """Timeout callback of _HTTPConnection instance.
-
- Raise a timeout HTTPError when a timeout occurs.
-
- :arg string info: More detailed timeout information.
- """
- self._timeout = None
- error_message = "Timeout {0}".format(info) if info else "Timeout"
- if self.final_callback is not None:
- raise HTTPError(599, error_message)
-
- def _remove_timeout(self):
- if self._timeout is not None:
- self.io_loop.remove_timeout(self._timeout)
- self._timeout = None
-
- def _on_connect(self, stream):
- if self.final_callback is None:
- # final_callback is cleared if we've hit our timeout.
- stream.close()
- return
- self.stream = stream
- self.stream.set_close_callback(self.on_connection_close)
- self._remove_timeout()
- if self.final_callback is None:
- return
- if self.request.request_timeout:
- self._timeout = self.io_loop.add_timeout(
- self.start_time + self.request.request_timeout,
- stack_context.wrap(functools.partial(self._on_timeout, "during request")))
- if (self.request.method not in self._SUPPORTED_METHODS and
- not self.request.allow_nonstandard_methods):
- raise KeyError("unknown method %s" % self.request.method)
- for key in ('network_interface',
- 'proxy_host', 'proxy_port',
- 'proxy_username', 'proxy_password',
- 'proxy_auth_mode'):
- if getattr(self.request, key, None):
- raise NotImplementedError('%s not supported' % key)
- if "Connection" not in self.request.headers:
- self.request.headers["Connection"] = "close"
- if "Host" not in self.request.headers:
- if '@' in self.parsed.netloc:
- self.request.headers["Host"] = self.parsed.netloc.rpartition('@')[-1]
- else:
- self.request.headers["Host"] = self.parsed.netloc
- username, password = None, None
- if self.parsed.username is not None:
- username, password = self.parsed.username, self.parsed.password
- elif self.request.auth_username is not None:
- username = self.request.auth_username
- password = self.request.auth_password or ''
- if username is not None:
- if self.request.auth_mode not in (None, "basic"):
- raise ValueError("unsupported auth_mode %s",
- self.request.auth_mode)
- auth = utf8(username) + b":" + utf8(password)
- self.request.headers["Authorization"] = (b"Basic " +
- base64.b64encode(auth))
- if self.request.user_agent:
- self.request.headers["User-Agent"] = self.request.user_agent
- if not self.request.allow_nonstandard_methods:
- # Some HTTP methods nearly always have bodies while others
- # almost never do. Fail in this case unless the user has
- # opted out of sanity checks with allow_nonstandard_methods.
- body_expected = self.request.method in ("POST", "PATCH", "PUT")
- body_present = (self.request.body is not None or
- self.request.body_producer is not None)
- if ((body_expected and not body_present) or
- (body_present and not body_expected)):
- raise ValueError(
- 'Body must %sbe None for method %s (unless '
- 'allow_nonstandard_methods is true)' %
- ('not ' if body_expected else '', self.request.method))
- if self.request.expect_100_continue:
- self.request.headers["Expect"] = "100-continue"
- if self.request.body is not None:
- # When body_producer is used the caller is responsible for
- # setting Content-Length (or else chunked encoding will be used).
- self.request.headers["Content-Length"] = str(len(
- self.request.body))
- if (self.request.method == "POST" and
- "Content-Type" not in self.request.headers):
- self.request.headers["Content-Type"] = "application/x-www-form-urlencoded"
- if self.request.decompress_response:
- self.request.headers["Accept-Encoding"] = "gzip"
- req_path = ((self.parsed.path or '/') +
- (('?' + self.parsed.query) if self.parsed.query else ''))
- self.connection = self._create_connection(stream)
- start_line = httputil.RequestStartLine(self.request.method,
- req_path, '')
- self.connection.write_headers(start_line, self.request.headers)
- if self.request.expect_100_continue:
- self._read_response()
- else:
- self._write_body(True)
-
- def _create_connection(self, stream):
- stream.set_nodelay(True)
- connection = HTTP1Connection(
- stream, True,
- HTTP1ConnectionParameters(
- no_keep_alive=True,
- max_header_size=self.max_header_size,
- max_body_size=self.max_body_size,
- decompress=self.request.decompress_response),
- self._sockaddr)
- return connection
-
- def _write_body(self, start_read):
- if self.request.body is not None:
- self.connection.write(self.request.body)
- elif self.request.body_producer is not None:
- fut = self.request.body_producer(self.connection.write)
- if fut is not None:
- fut = gen.convert_yielded(fut)
-
- def on_body_written(fut):
- fut.result()
- self.connection.finish()
- if start_read:
- self._read_response()
- self.io_loop.add_future(fut, on_body_written)
- return
- self.connection.finish()
- if start_read:
- self._read_response()
-
- def _read_response(self):
- # Ensure that any exception raised in read_response ends up in our
- # stack context.
- self.io_loop.add_future(
- self.connection.read_response(self),
- lambda f: f.result())
-
- def _release(self):
- if self.release_callback is not None:
- release_callback = self.release_callback
- self.release_callback = None
- release_callback()
-
- def _run_callback(self, response):
- self._release()
- if self.final_callback is not None:
- final_callback = self.final_callback
- self.final_callback = None
- self.io_loop.add_callback(final_callback, response)
-
- def _handle_exception(self, typ, value, tb):
- if self.final_callback:
- self._remove_timeout()
- if isinstance(value, StreamClosedError):
- if value.real_error is None:
- value = HTTPError(599, "Stream closed")
- else:
- value = value.real_error
- self._run_callback(HTTPResponse(self.request, 599, error=value,
- request_time=self.io_loop.time() - self.start_time,
- ))
-
- if hasattr(self, "stream"):
- # TODO: this may cause a StreamClosedError to be raised
- # by the connection's Future. Should we cancel the
- # connection more gracefully?
- self.stream.close()
- return True
- else:
- # If our callback has already been called, we are probably
- # catching an exception that is not caused by us but rather
- # some child of our callback. Rather than drop it on the floor,
- # pass it along, unless it's just the stream being closed.
- return isinstance(value, StreamClosedError)
-
- def on_connection_close(self):
- if self.final_callback is not None:
- message = "Connection closed"
- if self.stream.error:
- raise self.stream.error
- try:
- raise HTTPError(599, message)
- except HTTPError:
- self._handle_exception(*sys.exc_info())
-
- def headers_received(self, first_line, headers):
- if self.request.expect_100_continue and first_line.code == 100:
- self._write_body(False)
- return
- self.code = first_line.code
- self.reason = first_line.reason
- self.headers = headers
-
- if self._should_follow_redirect():
- return
-
- if self.request.header_callback is not None:
- # Reassemble the start line.
- self.request.header_callback('%s %s %s\r\n' % first_line)
- for k, v in self.headers.get_all():
- self.request.header_callback("%s: %s\r\n" % (k, v))
- self.request.header_callback('\r\n')
-
- def _should_follow_redirect(self):
- return (self.request.follow_redirects and
- self.request.max_redirects > 0 and
- self.code in (301, 302, 303, 307, 308))
-
- def finish(self):
- data = b''.join(self.chunks)
- self._remove_timeout()
- original_request = getattr(self.request, "original_request",
- self.request)
- if self._should_follow_redirect():
- assert isinstance(self.request, _RequestProxy)
- new_request = copy.copy(self.request.request)
- new_request.url = urlparse.urljoin(self.request.url,
- self.headers["Location"])
- new_request.max_redirects = self.request.max_redirects - 1
- del new_request.headers["Host"]
- # http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3.4
- # Client SHOULD make a GET request after a 303.
- # According to the spec, 302 should be followed by the same
- # method as the original request, but in practice browsers
- # treat 302 the same as 303, and many servers use 302 for
- # compatibility with pre-HTTP/1.1 user agents which don't
- # understand the 303 status.
- if self.code in (302, 303):
- new_request.method = "GET"
- new_request.body = None
- for h in ["Content-Length", "Content-Type",
- "Content-Encoding", "Transfer-Encoding"]:
- try:
- del self.request.headers[h]
- except KeyError:
- pass
- new_request.original_request = original_request
- final_callback = self.final_callback
- self.final_callback = None
- self._release()
- self.client.fetch(new_request, final_callback)
- self._on_end_request()
- return
- if self.request.streaming_callback:
- buffer = BytesIO()
- else:
- buffer = BytesIO(data) # TODO: don't require one big string?
- response = HTTPResponse(original_request,
- self.code, reason=getattr(self, 'reason', None),
- headers=self.headers,
- request_time=self.io_loop.time() - self.start_time,
- buffer=buffer,
- effective_url=self.request.url)
- self._run_callback(response)
- self._on_end_request()
-
- def _on_end_request(self):
- self.stream.close()
-
- def data_received(self, chunk):
- if self._should_follow_redirect():
- # We're going to follow a redirect so just discard the body.
- return
- if self.request.streaming_callback is not None:
- self.request.streaming_callback(chunk)
- else:
- self.chunks.append(chunk)
-
-
-if __name__ == "__main__":
- AsyncHTTPClient.configure(SimpleAsyncHTTPClient)
- main()
+#!/usr/bin/env python
+from __future__ import absolute_import, division, print_function
+
+from tornado.escape import utf8, _unicode
+from tornado import gen
+from tornado.httpclient import HTTPResponse, HTTPError, AsyncHTTPClient, main, _RequestProxy
+from tornado import httputil
+from tornado.http1connection import HTTP1Connection, HTTP1ConnectionParameters
+from tornado.iostream import StreamClosedError
+from tornado.netutil import Resolver, OverrideResolver, _client_ssl_defaults
+from tornado.log import gen_log
+from tornado import stack_context
+from tornado.tcpclient import TCPClient
+from tornado.util import PY3
+
+import base64
+import collections
+import copy
+import functools
+import re
+import socket
+import sys
+from io import BytesIO
+
+
+if PY3:
+ import urllib.parse as urlparse
+else:
+ import urlparse
+
+try:
+ import ssl
+except ImportError:
+ # ssl is not available on Google App Engine.
+ ssl = None
+
+try:
+ import certifi
+except ImportError:
+ certifi = None
+
+
+def _default_ca_certs():
+ if certifi is None:
+ raise Exception("The 'certifi' package is required to use https "
+ "in simple_httpclient")
+ return certifi.where()
+
+
+class SimpleAsyncHTTPClient(AsyncHTTPClient):
+ """Non-blocking HTTP client with no external dependencies.
+
+ This class implements an HTTP 1.1 client on top of Tornado's IOStreams.
+ Some features found in the curl-based AsyncHTTPClient are not yet
+ supported. In particular, proxies are not supported, connections
+ are not reused, and callers cannot select the network interface to be
+ used.
+ """
+ def initialize(self, io_loop, max_clients=10,
+ hostname_mapping=None, max_buffer_size=104857600,
+ resolver=None, defaults=None, max_header_size=None,
+ max_body_size=None):
+ """Creates a AsyncHTTPClient.
+
+ Only a single AsyncHTTPClient instance exists per IOLoop
+ in order to provide limitations on the number of pending connections.
+ ``force_instance=True`` may be used to suppress this behavior.
+
+ Note that because of this implicit reuse, unless ``force_instance``
+ is used, only the first call to the constructor actually uses
+ its arguments. It is recommended to use the ``configure`` method
+ instead of the constructor to ensure that arguments take effect.
+
+ ``max_clients`` is the number of concurrent requests that can be
+ in progress; when this limit is reached additional requests will be
+ queued. Note that time spent waiting in this queue still counts
+ against the ``request_timeout``.
+
+ ``hostname_mapping`` is a dictionary mapping hostnames to IP addresses.
+ It can be used to make local DNS changes when modifying system-wide
+ settings like ``/etc/hosts`` is not possible or desirable (e.g. in
+ unittests).
+
+ ``max_buffer_size`` (default 100MB) is the number of bytes
+ that can be read into memory at once. ``max_body_size``
+ (defaults to ``max_buffer_size``) is the largest response body
+ that the client will accept. Without a
+ ``streaming_callback``, the smaller of these two limits
+ applies; with a ``streaming_callback`` only ``max_body_size``
+ does.
+
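+ A minimal configuration sketch (the values below are illustrative)::
+
+     AsyncHTTPClient.configure(
+         SimpleAsyncHTTPClient,
+         max_clients=50,
+         hostname_mapping={"www.example.com": "127.0.0.1"})
+     client = AsyncHTTPClient()
+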
+ .. versionchanged:: 4.2
+ Added the ``max_body_size`` argument.
+ """
+ super(SimpleAsyncHTTPClient, self).initialize(io_loop,
+ defaults=defaults)
+ self.max_clients = max_clients
+ self.queue = collections.deque()
+ self.active = {}
+ self.waiting = {}
+ self.max_buffer_size = max_buffer_size
+ self.max_header_size = max_header_size
+ self.max_body_size = max_body_size
+ # TCPClient could create a Resolver for us, but we have to do it
+ # ourselves to support hostname_mapping.
+ if resolver:
+ self.resolver = resolver
+ self.own_resolver = False
+ else:
+ self.resolver = Resolver(io_loop=io_loop)
+ self.own_resolver = True
+ if hostname_mapping is not None:
+ self.resolver = OverrideResolver(resolver=self.resolver,
+ mapping=hostname_mapping)
+ self.tcp_client = TCPClient(resolver=self.resolver, io_loop=io_loop)
+
+ def close(self):
+ super(SimpleAsyncHTTPClient, self).close()
+ if self.own_resolver:
+ self.resolver.close()
+ self.tcp_client.close()
+
+ def fetch_impl(self, request, callback):
+ key = object()
+ self.queue.append((key, request, callback))
+ if not len(self.active) < self.max_clients:
+ timeout_handle = self.io_loop.add_timeout(
+ self.io_loop.time() + min(request.connect_timeout,
+ request.request_timeout),
+ functools.partial(self._on_timeout, key, "in request queue"))
+ else:
+ timeout_handle = None
+ self.waiting[key] = (request, callback, timeout_handle)
+ self._process_queue()
+ if self.queue:
+ gen_log.debug("max_clients limit reached, request queued. "
+ "%d active, %d queued requests." % (
+ len(self.active), len(self.queue)))
+
+ def _process_queue(self):
+ with stack_context.NullContext():
+ while self.queue and len(self.active) < self.max_clients:
+ key, request, callback = self.queue.popleft()
+ if key not in self.waiting:
+ continue
+ self._remove_timeout(key)
+ self.active[key] = (request, callback)
+ release_callback = functools.partial(self._release_fetch, key)
+ self._handle_request(request, release_callback, callback)
+
+ def _connection_class(self):
+ return _HTTPConnection
+
+ def _handle_request(self, request, release_callback, final_callback):
+ self._connection_class()(
+ self.io_loop, self, request, release_callback,
+ final_callback, self.max_buffer_size, self.tcp_client,
+ self.max_header_size, self.max_body_size)
+
+ def _release_fetch(self, key):
+ del self.active[key]
+ self._process_queue()
+
+ def _remove_timeout(self, key):
+ if key in self.waiting:
+ request, callback, timeout_handle = self.waiting[key]
+ if timeout_handle is not None:
+ self.io_loop.remove_timeout(timeout_handle)
+ del self.waiting[key]
+
+ def _on_timeout(self, key, info=None):
+ """Timeout callback of request.
+
+ Construct a timeout HTTPResponse when a timeout occurs.
+
+ :arg object key: A simple object to mark the request.
+ :arg string info: More detailed timeout information.
+ """
+ request, callback, timeout_handle = self.waiting[key]
+ self.queue.remove((key, request, callback))
+
+ error_message = "Timeout {0}".format(info) if info else "Timeout"
+ timeout_response = HTTPResponse(
+ request, 599, error=HTTPError(599, error_message),
+ request_time=self.io_loop.time() - request.start_time)
+ self.io_loop.add_callback(callback, timeout_response)
+ del self.waiting[key]
+
+
+class _HTTPConnection(httputil.HTTPMessageDelegate):
+ _SUPPORTED_METHODS = set(["GET", "HEAD", "POST", "PUT", "DELETE", "PATCH", "OPTIONS"])
+
+ def __init__(self, io_loop, client, request, release_callback,
+ final_callback, max_buffer_size, tcp_client,
+ max_header_size, max_body_size):
+ self.start_time = io_loop.time()
+ self.io_loop = io_loop
+ self.client = client
+ self.request = request
+ self.release_callback = release_callback
+ self.final_callback = final_callback
+ self.max_buffer_size = max_buffer_size
+ self.tcp_client = tcp_client
+ self.max_header_size = max_header_size
+ self.max_body_size = max_body_size
+ self.code = None
+ self.headers = None
+ self.chunks = []
+ self._decompressor = None
+ # Timeout handle returned by IOLoop.add_timeout
+ self._timeout = None
+ self._sockaddr = None
+ with stack_context.ExceptionStackContext(self._handle_exception):
+ self.parsed = urlparse.urlsplit(_unicode(self.request.url))
+ if self.parsed.scheme not in ("http", "https"):
+ raise ValueError("Unsupported url scheme: %s" %
+ self.request.url)
+ # urlsplit results have hostname and port attributes, but they
+ # didn't support ipv6 literals until python 2.7.
+ netloc = self.parsed.netloc
+ if "@" in netloc:
+ userpass, _, netloc = netloc.rpartition("@")
+ host, port = httputil.split_host_and_port(netloc)
+ if port is None:
+ port = 443 if self.parsed.scheme == "https" else 80
+ if re.match(r'^\[.*\]$', host):
+ # raw ipv6 addresses in urls are enclosed in brackets
+ host = host[1:-1]
+ self.parsed_hostname = host # save final host for _on_connect
+
+ if request.allow_ipv6 is False:
+ af = socket.AF_INET
+ else:
+ af = socket.AF_UNSPEC
+
+ ssl_options = self._get_ssl_options(self.parsed.scheme)
+
+ timeout = min(self.request.connect_timeout, self.request.request_timeout)
+ if timeout:
+ self._timeout = self.io_loop.add_timeout(
+ self.start_time + timeout,
+ stack_context.wrap(functools.partial(self._on_timeout, "while connecting")))
+ self.tcp_client.connect(host, port, af=af,
+ ssl_options=ssl_options,
+ max_buffer_size=self.max_buffer_size,
+ callback=self._on_connect)
+
+ def _get_ssl_options(self, scheme):
+ if scheme == "https":
+ if self.request.ssl_options is not None:
+ return self.request.ssl_options
+ # If we are using the defaults, don't construct a
+ # new SSLContext.
+ if (self.request.validate_cert and
+ self.request.ca_certs is None and
+ self.request.client_cert is None and
+ self.request.client_key is None):
+ return _client_ssl_defaults
+ ssl_options = {}
+ if self.request.validate_cert:
+ ssl_options["cert_reqs"] = ssl.CERT_REQUIRED
+ if self.request.ca_certs is not None:
+ ssl_options["ca_certs"] = self.request.ca_certs
+ elif not hasattr(ssl, 'create_default_context'):
+ # When create_default_context is present,
+ # we can omit the "ca_certs" parameter entirely,
+ # which avoids the dependency on "certifi" for py34.
+ ssl_options["ca_certs"] = _default_ca_certs()
+ if self.request.client_key is not None:
+ ssl_options["keyfile"] = self.request.client_key
+ if self.request.client_cert is not None:
+ ssl_options["certfile"] = self.request.client_cert
+
+ # SSL interoperability is tricky. We want to disable
+ # SSLv2 for security reasons; it wasn't disabled by default
+ # until openssl 1.0. The best way to do this is to use
+ # the SSL_OP_NO_SSLv2, but that wasn't exposed to python
+ # until 3.2. Python 2.7 adds the ciphers argument, which
+ # can also be used to disable SSLv2. As a last resort
+ # on python 2.6, we set ssl_version to TLSv1. This is
+ # more narrow than we'd like since it also breaks
+ # compatibility with servers configured for SSLv3 only,
+ # but nearly all servers support both SSLv3 and TLSv1:
+ # http://blog.ivanristic.com/2011/09/ssl-survey-protocol-support.html
+ if sys.version_info >= (2, 7):
+ # In addition to disabling SSLv2, we also exclude certain
+ # classes of insecure ciphers.
+ ssl_options["ciphers"] = "DEFAULT:!SSLv2:!EXPORT:!DES"
+ else:
+ # This is really only necessary for pre-1.0 versions
+ # of openssl, but python 2.6 doesn't expose version
+ # information.
+ ssl_options["ssl_version"] = ssl.PROTOCOL_TLSv1
+ return ssl_options
+ return None
+
+ def _on_timeout(self, info=None):
+ """Timeout callback of _HTTPConnection instance.
+
+ Raise a timeout HTTPError when a timeout occurs.
+
+ :arg string info: More detailed timeout information.
+ """
+ self._timeout = None
+ error_message = "Timeout {0}".format(info) if info else "Timeout"
+ if self.final_callback is not None:
+ raise HTTPError(599, error_message)
+
+ def _remove_timeout(self):
+ if self._timeout is not None:
+ self.io_loop.remove_timeout(self._timeout)
+ self._timeout = None
+
+ def _on_connect(self, stream):
+ if self.final_callback is None:
+ # final_callback is cleared if we've hit our timeout.
+ stream.close()
+ return
+ self.stream = stream
+ self.stream.set_close_callback(self.on_connection_close)
+ self._remove_timeout()
+ if self.final_callback is None:
+ return
+ if self.request.request_timeout:
+ self._timeout = self.io_loop.add_timeout(
+ self.start_time + self.request.request_timeout,
+ stack_context.wrap(functools.partial(self._on_timeout, "during request")))
+ if (self.request.method not in self._SUPPORTED_METHODS and
+ not self.request.allow_nonstandard_methods):
+ raise KeyError("unknown method %s" % self.request.method)
+ for key in ('network_interface',
+ 'proxy_host', 'proxy_port',
+ 'proxy_username', 'proxy_password',
+ 'proxy_auth_mode'):
+ if getattr(self.request, key, None):
+ raise NotImplementedError('%s not supported' % key)
+ if "Connection" not in self.request.headers:
+ self.request.headers["Connection"] = "close"
+ if "Host" not in self.request.headers:
+ if '@' in self.parsed.netloc:
+ self.request.headers["Host"] = self.parsed.netloc.rpartition('@')[-1]
+ else:
+ self.request.headers["Host"] = self.parsed.netloc
+ username, password = None, None
+ if self.parsed.username is not None:
+ username, password = self.parsed.username, self.parsed.password
+ elif self.request.auth_username is not None:
+ username = self.request.auth_username
+ password = self.request.auth_password or ''
+ if username is not None:
+ if self.request.auth_mode not in (None, "basic"):
+ raise ValueError("unsupported auth_mode %s" %
+ self.request.auth_mode)
+ auth = utf8(username) + b":" + utf8(password)
+ self.request.headers["Authorization"] = (b"Basic " +
+ base64.b64encode(auth))
+ if self.request.user_agent:
+ self.request.headers["User-Agent"] = self.request.user_agent
+ if not self.request.allow_nonstandard_methods:
+ # Some HTTP methods nearly always have bodies while others
+ # almost never do. Fail on a mismatch unless the user has
+ # opted out of sanity checks with allow_nonstandard_methods.
+ body_expected = self.request.method in ("POST", "PATCH", "PUT")
+ body_present = (self.request.body is not None or
+ self.request.body_producer is not None)
+ if ((body_expected and not body_present) or
+ (body_present and not body_expected)):
+ raise ValueError(
+ 'Body must %sbe None for method %s (unless '
+ 'allow_nonstandard_methods is true)' %
+ ('not ' if body_expected else '', self.request.method))
+ if self.request.expect_100_continue:
+ self.request.headers["Expect"] = "100-continue"
+ if self.request.body is not None:
+ # When body_producer is used the caller is responsible for
+ # setting Content-Length (or else chunked encoding will be used).
+ self.request.headers["Content-Length"] = str(len(
+ self.request.body))
+ if (self.request.method == "POST" and
+ "Content-Type" not in self.request.headers):
+ self.request.headers["Content-Type"] = "application/x-www-form-urlencoded"
+ if self.request.decompress_response:
+ self.request.headers["Accept-Encoding"] = "gzip"
+ req_path = ((self.parsed.path or '/') +
+ (('?' + self.parsed.query) if self.parsed.query else ''))
+ self.connection = self._create_connection(stream)
+ start_line = httputil.RequestStartLine(self.request.method,
+ req_path, '')
+ self.connection.write_headers(start_line, self.request.headers)
+ if self.request.expect_100_continue:
+ self._read_response()
+ else:
+ self._write_body(True)
+
+ def _create_connection(self, stream):
+ stream.set_nodelay(True)
+ connection = HTTP1Connection(
+ stream, True,
+ HTTP1ConnectionParameters(
+ no_keep_alive=True,
+ max_header_size=self.max_header_size,
+ max_body_size=self.max_body_size,
+ decompress=self.request.decompress_response),
+ self._sockaddr)
+ return connection
+
+ def _write_body(self, start_read):
+ if self.request.body is not None:
+ self.connection.write(self.request.body)
+ elif self.request.body_producer is not None:
+ fut = self.request.body_producer(self.connection.write)
+ if fut is not None:
+ fut = gen.convert_yielded(fut)
+
+ def on_body_written(fut):
+ fut.result()
+ self.connection.finish()
+ if start_read:
+ self._read_response()
+ self.io_loop.add_future(fut, on_body_written)
+ return
+ self.connection.finish()
+ if start_read:
+ self._read_response()
+
+ def _read_response(self):
+ # Ensure that any exception raised in read_response ends up in our
+ # stack context.
+ self.io_loop.add_future(
+ self.connection.read_response(self),
+ lambda f: f.result())
+
+ def _release(self):
+ if self.release_callback is not None:
+ release_callback = self.release_callback
+ self.release_callback = None
+ release_callback()
+
+ def _run_callback(self, response):
+ self._release()
+ if self.final_callback is not None:
+ final_callback = self.final_callback
+ self.final_callback = None
+ self.io_loop.add_callback(final_callback, response)
+
+ def _handle_exception(self, typ, value, tb):
+ if self.final_callback:
+ self._remove_timeout()
+ if isinstance(value, StreamClosedError):
+ if value.real_error is None:
+ value = HTTPError(599, "Stream closed")
+ else:
+ value = value.real_error
+ self._run_callback(HTTPResponse(self.request, 599, error=value,
+ request_time=self.io_loop.time() - self.start_time,
+ ))
+
+ if hasattr(self, "stream"):
+ # TODO: this may cause a StreamClosedError to be raised
+ # by the connection's Future. Should we cancel the
+ # connection more gracefully?
+ self.stream.close()
+ return True
+ else:
+ # If our callback has already been called, we are probably
+ # catching an exception that is not caused by us but rather
+ # some child of our callback. Rather than drop it on the floor,
+ # pass it along, unless it's just the stream being closed.
+ return isinstance(value, StreamClosedError)
+
+ def on_connection_close(self):
+ if self.final_callback is not None:
+ message = "Connection closed"
+ if self.stream.error:
+ raise self.stream.error
+ try:
+ raise HTTPError(599, message)
+ except HTTPError:
+ self._handle_exception(*sys.exc_info())
+
+ def headers_received(self, first_line, headers):
+ if self.request.expect_100_continue and first_line.code == 100:
+ self._write_body(False)
+ return
+ self.code = first_line.code
+ self.reason = first_line.reason
+ self.headers = headers
+
+ if self._should_follow_redirect():
+ return
+
+ if self.request.header_callback is not None:
+ # Reassemble the start line.
+ self.request.header_callback('%s %s %s\r\n' % first_line)
+ for k, v in self.headers.get_all():
+ self.request.header_callback("%s: %s\r\n" % (k, v))
+ self.request.header_callback('\r\n')
+
+ def _should_follow_redirect(self):
+ return (self.request.follow_redirects and
+ self.request.max_redirects > 0 and
+ self.code in (301, 302, 303, 307, 308))
+
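+ # For example (illustrative): with follow_redirects=True and
+ # max_redirects=5, a 302 or 303 response to a POST is retried as a GET
+ # with max_redirects=4 (see finish() below), while with
+ # follow_redirects=False any 3xx response is simply returned to the caller.
+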
+ def finish(self):
+ data = b''.join(self.chunks)
+ self._remove_timeout()
+ original_request = getattr(self.request, "original_request",
+ self.request)
+ if self._should_follow_redirect():
+ assert isinstance(self.request, _RequestProxy)
+ new_request = copy.copy(self.request.request)
+ new_request.url = urlparse.urljoin(self.request.url,
+ self.headers["Location"])
+ new_request.max_redirects = self.request.max_redirects - 1
+ del new_request.headers["Host"]
+ # http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3.4
+ # Client SHOULD make a GET request after a 303.
+ # According to the spec, 302 should be followed by the same
+ # method as the original request, but in practice browsers
+ # treat 302 the same as 303, and many servers use 302 for
+ # compatibility with pre-HTTP/1.1 user agents which don't
+ # understand the 303 status.
+ if self.code in (302, 303):
+ new_request.method = "GET"
+ new_request.body = None
+ for h in ["Content-Length", "Content-Type",
+ "Content-Encoding", "Transfer-Encoding"]:
+ try:
+ del self.request.headers[h]
+ except KeyError:
+ pass
+ new_request.original_request = original_request
+ final_callback = self.final_callback
+ self.final_callback = None
+ self._release()
+ self.client.fetch(new_request, final_callback)
+ self._on_end_request()
+ return
+ if self.request.streaming_callback:
+ buffer = BytesIO()
+ else:
+ buffer = BytesIO(data) # TODO: don't require one big string?
+ response = HTTPResponse(original_request,
+ self.code, reason=getattr(self, 'reason', None),
+ headers=self.headers,
+ request_time=self.io_loop.time() - self.start_time,
+ buffer=buffer,
+ effective_url=self.request.url)
+ self._run_callback(response)
+ self._on_end_request()
+
+ def _on_end_request(self):
+ self.stream.close()
+
+ def data_received(self, chunk):
+ if self._should_follow_redirect():
+ # We're going to follow a redirect so just discard the body.
+ return
+ if self.request.streaming_callback is not None:
+ self.request.streaming_callback(chunk)
+ else:
+ self.chunks.append(chunk)
+
+
+if __name__ == "__main__":
+ AsyncHTTPClient.configure(SimpleAsyncHTTPClient)
+ main()
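+
+# A minimal usage sketch (illustrative, not part of the upstream module):
+# it selects SimpleAsyncHTTPClient as the AsyncHTTPClient implementation
+# and performs one callback-style fetch. The URL and the helper's name are
+# placeholders; the calls themselves are Tornado 4 public API.
+def _example_fetch(url="http://example.com/"):
+    from tornado.ioloop import IOLoop
+
+    AsyncHTTPClient.configure(SimpleAsyncHTTPClient, max_clients=10)
+
+    def handle_response(response):
+        # response.error is set on connection failures and non-2xx codes.
+        print(response.code, response.error)
+        IOLoop.current().stop()
+
+    AsyncHTTPClient().fetch(url, callback=handle_response)
+    IOLoop.current().start()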
diff --git a/contrib/python/tornado/tornado-4/tornado/speedups.c b/contrib/python/tornado/tornado-4/tornado/speedups.c
index c59bda0092..bea15523ff 100644
--- a/contrib/python/tornado/tornado-4/tornado/speedups.c
+++ b/contrib/python/tornado/tornado-4/tornado/speedups.c
@@ -1,52 +1,52 @@
-#define PY_SSIZE_T_CLEAN
-#include <Python.h>
-
-static PyObject* websocket_mask(PyObject* self, PyObject* args) {
- const char* mask;
- Py_ssize_t mask_len;
- const char* data;
- Py_ssize_t data_len;
- Py_ssize_t i;
- PyObject* result;
- char* buf;
-
- if (!PyArg_ParseTuple(args, "s#s#", &mask, &mask_len, &data, &data_len)) {
- return NULL;
- }
-
- result = PyBytes_FromStringAndSize(NULL, data_len);
- if (!result) {
- return NULL;
- }
- buf = PyBytes_AsString(result);
- for (i = 0; i < data_len; i++) {
- buf[i] = data[i] ^ mask[i % 4];
- }
-
- return result;
-}
-
-static PyMethodDef methods[] = {
- {"websocket_mask", websocket_mask, METH_VARARGS, ""},
- {NULL, NULL, 0, NULL}
-};
-
-#if PY_MAJOR_VERSION >= 3
-static struct PyModuleDef speedupsmodule = {
- PyModuleDef_HEAD_INIT,
- "speedups",
- NULL,
- -1,
- methods
-};
-
-PyMODINIT_FUNC
-PyInit_speedups(void) {
- return PyModule_Create(&speedupsmodule);
-}
-#else // Python 2.x
-PyMODINIT_FUNC
-initspeedups(void) {
- Py_InitModule("tornado.speedups", methods);
-}
-#endif
+#define PY_SSIZE_T_CLEAN
+#include <Python.h>
+
+static PyObject* websocket_mask(PyObject* self, PyObject* args) {
+ const char* mask;
+ Py_ssize_t mask_len;
+ const char* data;
+ Py_ssize_t data_len;
+ Py_ssize_t i;
+ PyObject* result;
+ char* buf;
+
+ if (!PyArg_ParseTuple(args, "s#s#", &mask, &mask_len, &data, &data_len)) {
+ return NULL;
+ }
+
+ result = PyBytes_FromStringAndSize(NULL, data_len);
+ if (!result) {
+ return NULL;
+ }
+ buf = PyBytes_AsString(result);
+ for (i = 0; i < data_len; i++) {
+ buf[i] = data[i] ^ mask[i % 4];
+ }
+
+ return result;
+}
+
+static PyMethodDef methods[] = {
+ {"websocket_mask", websocket_mask, METH_VARARGS, ""},
+ {NULL, NULL, 0, NULL}
+};
+
+#if PY_MAJOR_VERSION >= 3
+static struct PyModuleDef speedupsmodule = {
+ PyModuleDef_HEAD_INIT,
+ "speedups",
+ NULL,
+ -1,
+ methods
+};
+
+PyMODINIT_FUNC
+PyInit_speedups(void) {
+ return PyModule_Create(&speedupsmodule);
+}
+#else // Python 2.x
+PyMODINIT_FUNC
+initspeedups(void) {
+ Py_InitModule("tornado.speedups", methods);
+}
+#endif
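+
+/* A pure-Python equivalent of websocket_mask above, shown for illustration
+ * only (tornado.util ships a similar pure-Python fallback for builds where
+ * this extension is unavailable; the name below is hypothetical):
+ *
+ *     import array
+ *
+ *     def websocket_mask_py(mask, data):
+ *         mask_arr = array.array("B", mask)
+ *         unmasked = array.array("B", data)
+ *         for i in range(len(data)):
+ *             unmasked[i] ^= mask_arr[i % 4]
+ *         return unmasked.tobytes()  # Python 3; use .tostring() on Python 2
+ */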
diff --git a/contrib/python/tornado/tornado-4/tornado/stack_context.py b/contrib/python/tornado/tornado-4/tornado/stack_context.py
index 61ae51f4eb..3081121329 100644
--- a/contrib/python/tornado/tornado-4/tornado/stack_context.py
+++ b/contrib/python/tornado/tornado-4/tornado/stack_context.py
@@ -1,390 +1,390 @@
-#!/usr/bin/env python
-#
-# Copyright 2010 Facebook
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""`StackContext` allows applications to maintain threadlocal-like state
-that follows execution as it moves to other execution contexts.
-
-The motivating examples are to eliminate the need for explicit
-``async_callback`` wrappers (as in `tornado.web.RequestHandler`), and to
-allow some additional context to be kept for logging.
-
-This is slightly magic, but it's an extension of the idea that an
-exception handler is a kind of stack-local state and when that stack
-is suspended and resumed in a new context that state needs to be
-preserved. `StackContext` shifts the burden of restoring that state
-from each call site (e.g. wrapping each `.AsyncHTTPClient` callback
-in ``async_callback``) to the mechanisms that transfer control from
-one context to another (e.g. `.AsyncHTTPClient` itself, `.IOLoop`,
-thread pools, etc).
-
-Example usage::
-
- @contextlib.contextmanager
- def die_on_error():
- try:
- yield
- except Exception:
- logging.error("exception in asynchronous operation", exc_info=True)
- sys.exit(1)
-
- with StackContext(die_on_error):
- # Any exception thrown here *or in callback and its descendants*
- # will cause the process to exit instead of spinning endlessly
- # in the ioloop.
- http_client.fetch(url, callback)
- ioloop.start()
-
-Most applications shouldn't have to work with `StackContext` directly.
-Here are a few rules of thumb for when it's necessary:
-
-* If you're writing an asynchronous library that doesn't rely on a
- stack_context-aware library like `tornado.ioloop` or `tornado.iostream`
- (for example, if you're writing a thread pool), use
- `.stack_context.wrap()` before any asynchronous operations to capture the
- stack context from where the operation was started.
-
-* If you're writing an asynchronous library that has some shared
- resources (such as a connection pool), create those shared resources
- within a ``with stack_context.NullContext():`` block. This will prevent
- ``StackContexts`` from leaking from one request to another.
-
-* If you want to write something like an exception handler that will
- persist across asynchronous calls, create a new `StackContext` (or
- `ExceptionStackContext`), and make your asynchronous calls in a ``with``
- block that references your `StackContext`.
-"""
-
-from __future__ import absolute_import, division, print_function
-
-import sys
-import threading
-
-from tornado.util import raise_exc_info
-
-
-class StackContextInconsistentError(Exception):
- pass
-
-
-class _State(threading.local):
- def __init__(self):
- self.contexts = (tuple(), None)
-
-
-_state = _State()
-
-
-class StackContext(object):
- """Establishes the given context as a StackContext that will be transferred.
-
- Note that the parameter is a callable that returns a context
- manager, not the context itself. That is, where for a
- non-transferable context manager you would say::
-
- with my_context():
-
- StackContext takes the function itself rather than its result::
-
- with StackContext(my_context):
-
- The result of ``with StackContext() as cb:`` is a deactivation
- callback. Run this callback when the StackContext is no longer
- needed to ensure that it is not propagated any further (note that
- deactivating a context does not affect any instances of that
- context that are currently pending). This is an advanced feature
- and not necessary in most applications.
- """
- def __init__(self, context_factory):
- self.context_factory = context_factory
- self.contexts = []
- self.active = True
-
- def _deactivate(self):
- self.active = False
-
- # StackContext protocol
- def enter(self):
- context = self.context_factory()
- self.contexts.append(context)
- context.__enter__()
-
- def exit(self, type, value, traceback):
- context = self.contexts.pop()
- context.__exit__(type, value, traceback)
-
- # Note that some of this code is duplicated in ExceptionStackContext
- # below. ExceptionStackContext is more common and doesn't need
- # the full generality of this class.
- def __enter__(self):
- self.old_contexts = _state.contexts
- self.new_contexts = (self.old_contexts[0] + (self,), self)
- _state.contexts = self.new_contexts
-
- try:
- self.enter()
- except:
- _state.contexts = self.old_contexts
- raise
-
- return self._deactivate
-
- def __exit__(self, type, value, traceback):
- try:
- self.exit(type, value, traceback)
- finally:
- final_contexts = _state.contexts
- _state.contexts = self.old_contexts
-
- # Generator coroutines and with-statements with non-local
- # effects interact badly. Check here for signs of
- # the stack getting out of sync.
- # Note that this check comes after restoring _state.contexts
- # so that if it fails things are left in a (relatively)
- # consistent state.
- if final_contexts is not self.new_contexts:
- raise StackContextInconsistentError(
- 'stack_context inconsistency (may be caused by yield '
- 'within a "with StackContext" block)')
-
- # Break up a reference to itself to allow for faster GC on CPython.
- self.new_contexts = None
-
-
-class ExceptionStackContext(object):
- """Specialization of StackContext for exception handling.
-
- The supplied ``exception_handler`` function will be called in the
- event of an uncaught exception in this context. The semantics are
- similar to a try/finally clause, and intended use cases are to log
- an error, close a socket, or similar cleanup actions. The
- ``exc_info`` triple ``(type, value, traceback)`` will be passed to the
- exception_handler function.
-
- If the exception handler returns true, the exception will be
- consumed and will not be propagated to other exception handlers.
- """
- def __init__(self, exception_handler):
- self.exception_handler = exception_handler
- self.active = True
-
- def _deactivate(self):
- self.active = False
-
- def exit(self, type, value, traceback):
- if type is not None:
- return self.exception_handler(type, value, traceback)
-
- def __enter__(self):
- self.old_contexts = _state.contexts
- self.new_contexts = (self.old_contexts[0], self)
- _state.contexts = self.new_contexts
-
- return self._deactivate
-
- def __exit__(self, type, value, traceback):
- try:
- if type is not None:
- return self.exception_handler(type, value, traceback)
- finally:
- final_contexts = _state.contexts
- _state.contexts = self.old_contexts
-
- if final_contexts is not self.new_contexts:
- raise StackContextInconsistentError(
- 'stack_context inconsistency (may be caused by yield '
- 'within a "with StackContext" block)')
-
- # Break up a reference to itself to allow for faster GC on CPython.
- self.new_contexts = None
-
-
-class NullContext(object):
- """Resets the `StackContext`.
-
- Useful when creating a shared resource on demand (e.g. an
- `.AsyncHTTPClient`) where the stack that caused the creation is
- not relevant to future operations.
- """
- def __enter__(self):
- self.old_contexts = _state.contexts
- _state.contexts = (tuple(), None)
-
- def __exit__(self, type, value, traceback):
- _state.contexts = self.old_contexts
-
-
-def _remove_deactivated(contexts):
- """Remove deactivated handlers from the chain"""
- # Clean ctx handlers
- stack_contexts = tuple([h for h in contexts[0] if h.active])
-
- # Find new head
- head = contexts[1]
- while head is not None and not head.active:
- head = head.old_contexts[1]
-
- # Process chain
- ctx = head
- while ctx is not None:
- parent = ctx.old_contexts[1]
-
- while parent is not None:
- if parent.active:
- break
- ctx.old_contexts = parent.old_contexts
- parent = parent.old_contexts[1]
-
- ctx = parent
-
- return (stack_contexts, head)
-
-
-def wrap(fn):
- """Returns a callable object that will restore the current `StackContext`
- when executed.
-
- Use this whenever saving a callback to be executed later in a
- different execution context (either in a different thread or
- asynchronously in the same thread).
- """
- # Check if function is already wrapped
- if fn is None or hasattr(fn, '_wrapped'):
- return fn
-
- # Capture current stack head
- # TODO: Is there a better way to store contexts and update them in the wrapped function?
- cap_contexts = [_state.contexts]
-
- if not cap_contexts[0][0] and not cap_contexts[0][1]:
- # Fast path when there are no active contexts.
- def null_wrapper(*args, **kwargs):
- try:
- current_state = _state.contexts
- _state.contexts = cap_contexts[0]
- return fn(*args, **kwargs)
- finally:
- _state.contexts = current_state
- null_wrapper._wrapped = True
- return null_wrapper
-
- def wrapped(*args, **kwargs):
- ret = None
- try:
- # Capture old state
- current_state = _state.contexts
-
- # Remove deactivated items
- cap_contexts[0] = contexts = _remove_deactivated(cap_contexts[0])
-
- # Force new state
- _state.contexts = contexts
-
- # Current exception
- exc = (None, None, None)
- top = None
-
- # Apply stack contexts
- last_ctx = 0
- stack = contexts[0]
-
- # Apply state
- for n in stack:
- try:
- n.enter()
- last_ctx += 1
- except:
- # Exception happened. Record exception info and store top-most handler
- exc = sys.exc_info()
- top = n.old_contexts[1]
-
- # Execute callback if no exception happened while restoring state
- if top is None:
- try:
- ret = fn(*args, **kwargs)
- except:
- exc = sys.exc_info()
- top = contexts[1]
-
- # If there was an exception, try to handle it by walking the exception chain
- if top is not None:
- exc = _handle_exception(top, exc)
- else:
- # Otherwise take shorter path and run stack contexts in reverse order
- while last_ctx > 0:
- last_ctx -= 1
- c = stack[last_ctx]
-
- try:
- c.exit(*exc)
- except:
- exc = sys.exc_info()
- top = c.old_contexts[1]
- break
- else:
- top = None
-
- # If an exception happened while unrolling, take the longer exception handler path
- if top is not None:
- exc = _handle_exception(top, exc)
-
- # If exception was not handled, raise it
- if exc != (None, None, None):
- raise_exc_info(exc)
- finally:
- _state.contexts = current_state
- return ret
-
- wrapped._wrapped = True
- return wrapped
-
-
-def _handle_exception(tail, exc):
- while tail is not None:
- try:
- if tail.exit(*exc):
- exc = (None, None, None)
- except:
- exc = sys.exc_info()
-
- tail = tail.old_contexts[1]
-
- return exc
-
-
-def run_with_stack_context(context, func):
- """Run a coroutine ``func`` in the given `StackContext`.
-
- It is not safe to have a ``yield`` statement within a ``with StackContext``
- block, so it is difficult to use stack context with `.gen.coroutine`.
- This helper function runs the function in the correct context while
- keeping the ``yield`` and ``with`` statements syntactically separate.
-
- Example::
-
- @gen.coroutine
- def incorrect():
- with StackContext(ctx):
- # ERROR: this will raise StackContextInconsistentError
- yield other_coroutine()
-
- @gen.coroutine
- def correct():
- yield run_with_stack_context(StackContext(ctx), other_coroutine)
-
- .. versionadded:: 3.1
- """
- with context:
- return func()
+#!/usr/bin/env python
+#
+# Copyright 2010 Facebook
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""`StackContext` allows applications to maintain threadlocal-like state
+that follows execution as it moves to other execution contexts.
+
+The motivating examples are to eliminate the need for explicit
+``async_callback`` wrappers (as in `tornado.web.RequestHandler`), and to
+allow some additional context to be kept for logging.
+
+This is slightly magic, but it's an extension of the idea that an
+exception handler is a kind of stack-local state and when that stack
+is suspended and resumed in a new context that state needs to be
+preserved. `StackContext` shifts the burden of restoring that state
+from each call site (e.g. wrapping each `.AsyncHTTPClient` callback
+in ``async_callback``) to the mechanisms that transfer control from
+one context to another (e.g. `.AsyncHTTPClient` itself, `.IOLoop`,
+thread pools, etc).
+
+Example usage::
+
+ @contextlib.contextmanager
+ def die_on_error():
+ try:
+ yield
+ except Exception:
+ logging.error("exception in asynchronous operation", exc_info=True)
+ sys.exit(1)
+
+ with StackContext(die_on_error):
+ # Any exception thrown here *or in callback and its descendants*
+ # will cause the process to exit instead of spinning endlessly
+ # in the ioloop.
+ http_client.fetch(url, callback)
+ ioloop.start()
+
+Most applications shouldn't have to work with `StackContext` directly.
+Here are a few rules of thumb for when it's necessary:
+
+* If you're writing an asynchronous library that doesn't rely on a
+ stack_context-aware library like `tornado.ioloop` or `tornado.iostream`
+ (for example, if you're writing a thread pool), use
+ `.stack_context.wrap()` before any asynchronous operations to capture the
+ stack context from where the operation was started.
+
+* If you're writing an asynchronous library that has some shared
+ resources (such as a connection pool), create those shared resources
+ within a ``with stack_context.NullContext():`` block. This will prevent
+ ``StackContexts`` from leaking from one request to another.
+
+* If you want to write something like an exception handler that will
+ persist across asynchronous calls, create a new `StackContext` (or
+ `ExceptionStackContext`), and make your asynchronous calls in a ``with``
+ block that references your `StackContext`.
+"""
+
+from __future__ import absolute_import, division, print_function
+
+import sys
+import threading
+
+from tornado.util import raise_exc_info
+
+
+class StackContextInconsistentError(Exception):
+ pass
+
+
+class _State(threading.local):
+ def __init__(self):
+ self.contexts = (tuple(), None)
+
+
+_state = _State()
+
+
+class StackContext(object):
+ """Establishes the given context as a StackContext that will be transferred.
+
+ Note that the parameter is a callable that returns a context
+ manager, not the context itself. That is, where for a
+ non-transferable context manager you would say::
+
+ with my_context():
+
+ StackContext takes the function itself rather than its result::
+
+ with StackContext(my_context):
+
+ The result of ``with StackContext() as cb:`` is a deactivation
+ callback. Run this callback when the StackContext is no longer
+ needed to ensure that it is not propagated any further (note that
+ deactivating a context does not affect any instances of that
+ context that are currently pending). This is an advanced feature
+ and not necessary in most applications.
+ """
+ def __init__(self, context_factory):
+ self.context_factory = context_factory
+ self.contexts = []
+ self.active = True
+
+ def _deactivate(self):
+ self.active = False
+
+ # StackContext protocol
+ def enter(self):
+ context = self.context_factory()
+ self.contexts.append(context)
+ context.__enter__()
+
+ def exit(self, type, value, traceback):
+ context = self.contexts.pop()
+ context.__exit__(type, value, traceback)
+
+ # Note that some of this code is duplicated in ExceptionStackContext
+ # below. ExceptionStackContext is more common and doesn't need
+ # the full generality of this class.
+ def __enter__(self):
+ self.old_contexts = _state.contexts
+ self.new_contexts = (self.old_contexts[0] + (self,), self)
+ _state.contexts = self.new_contexts
+
+ try:
+ self.enter()
+ except:
+ _state.contexts = self.old_contexts
+ raise
+
+ return self._deactivate
+
+ def __exit__(self, type, value, traceback):
+ try:
+ self.exit(type, value, traceback)
+ finally:
+ final_contexts = _state.contexts
+ _state.contexts = self.old_contexts
+
+ # Generator coroutines and with-statements with non-local
+ # effects interact badly. Check here for signs of
+ # the stack getting out of sync.
+ # Note that this check comes after restoring _state.contexts
+ # so that if it fails things are left in a (relatively)
+ # consistent state.
+ if final_contexts is not self.new_contexts:
+ raise StackContextInconsistentError(
+ 'stack_context inconsistency (may be caused by yield '
+ 'within a "with StackContext" block)')
+
+ # Break up a reference to itself to allow for faster GC on CPython.
+ self.new_contexts = None
+
+
+class ExceptionStackContext(object):
+ """Specialization of StackContext for exception handling.
+
+ The supplied ``exception_handler`` function will be called in the
+ event of an uncaught exception in this context. The semantics are
+ similar to a try/finally clause, and intended use cases are to log
+ an error, close a socket, or similar cleanup actions. The
+ ``exc_info`` triple ``(type, value, traceback)`` will be passed to the
+ exception_handler function.
+
+ If the exception handler returns true, the exception will be
+ consumed and will not be propagated to other exception handlers.
+ """
+ def __init__(self, exception_handler):
+ self.exception_handler = exception_handler
+ self.active = True
+
+ def _deactivate(self):
+ self.active = False
+
+ def exit(self, type, value, traceback):
+ if type is not None:
+ return self.exception_handler(type, value, traceback)
+
+ def __enter__(self):
+ self.old_contexts = _state.contexts
+ self.new_contexts = (self.old_contexts[0], self)
+ _state.contexts = self.new_contexts
+
+ return self._deactivate
+
+ def __exit__(self, type, value, traceback):
+ try:
+ if type is not None:
+ return self.exception_handler(type, value, traceback)
+ finally:
+ final_contexts = _state.contexts
+ _state.contexts = self.old_contexts
+
+ if final_contexts is not self.new_contexts:
+ raise StackContextInconsistentError(
+ 'stack_context inconsistency (may be caused by yield '
+ 'within a "with StackContext" block)')
+
+ # Break up a reference to itself to allow for faster GC on CPython.
+ self.new_contexts = None
+
+
+class NullContext(object):
+ """Resets the `StackContext`.
+
+ Useful when creating a shared resource on demand (e.g. an
+ `.AsyncHTTPClient`) where the stack that caused the creation is
+ not relevant to future operations.
+ """
+ def __enter__(self):
+ self.old_contexts = _state.contexts
+ _state.contexts = (tuple(), None)
+
+ def __exit__(self, type, value, traceback):
+ _state.contexts = self.old_contexts
+
+
+def _remove_deactivated(contexts):
+ """Remove deactivated handlers from the chain"""
+ # Clean ctx handlers
+ stack_contexts = tuple([h for h in contexts[0] if h.active])
+
+ # Find new head
+ head = contexts[1]
+ while head is not None and not head.active:
+ head = head.old_contexts[1]
+
+ # Process chain
+ ctx = head
+ while ctx is not None:
+ parent = ctx.old_contexts[1]
+
+ while parent is not None:
+ if parent.active:
+ break
+ ctx.old_contexts = parent.old_contexts
+ parent = parent.old_contexts[1]
+
+ ctx = parent
+
+ return (stack_contexts, head)
+
+
+def wrap(fn):
+ """Returns a callable object that will restore the current `StackContext`
+ when executed.
+
+ Use this whenever saving a callback to be executed later in a
+ different execution context (either in a different thread or
+ asynchronously in the same thread).
+ """
+ # Check if function is already wrapped
+ if fn is None or hasattr(fn, '_wrapped'):
+ return fn
+
+ # Capture current stack head
+ # TODO: Is there a better way to store contexts and update them in the wrapped function?
+ cap_contexts = [_state.contexts]
+
+ if not cap_contexts[0][0] and not cap_contexts[0][1]:
+ # Fast path when there are no active contexts.
+ def null_wrapper(*args, **kwargs):
+ try:
+ current_state = _state.contexts
+ _state.contexts = cap_contexts[0]
+ return fn(*args, **kwargs)
+ finally:
+ _state.contexts = current_state
+ null_wrapper._wrapped = True
+ return null_wrapper
+
+ def wrapped(*args, **kwargs):
+ ret = None
+ try:
+ # Capture old state
+ current_state = _state.contexts
+
+ # Remove deactivated items
+ cap_contexts[0] = contexts = _remove_deactivated(cap_contexts[0])
+
+ # Force new state
+ _state.contexts = contexts
+
+ # Current exception
+ exc = (None, None, None)
+ top = None
+
+ # Apply stack contexts
+ last_ctx = 0
+ stack = contexts[0]
+
+ # Apply state
+ for n in stack:
+ try:
+ n.enter()
+ last_ctx += 1
+ except:
+ # Exception happened. Record exception info and store top-most handler
+ exc = sys.exc_info()
+ top = n.old_contexts[1]
+
+ # Execute callback if no exception happened while restoring state
+ if top is None:
+ try:
+ ret = fn(*args, **kwargs)
+ except:
+ exc = sys.exc_info()
+ top = contexts[1]
+
+ # If there was an exception, try to handle it by walking the exception chain
+ if top is not None:
+ exc = _handle_exception(top, exc)
+ else:
+ # Otherwise take shorter path and run stack contexts in reverse order
+ while last_ctx > 0:
+ last_ctx -= 1
+ c = stack[last_ctx]
+
+ try:
+ c.exit(*exc)
+ except:
+ exc = sys.exc_info()
+ top = c.old_contexts[1]
+ break
+ else:
+ top = None
+
+ # If an exception happened while unrolling, take the longer exception handler path
+ if top is not None:
+ exc = _handle_exception(top, exc)
+
+ # If exception was not handled, raise it
+ if exc != (None, None, None):
+ raise_exc_info(exc)
+ finally:
+ _state.contexts = current_state
+ return ret
+
+ wrapped._wrapped = True
+ return wrapped
+
+
+def _handle_exception(tail, exc):
+ while tail is not None:
+ try:
+ if tail.exit(*exc):
+ exc = (None, None, None)
+ except:
+ exc = sys.exc_info()
+
+ tail = tail.old_contexts[1]
+
+ return exc
+
+
+def run_with_stack_context(context, func):
+ """Run a coroutine ``func`` in the given `StackContext`.
+
+ It is not safe to have a ``yield`` statement within a ``with StackContext``
+ block, so it is difficult to use stack context with `.gen.coroutine`.
+ This helper function runs the function in the correct context while
+ keeping the ``yield`` and ``with`` statements syntactically separate.
+
+ Example::
+
+ @gen.coroutine
+ def incorrect():
+ with StackContext(ctx):
+ # ERROR: this will raise StackContextInconsistentError
+ yield other_coroutine()
+
+ @gen.coroutine
+ def correct():
+ yield run_with_stack_context(StackContext(ctx), other_coroutine)
+
+ .. versionadded:: 3.1
+ """
+ with context:
+ return func()
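+
+# A minimal usage sketch (illustrative, not part of the upstream module):
+# a handler installed via ExceptionStackContext sees exceptions raised by
+# callbacks that were scheduled inside the with-block, even though they run
+# later, because IOLoop.add_callback wraps callbacks with stack_context.wrap.
+def _example_exception_context(io_loop):
+    def handle_exception(typ, value, tb):
+        print("caught:", value)
+        return True  # returning True consumes the exception
+
+    with ExceptionStackContext(handle_exception):
+        io_loop.add_callback(lambda: 1 / 0)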
diff --git a/contrib/python/tornado/tornado-4/tornado/tcpclient.py b/contrib/python/tornado/tornado-4/tornado/tcpclient.py
index bb5e9f347e..bf928d5c6e 100644
--- a/contrib/python/tornado/tornado-4/tornado/tcpclient.py
+++ b/contrib/python/tornado/tornado-4/tornado/tcpclient.py
@@ -1,224 +1,224 @@
-#!/usr/bin/env python
-#
-# Copyright 2014 Facebook
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""A non-blocking TCP connection factory.
-"""
-from __future__ import absolute_import, division, print_function
-
-import functools
-import socket
-
-from tornado.concurrent import Future
-from tornado.ioloop import IOLoop
-from tornado.iostream import IOStream
-from tornado import gen
-from tornado.netutil import Resolver
-from tornado.platform.auto import set_close_exec
-
-_INITIAL_CONNECT_TIMEOUT = 0.3
-
-
-class _Connector(object):
- """A stateless implementation of the "Happy Eyeballs" algorithm.
-
- "Happy Eyeballs" is documented in RFC6555 as the recommended practice
- for when both IPv4 and IPv6 addresses are available.
-
- In this implementation, we partition the addresses by family, and
- make the first connection attempt to whichever address was
- returned first by ``getaddrinfo``. If that connection fails or
- times out, we begin a connection in parallel to the first address
- of the other family. If there are additional failures we retry
- with other addresses, keeping one connection attempt per family
- in flight at a time.
-
- http://tools.ietf.org/html/rfc6555
-
- """
- def __init__(self, addrinfo, io_loop, connect):
- self.io_loop = io_loop
- self.connect = connect
-
- self.future = Future()
- self.timeout = None
- self.last_error = None
- self.remaining = len(addrinfo)
- self.primary_addrs, self.secondary_addrs = self.split(addrinfo)
-
- @staticmethod
- def split(addrinfo):
- """Partition the ``addrinfo`` list by address family.
-
- Returns two lists. The first list contains the first entry from
- ``addrinfo`` and all others with the same family, and the
- second list contains all other addresses (normally one list will
- be AF_INET and the other AF_INET6, although non-standard resolvers
- may return additional families).
- """
- primary = []
- secondary = []
- primary_af = addrinfo[0][0]
- for af, addr in addrinfo:
- if af == primary_af:
- primary.append((af, addr))
- else:
- secondary.append((af, addr))
- return primary, secondary
-
- def start(self, timeout=_INITIAL_CONNECT_TIMEOUT):
- self.try_connect(iter(self.primary_addrs))
- self.set_timeout(timeout)
- return self.future
-
- def try_connect(self, addrs):
- try:
- af, addr = next(addrs)
- except StopIteration:
- # We've reached the end of our queue, but the other queue
- # might still be working. Send a final error on the future
- # only when both queues are finished.
- if self.remaining == 0 and not self.future.done():
- self.future.set_exception(self.last_error or
- IOError("connection failed"))
- return
- future = self.connect(af, addr)
- future.add_done_callback(functools.partial(self.on_connect_done,
- addrs, af, addr))
-
- def on_connect_done(self, addrs, af, addr, future):
- self.remaining -= 1
- try:
- stream = future.result()
- except Exception as e:
- if self.future.done():
- return
- # Error: try again (but remember what happened so we have an
- # error to raise in the end)
- self.last_error = e
- self.try_connect(addrs)
- if self.timeout is not None:
- # If the first attempt failed, don't wait for the
- # timeout to try an address from the secondary queue.
- self.io_loop.remove_timeout(self.timeout)
- self.on_timeout()
- return
- self.clear_timeout()
- if self.future.done():
- # This is a late arrival; just drop it.
- stream.close()
- else:
- self.future.set_result((af, addr, stream))
-
- def set_timeout(self, timeout):
- self.timeout = self.io_loop.add_timeout(self.io_loop.time() + timeout,
- self.on_timeout)
-
- def on_timeout(self):
- self.timeout = None
- self.try_connect(iter(self.secondary_addrs))
-
- def clear_timeout(self):
- if self.timeout is not None:
- self.io_loop.remove_timeout(self.timeout)
-
-
-class TCPClient(object):
- """A non-blocking TCP connection factory.
-
- .. versionchanged:: 4.1
- The ``io_loop`` argument is deprecated.
- """
- def __init__(self, resolver=None, io_loop=None):
- self.io_loop = io_loop or IOLoop.current()
- if resolver is not None:
- self.resolver = resolver
- self._own_resolver = False
- else:
- self.resolver = Resolver(io_loop=io_loop)
- self._own_resolver = True
-
- def close(self):
- if self._own_resolver:
- self.resolver.close()
-
- @gen.coroutine
- def connect(self, host, port, af=socket.AF_UNSPEC, ssl_options=None,
- max_buffer_size=None, source_ip=None, source_port=None):
- """Connect to the given host and port.
-
- Asynchronously returns an `.IOStream` (or `.SSLIOStream` if
- ``ssl_options`` is not None).
-
- Using the ``source_ip`` kwarg, one can specify the source
- IP address to use when establishing the connection.
- In case the user needs to resolve and
- use a specific interface, it has to be handled outside
- of Tornado as this depends very much on the platform.
-
- Similarly, when the user requires a certain source port, it can
- be specified using the ``source_port`` arg.
-
- .. versionchanged:: 4.5
- Added the ``source_ip`` and ``source_port`` arguments.
- """
- addrinfo = yield self.resolver.resolve(host, port, af)
- connector = _Connector(
- addrinfo, self.io_loop,
- functools.partial(self._create_stream, max_buffer_size,
- source_ip=source_ip, source_port=source_port)
- )
- af, addr, stream = yield connector.start()
- # TODO: For better performance we could cache the (af, addr)
- # information here and re-use it on subsequent connections to
- # the same host. (http://tools.ietf.org/html/rfc6555#section-4.2)
- if ssl_options is not None:
- stream = yield stream.start_tls(False, ssl_options=ssl_options,
- server_hostname=host)
- raise gen.Return(stream)
-
- def _create_stream(self, max_buffer_size, af, addr, source_ip=None,
- source_port=None):
- # Always connect in plaintext; we'll convert to ssl if necessary
- # after one connection has completed.
- source_port_bind = source_port if isinstance(source_port, int) else 0
- source_ip_bind = source_ip
- if source_port_bind and not source_ip:
- # The user requested a specific port but no source IP;
- # bind to the default loopback address instead.
- source_ip_bind = '::1' if af == socket.AF_INET6 else '127.0.0.1'
- # Match the address family of the requested socket:
- # - 127.0.0.1 for IPv4
- # - ::1 for IPv6
- socket_obj = socket.socket(af)
- set_close_exec(socket_obj.fileno())
- if source_port_bind or source_ip_bind:
- # The user asked to bind to a specific source IP and/or port.
- try:
- socket_obj.bind((source_ip_bind, source_port_bind))
- except socket.error:
- socket_obj.close()
- # Fail loudly if unable to use the IP/port.
- raise
- try:
- stream = IOStream(socket_obj,
- io_loop=self.io_loop,
- max_buffer_size=max_buffer_size)
- except socket.error as e:
- fu = Future()
- fu.set_exception(e)
- return fu
- else:
- return stream.connect(addr)
+#!/usr/bin/env python
+#
+# Copyright 2014 Facebook
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""A non-blocking TCP connection factory.
+"""
+from __future__ import absolute_import, division, print_function
+
+import functools
+import socket
+
+from tornado.concurrent import Future
+from tornado.ioloop import IOLoop
+from tornado.iostream import IOStream
+from tornado import gen
+from tornado.netutil import Resolver
+from tornado.platform.auto import set_close_exec
+
+_INITIAL_CONNECT_TIMEOUT = 0.3
+
+
+class _Connector(object):
+ """A stateless implementation of the "Happy Eyeballs" algorithm.
+
+ "Happy Eyeballs" is documented in RFC6555 as the recommended practice
+ for when both IPv4 and IPv6 addresses are available.
+
+ In this implementation, we partition the addresses by family, and
+ make the first connection attempt to whichever address was
+ returned first by ``getaddrinfo``. If that connection fails or
+ times out, we begin a connection in parallel to the first address
+ of the other family. If there are additional failures we retry
+ with other addresses, keeping one connection attempt per family
+ in flight at a time.
+
+ http://tools.ietf.org/html/rfc6555
+
+ """
+ def __init__(self, addrinfo, io_loop, connect):
+ self.io_loop = io_loop
+ self.connect = connect
+
+ self.future = Future()
+ self.timeout = None
+ self.last_error = None
+ self.remaining = len(addrinfo)
+ self.primary_addrs, self.secondary_addrs = self.split(addrinfo)
+
+ @staticmethod
+ def split(addrinfo):
+ """Partition the ``addrinfo`` list by address family.
+
+ Returns two lists. The first list contains the first entry from
+ ``addrinfo`` and all others with the same family, and the
+ second list contains all other addresses (normally one list will
+ be AF_INET and the other AF_INET6, although non-standard resolvers
+ may return additional families).
+ """
+ primary = []
+ secondary = []
+ primary_af = addrinfo[0][0]
+ for af, addr in addrinfo:
+ if af == primary_af:
+ primary.append((af, addr))
+ else:
+ secondary.append((af, addr))
+ return primary, secondary
+
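+ # For example (illustrative values): given getaddrinfo-style pairs
+ #   [(AF_INET6, ("::1", 80)), (AF_INET, ("127.0.0.1", 80)),
+ #    (AF_INET6, ("2001:db8::1", 80))],
+ # split() returns both AF_INET6 entries as the primary list (the family
+ # of the first entry) and the AF_INET entry as the secondary list.
+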
+ def start(self, timeout=_INITIAL_CONNECT_TIMEOUT):
+ self.try_connect(iter(self.primary_addrs))
+ self.set_timeout(timeout)
+ return self.future
+
+ def try_connect(self, addrs):
+ try:
+ af, addr = next(addrs)
+ except StopIteration:
+ # We've reached the end of our queue, but the other queue
+ # might still be working. Send a final error on the future
+ # only when both queues are finished.
+ if self.remaining == 0 and not self.future.done():
+ self.future.set_exception(self.last_error or
+ IOError("connection failed"))
+ return
+ future = self.connect(af, addr)
+ future.add_done_callback(functools.partial(self.on_connect_done,
+ addrs, af, addr))
+
+ def on_connect_done(self, addrs, af, addr, future):
+ self.remaining -= 1
+ try:
+ stream = future.result()
+ except Exception as e:
+ if self.future.done():
+ return
+ # Error: try again (but remember what happened so we have an
+ # error to raise in the end)
+ self.last_error = e
+ self.try_connect(addrs)
+ if self.timeout is not None:
+ # If the first attempt failed, don't wait for the
+ # timeout to try an address from the secondary queue.
+ self.io_loop.remove_timeout(self.timeout)
+ self.on_timeout()
+ return
+ self.clear_timeout()
+ if self.future.done():
+ # This is a late arrival; just drop it.
+ stream.close()
+ else:
+ self.future.set_result((af, addr, stream))
+
+ def set_timeout(self, timeout):
+ self.timeout = self.io_loop.add_timeout(self.io_loop.time() + timeout,
+ self.on_timeout)
+
+ def on_timeout(self):
+ self.timeout = None
+ self.try_connect(iter(self.secondary_addrs))
+
+ def clear_timeout(self):
+ if self.timeout is not None:
+ self.io_loop.remove_timeout(self.timeout)
+
+
+class TCPClient(object):
+ """A non-blocking TCP connection factory.
+
+ .. versionchanged:: 4.1
+ The ``io_loop`` argument is deprecated.
+ """
+ def __init__(self, resolver=None, io_loop=None):
+ self.io_loop = io_loop or IOLoop.current()
+ if resolver is not None:
+ self.resolver = resolver
+ self._own_resolver = False
+ else:
+ self.resolver = Resolver(io_loop=io_loop)
+ self._own_resolver = True
+
+ def close(self):
+ if self._own_resolver:
+ self.resolver.close()
+
+ @gen.coroutine
+ def connect(self, host, port, af=socket.AF_UNSPEC, ssl_options=None,
+ max_buffer_size=None, source_ip=None, source_port=None):
+ """Connect to the given host and port.
+
+ Asynchronously returns an `.IOStream` (or `.SSLIOStream` if
+ ``ssl_options`` is not None).
+
+ Using the ``source_ip`` kwarg, one can specify the source
+ IP address to use when establishing the connection.
+ In case the user needs to resolve and
+ use a specific interface, it has to be handled outside
+ of Tornado as this depends very much on the platform.
+
+ Similarly, when the user requires a certain source port, it can
+ be specified using the ``source_port`` arg.
+
+ .. versionchanged:: 4.5
+ Added the ``source_ip`` and ``source_port`` arguments.
+ """
+ addrinfo = yield self.resolver.resolve(host, port, af)
+ connector = _Connector(
+ addrinfo, self.io_loop,
+ functools.partial(self._create_stream, max_buffer_size,
+ source_ip=source_ip, source_port=source_port)
+ )
+ af, addr, stream = yield connector.start()
+ # TODO: For better performance we could cache the (af, addr)
+ # information here and re-use it on subsequent connections to
+ # the same host. (http://tools.ietf.org/html/rfc6555#section-4.2)
+ if ssl_options is not None:
+ stream = yield stream.start_tls(False, ssl_options=ssl_options,
+ server_hostname=host)
+ raise gen.Return(stream)
+
+ def _create_stream(self, max_buffer_size, af, addr, source_ip=None,
+ source_port=None):
+ # Always connect in plaintext; we'll convert to ssl if necessary
+ # after one connection has completed.
+ source_port_bind = source_port if isinstance(source_port, int) else 0
+ source_ip_bind = source_ip
+ if source_port_bind and not source_ip:
+ # The user requested a specific port but no source IP;
+ # bind to the default loopback address instead.
+ source_ip_bind = '::1' if af == socket.AF_INET6 else '127.0.0.1'
+ # Match the address family of the requested socket:
+ # - 127.0.0.1 for IPv4
+ # - ::1 for IPv6
+ socket_obj = socket.socket(af)
+ set_close_exec(socket_obj.fileno())
+ if source_port_bind or source_ip_bind:
+ # The user asked to bind to a specific source IP and/or port.
+ try:
+ socket_obj.bind((source_ip_bind, source_port_bind))
+ except socket.error:
+ socket_obj.close()
+ # Fail loudly if unable to use the IP/port.
+ raise
+ try:
+ stream = IOStream(socket_obj,
+ io_loop=self.io_loop,
+ max_buffer_size=max_buffer_size)
+ except socket.error as e:
+ fu = Future()
+ fu.set_exception(e)
+ return fu
+ else:
+ return stream.connect(addr)
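+
+# A minimal usage sketch (illustrative, not part of the upstream module):
+# connect to an echo server, write one line, and read the reply back. The
+# host, port, and helper name are placeholders; the APIs are Tornado 4's.
+@gen.coroutine
+def _example_echo(host="localhost", port=8888):
+    client = TCPClient()
+    stream = yield client.connect(host, port)
+    try:
+        yield stream.write(b"hello\n")
+        reply = yield stream.read_until(b"\n")
+        raise gen.Return(reply)
+    finally:
+        stream.close()
+        client.close()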
diff --git a/contrib/python/tornado/tornado-4/tornado/tcpserver.py b/contrib/python/tornado/tornado-4/tornado/tcpserver.py
index f47ec89a42..9ea1784bda 100644
--- a/contrib/python/tornado/tornado-4/tornado/tcpserver.py
+++ b/contrib/python/tornado/tornado-4/tornado/tcpserver.py
@@ -1,300 +1,300 @@
-#!/usr/bin/env python
-#
-# Copyright 2011 Facebook
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""A non-blocking, single-threaded TCP server."""
-from __future__ import absolute_import, division, print_function
-
-import errno
-import os
-import socket
-
-from tornado import gen
-from tornado.log import app_log
-from tornado.ioloop import IOLoop
-from tornado.iostream import IOStream, SSLIOStream
-from tornado.netutil import bind_sockets, add_accept_handler, ssl_wrap_socket
-from tornado import process
-from tornado.util import errno_from_exception
-
-try:
- import ssl
-except ImportError:
- # ssl is not available on Google App Engine.
- ssl = None
-
-
-class TCPServer(object):
- r"""A non-blocking, single-threaded TCP server.
-
- To use `TCPServer`, define a subclass which overrides the `handle_stream`
- method. For example, a simple echo server could be defined like this::
-
- from tornado.tcpserver import TCPServer
- from tornado.iostream import StreamClosedError
- from tornado import gen
-
- class EchoServer(TCPServer):
- @gen.coroutine
- def handle_stream(self, stream, address):
- while True:
- try:
- data = yield stream.read_until(b"\n")
- yield stream.write(data)
- except StreamClosedError:
- break
-
- To make this server serve SSL traffic, pass the ``ssl_options`` keyword
- argument with an `ssl.SSLContext` object. For compatibility with older
- versions of Python ``ssl_options`` may also be a dictionary of keyword
- arguments for the `ssl.wrap_socket` method.::
-
- ssl_ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
- ssl_ctx.load_cert_chain(os.path.join(data_dir, "mydomain.crt"),
- os.path.join(data_dir, "mydomain.key"))
- TCPServer(ssl_options=ssl_ctx)
-
- `TCPServer` initialization follows one of three patterns:
-
- 1. `listen`: simple single-process::
-
- server = TCPServer()
- server.listen(8888)
- IOLoop.current().start()
-
- 2. `bind`/`start`: simple multi-process::
-
- server = TCPServer()
- server.bind(8888)
- server.start(0) # Forks multiple sub-processes
- IOLoop.current().start()
-
- When using this interface, an `.IOLoop` must *not* be passed
- to the `TCPServer` constructor. `start` will always start
- the server on the default singleton `.IOLoop`.
-
- 3. `add_sockets`: advanced multi-process::
-
- sockets = bind_sockets(8888)
- tornado.process.fork_processes(0)
- server = TCPServer()
- server.add_sockets(sockets)
- IOLoop.current().start()
-
- The `add_sockets` interface is more complicated, but it can be
- used with `tornado.process.fork_processes` to give you more
- flexibility in when the fork happens. `add_sockets` can
- also be used in single-process servers if you want to create
- your listening sockets in some way other than
- `~tornado.netutil.bind_sockets`.
-
- .. versionadded:: 3.1
- The ``max_buffer_size`` argument.
- """
- def __init__(self, io_loop=None, ssl_options=None, max_buffer_size=None,
- read_chunk_size=None):
- self.io_loop = io_loop
- self.ssl_options = ssl_options
- self._sockets = {} # fd -> socket object
- self._pending_sockets = []
- self._started = False
- self._stopped = False
- self.max_buffer_size = max_buffer_size
- self.read_chunk_size = read_chunk_size
-
- # Verify the SSL options. Otherwise we don't get errors until clients
- # connect. This doesn't verify that the keys are legitimate, but
- # the SSL module doesn't do that until there is a connected socket,
- # which seems like too much work.
- if self.ssl_options is not None and isinstance(self.ssl_options, dict):
- # Only certfile is required: it can contain both keys
- if 'certfile' not in self.ssl_options:
- raise KeyError('missing key "certfile" in ssl_options')
-
- if not os.path.exists(self.ssl_options['certfile']):
- raise ValueError('certfile "%s" does not exist' %
- self.ssl_options['certfile'])
- if ('keyfile' in self.ssl_options and
- not os.path.exists(self.ssl_options['keyfile'])):
- raise ValueError('keyfile "%s" does not exist' %
- self.ssl_options['keyfile'])
-
- def listen(self, port, address=""):
- """Starts accepting connections on the given port.
-
- This method may be called more than once to listen on multiple ports.
- `listen` takes effect immediately; it is not necessary to call
- `TCPServer.start` afterwards. It is, however, necessary to start
- the `.IOLoop`.
- """
- sockets = bind_sockets(port, address=address)
- self.add_sockets(sockets)
-
- def add_sockets(self, sockets):
- """Makes this server start accepting connections on the given sockets.
-
- The ``sockets`` parameter is a list of socket objects such as
- those returned by `~tornado.netutil.bind_sockets`.
- `add_sockets` is typically used in combination with that
- method and `tornado.process.fork_processes` to provide greater
- control over the initialization of a multi-process server.
- """
- if self.io_loop is None:
- self.io_loop = IOLoop.current()
-
- for sock in sockets:
- self._sockets[sock.fileno()] = sock
- add_accept_handler(sock, self._handle_connection,
- io_loop=self.io_loop)
-
- def add_socket(self, socket):
- """Singular version of `add_sockets`. Takes a single socket object."""
- self.add_sockets([socket])
-
- def bind(self, port, address=None, family=socket.AF_UNSPEC, backlog=128,
- reuse_port=False):
- """Binds this server to the given port on the given address.
-
- To start the server, call `start`. If you want to run this server
- in a single process, you can call `listen` as a shortcut to the
- sequence of `bind` and `start` calls.
-
- Address may be either an IP address or hostname. If it's a hostname,
- the server will listen on all IP addresses associated with the
- name. Address may be an empty string or None to listen on all
- available interfaces. Family may be set to either `socket.AF_INET`
- or `socket.AF_INET6` to restrict to IPv4 or IPv6 addresses, otherwise
- both will be used if available.
-
- The ``backlog`` argument has the same meaning as for
- `socket.listen <socket.socket.listen>`. The ``reuse_port`` argument
- has the same meaning as for `.bind_sockets`.
-
- This method may be called multiple times prior to `start` to listen
- on multiple ports or interfaces.
-
- .. versionchanged:: 4.4
- Added the ``reuse_port`` argument.
- """
- sockets = bind_sockets(port, address=address, family=family,
- backlog=backlog, reuse_port=reuse_port)
- if self._started:
- self.add_sockets(sockets)
- else:
- self._pending_sockets.extend(sockets)
-
- def start(self, num_processes=1):
- """Starts this server in the `.IOLoop`.
-
- By default, we run the server in this process and do not fork any
- additional child process.
-
- If num_processes is ``None`` or <= 0, we detect the number of cores
- available on this machine and fork that number of child
- processes. If num_processes is given and > 1, we fork that
- specific number of sub-processes.
-
- Since we use processes and not threads, there is no shared memory
- between any server code.
-
- Note that multiple processes are not compatible with the autoreload
- module (or the ``autoreload=True`` option to `tornado.web.Application`
- which defaults to True when ``debug=True``).
- When using multiple processes, no IOLoops can be created or
- referenced until after the call to ``TCPServer.start(n)``.
- """
- assert not self._started
- self._started = True
- if num_processes != 1:
- process.fork_processes(num_processes)
- sockets = self._pending_sockets
- self._pending_sockets = []
- self.add_sockets(sockets)
-
- def stop(self):
- """Stops listening for new connections.
-
- Requests currently in progress may still continue after the
- server is stopped.
- """
- if self._stopped:
- return
- self._stopped = True
- for fd, sock in self._sockets.items():
- assert sock.fileno() == fd
- self.io_loop.remove_handler(fd)
- sock.close()
-
- def handle_stream(self, stream, address):
- """Override to handle a new `.IOStream` from an incoming connection.
-
- This method may be a coroutine; if so any exceptions it raises
- asynchronously will be logged. Accepting of incoming connections
- will not be blocked by this coroutine.
-
- If this `TCPServer` is configured for SSL, ``handle_stream``
- may be called before the SSL handshake has completed. Use
- `.SSLIOStream.wait_for_handshake` if you need to verify the client's
- certificate or use NPN/ALPN.
-
- .. versionchanged:: 4.2
- Added the option for this method to be a coroutine.
- """
- raise NotImplementedError()
-
- def _handle_connection(self, connection, address):
- if self.ssl_options is not None:
- assert ssl, "Python 2.6+ and OpenSSL required for SSL"
- try:
- connection = ssl_wrap_socket(connection,
- self.ssl_options,
- server_side=True,
- do_handshake_on_connect=False)
- except ssl.SSLError as err:
- if err.args[0] == ssl.SSL_ERROR_EOF:
- return connection.close()
- else:
- raise
- except socket.error as err:
- # If the connection is closed immediately after it is created
- # (as in a port scan), we can get one of several errors.
- # wrap_socket makes an internal call to getpeername,
- # which may return either EINVAL (Mac OS X) or ENOTCONN
- # (Linux). If it returns ENOTCONN, this error is
- # silently swallowed by the ssl module, so we need to
- # catch another error later on (AttributeError in
- # SSLIOStream._do_ssl_handshake).
- # To test this behavior, try nmap with the -sT flag.
- # https://github.com/tornadoweb/tornado/pull/750
- if errno_from_exception(err) in (errno.ECONNABORTED, errno.EINVAL):
- return connection.close()
- else:
- raise
- try:
- if self.ssl_options is not None:
- stream = SSLIOStream(connection, io_loop=self.io_loop,
- max_buffer_size=self.max_buffer_size,
- read_chunk_size=self.read_chunk_size)
- else:
- stream = IOStream(connection, io_loop=self.io_loop,
- max_buffer_size=self.max_buffer_size,
- read_chunk_size=self.read_chunk_size)
-
- future = self.handle_stream(stream, address)
- if future is not None:
- self.io_loop.add_future(gen.convert_yielded(future),
- lambda f: f.result())
- except Exception:
- app_log.error("Error in connection callback", exc_info=True)
+#!/usr/bin/env python
+#
+# Copyright 2011 Facebook
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""A non-blocking, single-threaded TCP server."""
+from __future__ import absolute_import, division, print_function
+
+import errno
+import os
+import socket
+
+from tornado import gen
+from tornado.log import app_log
+from tornado.ioloop import IOLoop
+from tornado.iostream import IOStream, SSLIOStream
+from tornado.netutil import bind_sockets, add_accept_handler, ssl_wrap_socket
+from tornado import process
+from tornado.util import errno_from_exception
+
+try:
+ import ssl
+except ImportError:
+ # ssl is not available on Google App Engine.
+ ssl = None
+
+
+class TCPServer(object):
+ r"""A non-blocking, single-threaded TCP server.
+
+ To use `TCPServer`, define a subclass which overrides the `handle_stream`
+ method. For example, a simple echo server could be defined like this::
+
+ from tornado.tcpserver import TCPServer
+ from tornado.iostream import StreamClosedError
+ from tornado import gen
+
+ class EchoServer(TCPServer):
+ @gen.coroutine
+ def handle_stream(self, stream, address):
+ while True:
+ try:
+ data = yield stream.read_until(b"\n")
+ yield stream.write(data)
+ except StreamClosedError:
+ break
+
+    To make this server serve SSL traffic, pass an `ssl.SSLContext` object
+    as the ``ssl_options`` keyword argument. For compatibility with older
+    versions of Python, ``ssl_options`` may also be a dictionary of keyword
+    arguments for the `ssl.wrap_socket` method::
+
+ ssl_ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
+ ssl_ctx.load_cert_chain(os.path.join(data_dir, "mydomain.crt"),
+ os.path.join(data_dir, "mydomain.key"))
+ TCPServer(ssl_options=ssl_ctx)
+
+ `TCPServer` initialization follows one of three patterns:
+
+ 1. `listen`: simple single-process::
+
+ server = TCPServer()
+ server.listen(8888)
+ IOLoop.current().start()
+
+ 2. `bind`/`start`: simple multi-process::
+
+ server = TCPServer()
+ server.bind(8888)
+ server.start(0) # Forks multiple sub-processes
+ IOLoop.current().start()
+
+ When using this interface, an `.IOLoop` must *not* be passed
+ to the `TCPServer` constructor. `start` will always start
+ the server on the default singleton `.IOLoop`.
+
+ 3. `add_sockets`: advanced multi-process::
+
+ sockets = bind_sockets(8888)
+ tornado.process.fork_processes(0)
+ server = TCPServer()
+ server.add_sockets(sockets)
+ IOLoop.current().start()
+
+ The `add_sockets` interface is more complicated, but it can be
+ used with `tornado.process.fork_processes` to give you more
+ flexibility in when the fork happens. `add_sockets` can
+ also be used in single-process servers if you want to create
+ your listening sockets in some way other than
+ `~tornado.netutil.bind_sockets`.
+
+ .. versionadded:: 3.1
+ The ``max_buffer_size`` argument.
+ """
+ def __init__(self, io_loop=None, ssl_options=None, max_buffer_size=None,
+ read_chunk_size=None):
+ self.io_loop = io_loop
+ self.ssl_options = ssl_options
+ self._sockets = {} # fd -> socket object
+ self._pending_sockets = []
+ self._started = False
+ self._stopped = False
+ self.max_buffer_size = max_buffer_size
+ self.read_chunk_size = read_chunk_size
+
+        # Verify the SSL options now; otherwise we don't get errors until
+        # clients connect. This doesn't verify that the keys are legitimate,
+        # but the SSL module doesn't do that until there is a connected
+        # socket, which seems like too much work.
+ if self.ssl_options is not None and isinstance(self.ssl_options, dict):
+ # Only certfile is required: it can contain both keys
+ if 'certfile' not in self.ssl_options:
+ raise KeyError('missing key "certfile" in ssl_options')
+
+ if not os.path.exists(self.ssl_options['certfile']):
+ raise ValueError('certfile "%s" does not exist' %
+ self.ssl_options['certfile'])
+ if ('keyfile' in self.ssl_options and
+ not os.path.exists(self.ssl_options['keyfile'])):
+ raise ValueError('keyfile "%s" does not exist' %
+ self.ssl_options['keyfile'])
+
+ def listen(self, port, address=""):
+ """Starts accepting connections on the given port.
+
+ This method may be called more than once to listen on multiple ports.
+ `listen` takes effect immediately; it is not necessary to call
+ `TCPServer.start` afterwards. It is, however, necessary to start
+ the `.IOLoop`.
+ """
+ sockets = bind_sockets(port, address=address)
+ self.add_sockets(sockets)
+
+ def add_sockets(self, sockets):
+ """Makes this server start accepting connections on the given sockets.
+
+ The ``sockets`` parameter is a list of socket objects such as
+ those returned by `~tornado.netutil.bind_sockets`.
+ `add_sockets` is typically used in combination with that
+ method and `tornado.process.fork_processes` to provide greater
+ control over the initialization of a multi-process server.
+ """
+ if self.io_loop is None:
+ self.io_loop = IOLoop.current()
+
+ for sock in sockets:
+ self._sockets[sock.fileno()] = sock
+ add_accept_handler(sock, self._handle_connection,
+ io_loop=self.io_loop)
+
+ def add_socket(self, socket):
+ """Singular version of `add_sockets`. Takes a single socket object."""
+ self.add_sockets([socket])
+
+ def bind(self, port, address=None, family=socket.AF_UNSPEC, backlog=128,
+ reuse_port=False):
+ """Binds this server to the given port on the given address.
+
+ To start the server, call `start`. If you want to run this server
+ in a single process, you can call `listen` as a shortcut to the
+ sequence of `bind` and `start` calls.
+
+ Address may be either an IP address or hostname. If it's a hostname,
+ the server will listen on all IP addresses associated with the
+ name. Address may be an empty string or None to listen on all
+ available interfaces. Family may be set to either `socket.AF_INET`
+ or `socket.AF_INET6` to restrict to IPv4 or IPv6 addresses, otherwise
+ both will be used if available.
+
+ The ``backlog`` argument has the same meaning as for
+ `socket.listen <socket.socket.listen>`. The ``reuse_port`` argument
+ has the same meaning as for `.bind_sockets`.
+
+ This method may be called multiple times prior to `start` to listen
+ on multiple ports or interfaces.
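+
+        For example, a sketch of listening on two ports before forking
+        (port numbers are illustrative)::
+
+            server = TCPServer()
+            server.bind(8888)
+            server.bind(8889, address="127.0.0.1")
+            server.start(0)  # forks one child per CPU core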
+
+ .. versionchanged:: 4.4
+ Added the ``reuse_port`` argument.
+ """
+ sockets = bind_sockets(port, address=address, family=family,
+ backlog=backlog, reuse_port=reuse_port)
+ if self._started:
+ self.add_sockets(sockets)
+ else:
+ self._pending_sockets.extend(sockets)
+
+ def start(self, num_processes=1):
+ """Starts this server in the `.IOLoop`.
+
+ By default, we run the server in this process and do not fork any
+ additional child process.
+
+ If num_processes is ``None`` or <= 0, we detect the number of cores
+ available on this machine and fork that number of child
+ processes. If num_processes is given and > 1, we fork that
+ specific number of sub-processes.
+
+        Since we use processes and not threads, there is no shared memory
+        between the server processes.
+
+ Note that multiple processes are not compatible with the autoreload
+        module (or the ``autoreload=True`` option to `tornado.web.Application`,
+        which defaults to True when ``debug=True``).
+ When using multiple processes, no IOLoops can be created or
+ referenced until after the call to ``TCPServer.start(n)``.
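+
+        A sketch of that ordering constraint (the port is illustrative)::
+
+            server = TCPServer()
+            server.bind(8888)
+            server.start(0)  # fork before creating any IOLoop
+            IOLoop.current().start()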
+ """
+ assert not self._started
+ self._started = True
+ if num_processes != 1:
+ process.fork_processes(num_processes)
+ sockets = self._pending_sockets
+ self._pending_sockets = []
+ self.add_sockets(sockets)
+
+ def stop(self):
+ """Stops listening for new connections.
+
+ Requests currently in progress may still continue after the
+ server is stopped.
+ """
+ if self._stopped:
+ return
+ self._stopped = True
+ for fd, sock in self._sockets.items():
+ assert sock.fileno() == fd
+ self.io_loop.remove_handler(fd)
+ sock.close()
+
+ def handle_stream(self, stream, address):
+ """Override to handle a new `.IOStream` from an incoming connection.
+
+ This method may be a coroutine; if so any exceptions it raises
+ asynchronously will be logged. Accepting of incoming connections
+ will not be blocked by this coroutine.
+
+ If this `TCPServer` is configured for SSL, ``handle_stream``
+ may be called before the SSL handshake has completed. Use
+ `.SSLIOStream.wait_for_handshake` if you need to verify the client's
+ certificate or use NPN/ALPN.
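+
+        A sketch of waiting for the handshake in a coroutine (assuming
+        this server was constructed with ``ssl_options``)::
+
+            @gen.coroutine
+            def handle_stream(self, stream, address):
+                yield stream.wait_for_handshake()
+                print(stream.socket.getpeercert())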
+
+ .. versionchanged:: 4.2
+ Added the option for this method to be a coroutine.
+ """
+ raise NotImplementedError()
+
+ def _handle_connection(self, connection, address):
+ if self.ssl_options is not None:
+ assert ssl, "Python 2.6+ and OpenSSL required for SSL"
+ try:
+ connection = ssl_wrap_socket(connection,
+ self.ssl_options,
+ server_side=True,
+ do_handshake_on_connect=False)
+ except ssl.SSLError as err:
+ if err.args[0] == ssl.SSL_ERROR_EOF:
+ return connection.close()
+ else:
+ raise
+ except socket.error as err:
+ # If the connection is closed immediately after it is created
+ # (as in a port scan), we can get one of several errors.
+ # wrap_socket makes an internal call to getpeername,
+ # which may return either EINVAL (Mac OS X) or ENOTCONN
+ # (Linux). If it returns ENOTCONN, this error is
+ # silently swallowed by the ssl module, so we need to
+ # catch another error later on (AttributeError in
+ # SSLIOStream._do_ssl_handshake).
+ # To test this behavior, try nmap with the -sT flag.
+ # https://github.com/tornadoweb/tornado/pull/750
+ if errno_from_exception(err) in (errno.ECONNABORTED, errno.EINVAL):
+ return connection.close()
+ else:
+ raise
+ try:
+ if self.ssl_options is not None:
+ stream = SSLIOStream(connection, io_loop=self.io_loop,
+ max_buffer_size=self.max_buffer_size,
+ read_chunk_size=self.read_chunk_size)
+ else:
+ stream = IOStream(connection, io_loop=self.io_loop,
+ max_buffer_size=self.max_buffer_size,
+ read_chunk_size=self.read_chunk_size)
+
+ future = self.handle_stream(stream, address)
+ if future is not None:
+ self.io_loop.add_future(gen.convert_yielded(future),
+ lambda f: f.result())
+ except Exception:
+ app_log.error("Error in connection callback", exc_info=True)
diff --git a/contrib/python/tornado/tornado-4/tornado/template.py b/contrib/python/tornado/tornado-4/tornado/template.py
index 3b2fa3feef..10c14d53c9 100644
--- a/contrib/python/tornado/tornado-4/tornado/template.py
+++ b/contrib/python/tornado/tornado-4/tornado/template.py
@@ -1,978 +1,978 @@
-#!/usr/bin/env python
-#
-# Copyright 2009 Facebook
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""A simple template system that compiles templates to Python code.
-
-Basic usage looks like::
-
- t = template.Template("<html>{{ myvalue }}</html>")
- print(t.generate(myvalue="XXX"))
-
-`Loader` is a class that loads templates from a root directory and caches
-the compiled templates::
-
- loader = template.Loader("/home/btaylor")
- print(loader.load("test.html").generate(myvalue="XXX"))
-
-We compile all templates to raw Python. Error-reporting is currently... uh,
-interesting. Syntax for the templates::
-
- ### base.html
- <html>
- <head>
- <title>{% block title %}Default title{% end %}</title>
- </head>
- <body>
- <ul>
- {% for student in students %}
- {% block student %}
- <li>{{ escape(student.name) }}</li>
- {% end %}
- {% end %}
- </ul>
- </body>
- </html>
-
- ### bold.html
- {% extends "base.html" %}
-
- {% block title %}A bolder title{% end %}
-
- {% block student %}
- <li><span style="bold">{{ escape(student.name) }}</span></li>
- {% end %}
-
-Unlike most other template systems, we do not put any restrictions on the
-expressions you can include in your statements. ``if`` and ``for`` blocks get
-translated exactly into Python, so you can do complex expressions like::
-
- {% for student in [p for p in people if p.student and p.age > 23] %}
- <li>{{ escape(student.name) }}</li>
- {% end %}
-
-Translating directly to Python means you can apply functions to expressions
-easily, like the ``escape()`` function in the examples above. You can pass
-functions in to your template just like any other variable
-(In a `.RequestHandler`, override `.RequestHandler.get_template_namespace`)::
-
- ### Python code
- def add(x, y):
- return x + y
- template.execute(add=add)
-
- ### The template
- {{ add(1, 2) }}
-
-We provide the functions `escape() <.xhtml_escape>`, `.url_escape()`,
-`.json_encode()`, and `.squeeze()` to all templates by default.
-
-Typical applications do not create `Template` or `Loader` instances by
-hand, but instead use the `~.RequestHandler.render` and
-`~.RequestHandler.render_string` methods of
-`tornado.web.RequestHandler`, which load templates automatically based
-on the ``template_path`` `.Application` setting.
-
-Variable names beginning with ``_tt_`` are reserved by the template
-system and should not be used by application code.
-
-Syntax Reference
-----------------
-
-Template expressions are surrounded by double curly braces: ``{{ ... }}``.
-The contents may be any python expression, which will be escaped according
-to the current autoescape setting and inserted into the output. Other
-template directives use ``{% %}``.
-
-To comment out a section so that it is omitted from the output, surround it
-with ``{# ... #}``.
-
-These tags may be escaped as ``{{!``, ``{%!``, and ``{#!``
-if you need to include a literal ``{{``, ``{%``, or ``{#`` in the output.
-
-
-``{% apply *function* %}...{% end %}``
- Applies a function to the output of all template code between ``apply``
- and ``end``::
-
- {% apply linkify %}{{name}} said: {{message}}{% end %}
-
- Note that as an implementation detail apply blocks are implemented
- as nested functions and thus may interact strangely with variables
- set via ``{% set %}``, or the use of ``{% break %}`` or ``{% continue %}``
- within loops.
-
-``{% autoescape *function* %}``
- Sets the autoescape mode for the current file. This does not affect
- other files, even those referenced by ``{% include %}``. Note that
- autoescaping can also be configured globally, at the `.Application`
- or `Loader`.::
-
- {% autoescape xhtml_escape %}
- {% autoescape None %}
-
-``{% block *name* %}...{% end %}``
- Indicates a named, replaceable block for use with ``{% extends %}``.
- Blocks in the parent template will be replaced with the contents of
- the same-named block in a child template.::
-
- <!-- base.html -->
- <title>{% block title %}Default title{% end %}</title>
-
- <!-- mypage.html -->
- {% extends "base.html" %}
- {% block title %}My page title{% end %}
-
-``{% comment ... %}``
- A comment which will be removed from the template output. Note that
- there is no ``{% end %}`` tag; the comment goes from the word ``comment``
- to the closing ``%}`` tag.
-
-``{% extends *filename* %}``
- Inherit from another template. Templates that use ``extends`` should
- contain one or more ``block`` tags to replace content from the parent
- template. Anything in the child template not contained in a ``block``
- tag will be ignored. For an example, see the ``{% block %}`` tag.
-
-``{% for *var* in *expr* %}...{% end %}``
- Same as the python ``for`` statement. ``{% break %}`` and
- ``{% continue %}`` may be used inside the loop.
-
-``{% from *x* import *y* %}``
- Same as the python ``import`` statement.
-
-``{% if *condition* %}...{% elif *condition* %}...{% else %}...{% end %}``
- Conditional statement - outputs the first section whose condition is
- true. (The ``elif`` and ``else`` sections are optional)
-
-``{% import *module* %}``
- Same as the python ``import`` statement.
-
-``{% include *filename* %}``
- Includes another template file. The included file can see all the local
- variables as if it were copied directly to the point of the ``include``
- directive (the ``{% autoescape %}`` directive is an exception).
- Alternately, ``{% module Template(filename, **kwargs) %}`` may be used
- to include another template with an isolated namespace.
-
-``{% module *expr* %}``
- Renders a `~tornado.web.UIModule`. The output of the ``UIModule`` is
- not escaped::
-
- {% module Template("foo.html", arg=42) %}
-
- ``UIModules`` are a feature of the `tornado.web.RequestHandler`
- class (and specifically its ``render`` method) and will not work
- when the template system is used on its own in other contexts.
-
-``{% raw *expr* %}``
- Outputs the result of the given expression without autoescaping.
-
-``{% set *x* = *y* %}``
- Sets a local variable.
-
-``{% try %}...{% except %}...{% else %}...{% finally %}...{% end %}``
- Same as the python ``try`` statement.
-
-``{% while *condition* %}... {% end %}``
- Same as the python ``while`` statement. ``{% break %}`` and
- ``{% continue %}`` may be used inside the loop.
-
-``{% whitespace *mode* %}``
- Sets the whitespace mode for the remainder of the current file
- (or until the next ``{% whitespace %}`` directive). See
- `filter_whitespace` for available options. New in Tornado 4.3.
-"""
-
-from __future__ import absolute_import, division, print_function
-
-import datetime
-import linecache
-import os.path
-import posixpath
-import re
-import threading
-
-from tornado import escape
-from tornado.log import app_log
-from tornado.util import ObjectDict, exec_in, unicode_type, PY3
-
-if PY3:
- from io import StringIO
-else:
- from cStringIO import StringIO
-
-_DEFAULT_AUTOESCAPE = "xhtml_escape"
-_UNSET = object()
-
-
-def filter_whitespace(mode, text):
- """Transform whitespace in ``text`` according to ``mode``.
-
- Available modes are:
-
- * ``all``: Return all whitespace unmodified.
- * ``single``: Collapse consecutive whitespace with a single whitespace
- character, preserving newlines.
- * ``oneline``: Collapse all runs of whitespace into a single space
- character, removing all newlines in the process.
-
- .. versionadded:: 4.3
- """
- if mode == 'all':
- return text
- elif mode == 'single':
- text = re.sub(r"([\t ]+)", " ", text)
- text = re.sub(r"(\s*\n\s*)", "\n", text)
- return text
- elif mode == 'oneline':
- return re.sub(r"(\s+)", " ", text)
- else:
- raise Exception("invalid whitespace mode %s" % mode)
-
-
-class Template(object):
- """A compiled template.
-
- We compile into Python from the given template_string. You can generate
- the template from variables with generate().
- """
- # note that the constructor's signature is not extracted with
- # autodoc because _UNSET looks like garbage. When changing
- # this signature update website/sphinx/template.rst too.
- def __init__(self, template_string, name="<string>", loader=None,
- compress_whitespace=_UNSET, autoescape=_UNSET,
- whitespace=None):
- """Construct a Template.
-
- :arg str template_string: the contents of the template file.
- :arg str name: the filename from which the template was loaded
- (used for error message).
- :arg tornado.template.BaseLoader loader: the `~tornado.template.BaseLoader` responsible for this template,
- used to resolve ``{% include %}`` and ``{% extend %}``
- directives.
- :arg bool compress_whitespace: Deprecated since Tornado 4.3.
- Equivalent to ``whitespace="single"`` if true and
- ``whitespace="all"`` if false.
- :arg str autoescape: The name of a function in the template
- namespace, or ``None`` to disable escaping by default.
- :arg str whitespace: A string specifying treatment of whitespace;
- see `filter_whitespace` for options.
-
- .. versionchanged:: 4.3
- Added ``whitespace`` parameter; deprecated ``compress_whitespace``.
- """
- self.name = escape.native_str(name)
-
- if compress_whitespace is not _UNSET:
- # Convert deprecated compress_whitespace (bool) to whitespace (str).
- if whitespace is not None:
- raise Exception("cannot set both whitespace and compress_whitespace")
- whitespace = "single" if compress_whitespace else "all"
- if whitespace is None:
- if loader and loader.whitespace:
- whitespace = loader.whitespace
- else:
- # Whitespace defaults by filename.
- if name.endswith(".html") or name.endswith(".js"):
- whitespace = "single"
- else:
- whitespace = "all"
- # Validate the whitespace setting.
- filter_whitespace(whitespace, '')
-
- if autoescape is not _UNSET:
- self.autoescape = autoescape
- elif loader:
- self.autoescape = loader.autoescape
- else:
- self.autoescape = _DEFAULT_AUTOESCAPE
-
- self.namespace = loader.namespace if loader else {}
- reader = _TemplateReader(name, escape.native_str(template_string),
- whitespace)
- self.file = _File(self, _parse(reader, self))
- self.code = self._generate_python(loader)
- self.loader = loader
- try:
- # Under python2.5, the fake filename used here must match
- # the module name used in __name__ below.
- # The dont_inherit flag prevents template.py's future imports
- # from being applied to the generated code.
- self.compiled = compile(
- escape.to_unicode(self.code),
- "%s.generated.py" % self.name.replace('.', '_'),
- "exec", dont_inherit=True)
- except Exception:
- formatted_code = _format_code(self.code).rstrip()
- app_log.error("%s code:\n%s", self.name, formatted_code)
- raise
-
- def generate(self, **kwargs):
- """Generate this template with the given arguments."""
- namespace = {
- "escape": escape.xhtml_escape,
- "xhtml_escape": escape.xhtml_escape,
- "url_escape": escape.url_escape,
- "json_encode": escape.json_encode,
- "squeeze": escape.squeeze,
- "linkify": escape.linkify,
- "datetime": datetime,
- "_tt_utf8": escape.utf8, # for internal use
- "_tt_string_types": (unicode_type, bytes),
- # __name__ and __loader__ allow the traceback mechanism to find
- # the generated source code.
- "__name__": self.name.replace('.', '_'),
- "__loader__": ObjectDict(get_source=lambda name: self.code),
- }
- namespace.update(self.namespace)
- namespace.update(kwargs)
- exec_in(self.compiled, namespace)
- execute = namespace["_tt_execute"]
- # Clear the traceback module's cache of source data now that
- # we've generated a new template (mainly for this module's
- # unittests, where different tests reuse the same name).
- linecache.clearcache()
- return execute()
-
- def _generate_python(self, loader):
- buffer = StringIO()
- try:
- # named_blocks maps from names to _NamedBlock objects
- named_blocks = {}
- ancestors = self._get_ancestors(loader)
- ancestors.reverse()
- for ancestor in ancestors:
- ancestor.find_named_blocks(loader, named_blocks)
- writer = _CodeWriter(buffer, named_blocks, loader,
- ancestors[0].template)
- ancestors[0].generate(writer)
- return buffer.getvalue()
- finally:
- buffer.close()
-
- def _get_ancestors(self, loader):
- ancestors = [self.file]
- for chunk in self.file.body.chunks:
- if isinstance(chunk, _ExtendsBlock):
- if not loader:
- raise ParseError("{% extends %} block found, but no "
- "template loader")
- template = loader.load(chunk.name, self.name)
- ancestors.extend(template._get_ancestors(loader))
- return ancestors
-
-
-class BaseLoader(object):
- """Base class for template loaders.
-
- You must use a template loader to use template constructs like
- ``{% extends %}`` and ``{% include %}``. The loader caches all
- templates after they are loaded the first time.
- """
- def __init__(self, autoescape=_DEFAULT_AUTOESCAPE, namespace=None,
- whitespace=None):
- """Construct a template loader.
-
- :arg str autoescape: The name of a function in the template
- namespace, such as "xhtml_escape", or ``None`` to disable
- autoescaping by default.
- :arg dict namespace: A dictionary to be added to the default template
- namespace, or ``None``.
- :arg str whitespace: A string specifying default behavior for
- whitespace in templates; see `filter_whitespace` for options.
- Default is "single" for files ending in ".html" and ".js" and
- "all" for other files.
-
- .. versionchanged:: 4.3
- Added ``whitespace`` parameter.
- """
- self.autoescape = autoescape
- self.namespace = namespace or {}
- self.whitespace = whitespace
- self.templates = {}
- # self.lock protects self.templates. It's a reentrant lock
- # because templates may load other templates via `include` or
- # `extends`. Note that thanks to the GIL this code would be safe
- # even without the lock, but could lead to wasted work as multiple
- # threads tried to compile the same template simultaneously.
- self.lock = threading.RLock()
-
- def reset(self):
- """Resets the cache of compiled templates."""
- with self.lock:
- self.templates = {}
-
- def resolve_path(self, name, parent_path=None):
- """Converts a possibly-relative path to absolute (used internally)."""
- raise NotImplementedError()
-
- def load(self, name, parent_path=None):
- """Loads a template."""
- name = self.resolve_path(name, parent_path=parent_path)
- with self.lock:
- if name not in self.templates:
- self.templates[name] = self._create_template(name)
- return self.templates[name]
-
- def _create_template(self, name):
- raise NotImplementedError()
-
-
-class Loader(BaseLoader):
- """A template loader that loads from a single root directory.
- """
- def __init__(self, root_directory, **kwargs):
- super(Loader, self).__init__(**kwargs)
- self.root = os.path.abspath(root_directory)
-
- def resolve_path(self, name, parent_path=None):
- if parent_path and not parent_path.startswith("<") and \
- not parent_path.startswith("/") and \
- not name.startswith("/"):
- current_path = os.path.join(self.root, parent_path)
- file_dir = os.path.dirname(os.path.abspath(current_path))
- relative_path = os.path.abspath(os.path.join(file_dir, name))
- if relative_path.startswith(self.root):
- name = relative_path[len(self.root) + 1:]
- return name
-
- def _create_template(self, name):
- path = os.path.join(self.root, name)
- with open(path, "rb") as f:
- template = Template(f.read(), name=name, loader=self)
- return template
-
-
-class DictLoader(BaseLoader):
- """A template loader that loads from a dictionary."""
- def __init__(self, dict, **kwargs):
- super(DictLoader, self).__init__(**kwargs)
- self.dict = dict
-
- def resolve_path(self, name, parent_path=None):
- if parent_path and not parent_path.startswith("<") and \
- not parent_path.startswith("/") and \
- not name.startswith("/"):
- file_dir = posixpath.dirname(parent_path)
- name = posixpath.normpath(posixpath.join(file_dir, name))
- return name
-
- def _create_template(self, name):
- return Template(self.dict[name], name=name, loader=self)
-
-
-class _Node(object):
- def each_child(self):
- return ()
-
- def generate(self, writer):
- raise NotImplementedError()
-
- def find_named_blocks(self, loader, named_blocks):
- for child in self.each_child():
- child.find_named_blocks(loader, named_blocks)
-
-
-class _File(_Node):
- def __init__(self, template, body):
- self.template = template
- self.body = body
- self.line = 0
-
- def generate(self, writer):
- writer.write_line("def _tt_execute():", self.line)
- with writer.indent():
- writer.write_line("_tt_buffer = []", self.line)
- writer.write_line("_tt_append = _tt_buffer.append", self.line)
- self.body.generate(writer)
- writer.write_line("return _tt_utf8('').join(_tt_buffer)", self.line)
-
- def each_child(self):
- return (self.body,)
-
-
-class _ChunkList(_Node):
- def __init__(self, chunks):
- self.chunks = chunks
-
- def generate(self, writer):
- for chunk in self.chunks:
- chunk.generate(writer)
-
- def each_child(self):
- return self.chunks
-
-
-class _NamedBlock(_Node):
- def __init__(self, name, body, template, line):
- self.name = name
- self.body = body
- self.template = template
- self.line = line
-
- def each_child(self):
- return (self.body,)
-
- def generate(self, writer):
- block = writer.named_blocks[self.name]
- with writer.include(block.template, self.line):
- block.body.generate(writer)
-
- def find_named_blocks(self, loader, named_blocks):
- named_blocks[self.name] = self
- _Node.find_named_blocks(self, loader, named_blocks)
-
-
-class _ExtendsBlock(_Node):
- def __init__(self, name):
- self.name = name
-
-
-class _IncludeBlock(_Node):
- def __init__(self, name, reader, line):
- self.name = name
- self.template_name = reader.name
- self.line = line
-
- def find_named_blocks(self, loader, named_blocks):
- included = loader.load(self.name, self.template_name)
- included.file.find_named_blocks(loader, named_blocks)
-
- def generate(self, writer):
- included = writer.loader.load(self.name, self.template_name)
- with writer.include(included, self.line):
- included.file.body.generate(writer)
-
-
-class _ApplyBlock(_Node):
- def __init__(self, method, line, body=None):
- self.method = method
- self.line = line
- self.body = body
-
- def each_child(self):
- return (self.body,)
-
- def generate(self, writer):
- method_name = "_tt_apply%d" % writer.apply_counter
- writer.apply_counter += 1
- writer.write_line("def %s():" % method_name, self.line)
- with writer.indent():
- writer.write_line("_tt_buffer = []", self.line)
- writer.write_line("_tt_append = _tt_buffer.append", self.line)
- self.body.generate(writer)
- writer.write_line("return _tt_utf8('').join(_tt_buffer)", self.line)
- writer.write_line("_tt_append(_tt_utf8(%s(%s())))" % (
- self.method, method_name), self.line)
-
-
-class _ControlBlock(_Node):
- def __init__(self, statement, line, body=None):
- self.statement = statement
- self.line = line
- self.body = body
-
- def each_child(self):
- return (self.body,)
-
- def generate(self, writer):
- writer.write_line("%s:" % self.statement, self.line)
- with writer.indent():
- self.body.generate(writer)
- # Just in case the body was empty
- writer.write_line("pass", self.line)
-
-
-class _IntermediateControlBlock(_Node):
- def __init__(self, statement, line):
- self.statement = statement
- self.line = line
-
- def generate(self, writer):
- # In case the previous block was empty
- writer.write_line("pass", self.line)
- writer.write_line("%s:" % self.statement, self.line, writer.indent_size() - 1)
-
-
-class _Statement(_Node):
- def __init__(self, statement, line):
- self.statement = statement
- self.line = line
-
- def generate(self, writer):
- writer.write_line(self.statement, self.line)
-
-
-class _Expression(_Node):
- def __init__(self, expression, line, raw=False):
- self.expression = expression
- self.line = line
- self.raw = raw
-
- def generate(self, writer):
- writer.write_line("_tt_tmp = %s" % self.expression, self.line)
- writer.write_line("if isinstance(_tt_tmp, _tt_string_types):"
- " _tt_tmp = _tt_utf8(_tt_tmp)", self.line)
- writer.write_line("else: _tt_tmp = _tt_utf8(str(_tt_tmp))", self.line)
- if not self.raw and writer.current_template.autoescape is not None:
- # In python3 functions like xhtml_escape return unicode,
- # so we have to convert to utf8 again.
- writer.write_line("_tt_tmp = _tt_utf8(%s(_tt_tmp))" %
- writer.current_template.autoescape, self.line)
- writer.write_line("_tt_append(_tt_tmp)", self.line)
-
-
-class _Module(_Expression):
- def __init__(self, expression, line):
- super(_Module, self).__init__("_tt_modules." + expression, line,
- raw=True)
-
-
-class _Text(_Node):
- def __init__(self, value, line, whitespace):
- self.value = value
- self.line = line
- self.whitespace = whitespace
-
- def generate(self, writer):
- value = self.value
-
- # Compress whitespace if requested, with a crude heuristic to avoid
- # altering preformatted whitespace.
- if "<pre>" not in value:
- value = filter_whitespace(self.whitespace, value)
-
- if value:
- writer.write_line('_tt_append(%r)' % escape.utf8(value), self.line)
-
-
-class ParseError(Exception):
- """Raised for template syntax errors.
-
- ``ParseError`` instances have ``filename`` and ``lineno`` attributes
- indicating the position of the error.
-
- .. versionchanged:: 4.3
- Added ``filename`` and ``lineno`` attributes.
- """
- def __init__(self, message, filename=None, lineno=0):
- self.message = message
- # The names "filename" and "lineno" are chosen for consistency
- # with python SyntaxError.
- self.filename = filename
- self.lineno = lineno
-
- def __str__(self):
- return '%s at %s:%d' % (self.message, self.filename, self.lineno)
-
-
-class _CodeWriter(object):
- def __init__(self, file, named_blocks, loader, current_template):
- self.file = file
- self.named_blocks = named_blocks
- self.loader = loader
- self.current_template = current_template
- self.apply_counter = 0
- self.include_stack = []
- self._indent = 0
-
- def indent_size(self):
- return self._indent
-
- def indent(self):
- class Indenter(object):
- def __enter__(_):
- self._indent += 1
- return self
-
- def __exit__(_, *args):
- assert self._indent > 0
- self._indent -= 1
-
- return Indenter()
-
- def include(self, template, line):
- self.include_stack.append((self.current_template, line))
- self.current_template = template
-
- class IncludeTemplate(object):
- def __enter__(_):
- return self
-
- def __exit__(_, *args):
- self.current_template = self.include_stack.pop()[0]
-
- return IncludeTemplate()
-
- def write_line(self, line, line_number, indent=None):
- if indent is None:
- indent = self._indent
- line_comment = ' # %s:%d' % (self.current_template.name, line_number)
- if self.include_stack:
- ancestors = ["%s:%d" % (tmpl.name, lineno)
- for (tmpl, lineno) in self.include_stack]
- line_comment += ' (via %s)' % ', '.join(reversed(ancestors))
- print(" " * indent + line + line_comment, file=self.file)
-
-
-class _TemplateReader(object):
- def __init__(self, name, text, whitespace):
- self.name = name
- self.text = text
- self.whitespace = whitespace
- self.line = 1
- self.pos = 0
-
- def find(self, needle, start=0, end=None):
- assert start >= 0, start
- pos = self.pos
- start += pos
- if end is None:
- index = self.text.find(needle, start)
- else:
- end += pos
- assert end >= start
- index = self.text.find(needle, start, end)
- if index != -1:
- index -= pos
- return index
-
- def consume(self, count=None):
- if count is None:
- count = len(self.text) - self.pos
- newpos = self.pos + count
- self.line += self.text.count("\n", self.pos, newpos)
- s = self.text[self.pos:newpos]
- self.pos = newpos
- return s
-
- def remaining(self):
- return len(self.text) - self.pos
-
- def __len__(self):
- return self.remaining()
-
- def __getitem__(self, key):
- if type(key) is slice:
- size = len(self)
- start, stop, step = key.indices(size)
- if start is None:
- start = self.pos
- else:
- start += self.pos
- if stop is not None:
- stop += self.pos
- return self.text[slice(start, stop, step)]
- elif key < 0:
- return self.text[key]
- else:
- return self.text[self.pos + key]
-
- def __str__(self):
- return self.text[self.pos:]
-
- def raise_parse_error(self, msg):
- raise ParseError(msg, self.name, self.line)
-
-
-def _format_code(code):
- lines = code.splitlines()
- format = "%%%dd %%s\n" % len(repr(len(lines) + 1))
- return "".join([format % (i + 1, line) for (i, line) in enumerate(lines)])
-
-
-def _parse(reader, template, in_block=None, in_loop=None):
- body = _ChunkList([])
- while True:
- # Find next template directive
- curly = 0
- while True:
- curly = reader.find("{", curly)
- if curly == -1 or curly + 1 == reader.remaining():
- # EOF
- if in_block:
- reader.raise_parse_error(
- "Missing {%% end %%} block for %s" % in_block)
- body.chunks.append(_Text(reader.consume(), reader.line,
- reader.whitespace))
- return body
- # If the first curly brace is not the start of a special token,
- # start searching from the character after it
- if reader[curly + 1] not in ("{", "%", "#"):
- curly += 1
- continue
- # When there are more than 2 curlies in a row, use the
- # innermost ones. This is useful when generating languages
- # like latex where curlies are also meaningful
- if (curly + 2 < reader.remaining() and
- reader[curly + 1] == '{' and reader[curly + 2] == '{'):
- curly += 1
- continue
- break
-
- # Append any text before the special token
- if curly > 0:
- cons = reader.consume(curly)
- body.chunks.append(_Text(cons, reader.line,
- reader.whitespace))
-
- start_brace = reader.consume(2)
- line = reader.line
-
- # Template directives may be escaped as "{{!" or "{%!".
- # In this case output the braces and consume the "!".
- # This is especially useful in conjunction with jquery templates,
- # which also use double braces.
- if reader.remaining() and reader[0] == "!":
- reader.consume(1)
- body.chunks.append(_Text(start_brace, line,
- reader.whitespace))
- continue
-
- # Comment
- if start_brace == "{#":
- end = reader.find("#}")
- if end == -1:
- reader.raise_parse_error("Missing end comment #}")
- contents = reader.consume(end).strip()
- reader.consume(2)
- continue
-
- # Expression
- if start_brace == "{{":
- end = reader.find("}}")
- if end == -1:
- reader.raise_parse_error("Missing end expression }}")
- contents = reader.consume(end).strip()
- reader.consume(2)
- if not contents:
- reader.raise_parse_error("Empty expression")
- body.chunks.append(_Expression(contents, line))
- continue
-
- # Block
- assert start_brace == "{%", start_brace
- end = reader.find("%}")
- if end == -1:
- reader.raise_parse_error("Missing end block %}")
- contents = reader.consume(end).strip()
- reader.consume(2)
- if not contents:
- reader.raise_parse_error("Empty block tag ({% %})")
-
- operator, space, suffix = contents.partition(" ")
- suffix = suffix.strip()
-
- # Intermediate ("else", "elif", etc) blocks
- intermediate_blocks = {
- "else": set(["if", "for", "while", "try"]),
- "elif": set(["if"]),
- "except": set(["try"]),
- "finally": set(["try"]),
- }
- allowed_parents = intermediate_blocks.get(operator)
- if allowed_parents is not None:
- if not in_block:
- reader.raise_parse_error("%s outside %s block" %
- (operator, allowed_parents))
- if in_block not in allowed_parents:
- reader.raise_parse_error(
- "%s block cannot be attached to %s block" %
- (operator, in_block))
- body.chunks.append(_IntermediateControlBlock(contents, line))
- continue
-
- # End tag
- elif operator == "end":
- if not in_block:
- reader.raise_parse_error("Extra {% end %} block")
- return body
-
- elif operator in ("extends", "include", "set", "import", "from",
- "comment", "autoescape", "whitespace", "raw",
- "module"):
- if operator == "comment":
- continue
- if operator == "extends":
- suffix = suffix.strip('"').strip("'")
- if not suffix:
- reader.raise_parse_error("extends missing file path")
- block = _ExtendsBlock(suffix)
- elif operator in ("import", "from"):
- if not suffix:
- reader.raise_parse_error("import missing statement")
- block = _Statement(contents, line)
- elif operator == "include":
- suffix = suffix.strip('"').strip("'")
- if not suffix:
- reader.raise_parse_error("include missing file path")
- block = _IncludeBlock(suffix, reader, line)
- elif operator == "set":
- if not suffix:
- reader.raise_parse_error("set missing statement")
- block = _Statement(suffix, line)
- elif operator == "autoescape":
- fn = suffix.strip()
- if fn == "None":
- fn = None
- template.autoescape = fn
- continue
- elif operator == "whitespace":
- mode = suffix.strip()
- # Validate the selected mode
- filter_whitespace(mode, '')
- reader.whitespace = mode
- continue
- elif operator == "raw":
- block = _Expression(suffix, line, raw=True)
- elif operator == "module":
- block = _Module(suffix, line)
- body.chunks.append(block)
- continue
-
- elif operator in ("apply", "block", "try", "if", "for", "while"):
- # parse inner body recursively
- if operator in ("for", "while"):
- block_body = _parse(reader, template, operator, operator)
- elif operator == "apply":
- # apply creates a nested function so syntactically it's not
- # in the loop.
- block_body = _parse(reader, template, operator, None)
- else:
- block_body = _parse(reader, template, operator, in_loop)
-
- if operator == "apply":
- if not suffix:
- reader.raise_parse_error("apply missing method name")
- block = _ApplyBlock(suffix, line, block_body)
- elif operator == "block":
- if not suffix:
- reader.raise_parse_error("block missing name")
- block = _NamedBlock(suffix, block_body, template, line)
- else:
- block = _ControlBlock(contents, line, block_body)
- body.chunks.append(block)
- continue
-
- elif operator in ("break", "continue"):
- if not in_loop:
- reader.raise_parse_error("%s outside %s block" %
- (operator, set(["for", "while"])))
- body.chunks.append(_Statement(contents, line))
- continue
-
- else:
- reader.raise_parse_error("unknown operator: %r" % operator)
+#!/usr/bin/env python
+#
+# Copyright 2009 Facebook
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""A simple template system that compiles templates to Python code.
+
+Basic usage looks like::
+
+ t = template.Template("<html>{{ myvalue }}</html>")
+ print(t.generate(myvalue="XXX"))
+
+`Loader` is a class that loads templates from a root directory and caches
+the compiled templates::
+
+ loader = template.Loader("/home/btaylor")
+ print(loader.load("test.html").generate(myvalue="XXX"))
+
+We compile all templates to raw Python. Error reporting is currently
+rudimentary. Syntax for the templates::
+
+ ### base.html
+ <html>
+ <head>
+ <title>{% block title %}Default title{% end %}</title>
+ </head>
+ <body>
+ <ul>
+ {% for student in students %}
+ {% block student %}
+ <li>{{ escape(student.name) }}</li>
+ {% end %}
+ {% end %}
+ </ul>
+ </body>
+ </html>
+
+ ### bold.html
+ {% extends "base.html" %}
+
+ {% block title %}A bolder title{% end %}
+
+ {% block student %}
+ <li><span style="bold">{{ escape(student.name) }}</span></li>
+ {% end %}
+
+Unlike most other template systems, we do not put any restrictions on the
+expressions you can include in your statements. ``if`` and ``for`` blocks get
+translated exactly into Python, so you can do complex expressions like::
+
+ {% for student in [p for p in people if p.student and p.age > 23] %}
+ <li>{{ escape(student.name) }}</li>
+ {% end %}
+
+Translating directly to Python means you can apply functions to expressions
+easily, like the ``escape()`` function in the examples above. You can pass
+functions into your template just like any other variable
+(in a `.RequestHandler`, override `.RequestHandler.get_template_namespace`)::
+
+ ### Python code
+ def add(x, y):
+ return x + y
+ template.execute(add=add)
+
+ ### The template
+ {{ add(1, 2) }}
+
+We provide the functions `escape() <.xhtml_escape>`, `.url_escape()`,
+`.json_encode()`, and `.squeeze()` to all templates by default.
+
+Typical applications do not create `Template` or `Loader` instances by
+hand, but instead use the `~.RequestHandler.render` and
+`~.RequestHandler.render_string` methods of
+`tornado.web.RequestHandler`, which load templates automatically based
+on the ``template_path`` `.Application` setting.
+
+Variable names beginning with ``_tt_`` are reserved by the template
+system and should not be used by application code.
+
+Syntax Reference
+----------------
+
+Template expressions are surrounded by double curly braces: ``{{ ... }}``.
+The contents may be any Python expression, which will be escaped according
+to the current autoescape setting and inserted into the output. Other
+template directives use ``{% %}``.
+
+To comment out a section so that it is omitted from the output, surround it
+with ``{# ... #}``.
+
+These tags may be escaped as ``{{!``, ``{%!``, and ``{#!``
+if you need to include a literal ``{{``, ``{%``, or ``{#`` in the output.
+
+
+``{% apply *function* %}...{% end %}``
+ Applies a function to the output of all template code between ``apply``
+ and ``end``::
+
+ {% apply linkify %}{{name}} said: {{message}}{% end %}
+
+    Note that, as an implementation detail, apply blocks are implemented
+    as nested functions and thus may interact strangely with variables
+    set via ``{% set %}``, and with ``{% break %}`` or ``{% continue %}``
+    inside loops.
+
+``{% autoescape *function* %}``
+ Sets the autoescape mode for the current file. This does not affect
+ other files, even those referenced by ``{% include %}``. Note that
+ autoescaping can also be configured globally, at the `.Application`
+    or `Loader`::
+
+ {% autoescape xhtml_escape %}
+ {% autoescape None %}
+
+``{% block *name* %}...{% end %}``
+ Indicates a named, replaceable block for use with ``{% extends %}``.
+ Blocks in the parent template will be replaced with the contents of
+    the same-named block in a child template::
+
+ <!-- base.html -->
+ <title>{% block title %}Default title{% end %}</title>
+
+ <!-- mypage.html -->
+ {% extends "base.html" %}
+ {% block title %}My page title{% end %}
+
+``{% comment ... %}``
+ A comment which will be removed from the template output. Note that
+ there is no ``{% end %}`` tag; the comment goes from the word ``comment``
+ to the closing ``%}`` tag.
+
+``{% extends *filename* %}``
+ Inherit from another template. Templates that use ``extends`` should
+ contain one or more ``block`` tags to replace content from the parent
+ template. Anything in the child template not contained in a ``block``
+ tag will be ignored. For an example, see the ``{% block %}`` tag.
+
+``{% for *var* in *expr* %}...{% end %}``
+    Same as the Python ``for`` statement. ``{% break %}`` and
+ ``{% continue %}`` may be used inside the loop.
+
+``{% from *x* import *y* %}``
+    Same as the Python ``import`` statement.
+
+``{% if *condition* %}...{% elif *condition* %}...{% else %}...{% end %}``
+    Conditional statement: outputs the first section whose condition is
+    true. (The ``elif`` and ``else`` sections are optional.)
+
+``{% import *module* %}``
+    Same as the Python ``import`` statement.
+
+``{% include *filename* %}``
+ Includes another template file. The included file can see all the local
+ variables as if it were copied directly to the point of the ``include``
+ directive (the ``{% autoescape %}`` directive is an exception).
+ Alternately, ``{% module Template(filename, **kwargs) %}`` may be used
+ to include another template with an isolated namespace.
+
+``{% module *expr* %}``
+ Renders a `~tornado.web.UIModule`. The output of the ``UIModule`` is
+ not escaped::
+
+ {% module Template("foo.html", arg=42) %}
+
+ ``UIModules`` are a feature of the `tornado.web.RequestHandler`
+ class (and specifically its ``render`` method) and will not work
+ when the template system is used on its own in other contexts.
+
+``{% raw *expr* %}``
+ Outputs the result of the given expression without autoescaping.
+
+``{% set *x* = *y* %}``
+ Sets a local variable.
+
+``{% try %}...{% except %}...{% else %}...{% finally %}...{% end %}``
+    Same as the Python ``try`` statement.
+
+``{% while *condition* %}... {% end %}``
+    Same as the Python ``while`` statement. ``{% break %}`` and
+ ``{% continue %}`` may be used inside the loop.
+
+``{% whitespace *mode* %}``
+ Sets the whitespace mode for the remainder of the current file
+ (or until the next ``{% whitespace %}`` directive). See
+ `filter_whitespace` for available options. New in Tornado 4.3.
+"""
+
+from __future__ import absolute_import, division, print_function
+
+import datetime
+import linecache
+import os.path
+import posixpath
+import re
+import threading
+
+from tornado import escape
+from tornado.log import app_log
+from tornado.util import ObjectDict, exec_in, unicode_type, PY3
+
+if PY3:
+ from io import StringIO
+else:
+ from cStringIO import StringIO
+
+_DEFAULT_AUTOESCAPE = "xhtml_escape"
+_UNSET = object()
+
+
+def filter_whitespace(mode, text):
+ """Transform whitespace in ``text`` according to ``mode``.
+
+ Available modes are:
+
+ * ``all``: Return all whitespace unmodified.
+    * ``single``: Collapse consecutive whitespace into a single whitespace
+ character, preserving newlines.
+ * ``oneline``: Collapse all runs of whitespace into a single space
+ character, removing all newlines in the process.
+
+ .. versionadded:: 4.3
+ """
+ if mode == 'all':
+ return text
+ elif mode == 'single':
+ text = re.sub(r"([\t ]+)", " ", text)
+ text = re.sub(r"(\s*\n\s*)", "\n", text)
+ return text
+ elif mode == 'oneline':
+ return re.sub(r"(\s+)", " ", text)
+ else:
+ raise Exception("invalid whitespace mode %s" % mode)
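+
+
+# For example, given text = "a  \n  b" (where \n denotes a newline):
+#   filter_whitespace("all", text)     -> unchanged
+#   filter_whitespace("single", text)  -> "a\nb"
+#   filter_whitespace("oneline", text) -> "a b"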
+
+
+class Template(object):
+ """A compiled template.
+
+    We compile into Python from the given template_string. You can render
+    it with variables by calling generate().
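+
+    A short usage sketch::
+
+        t = Template("{{ name }} is {{ age }}")
+        print(t.generate(name="Ada", age=36))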
+ """
+ # note that the constructor's signature is not extracted with
+ # autodoc because _UNSET looks like garbage. When changing
+ # this signature update website/sphinx/template.rst too.
+ def __init__(self, template_string, name="<string>", loader=None,
+ compress_whitespace=_UNSET, autoescape=_UNSET,
+ whitespace=None):
+ """Construct a Template.
+
+ :arg str template_string: the contents of the template file.
+ :arg str name: the filename from which the template was loaded
+ (used for error message).
+ :arg tornado.template.BaseLoader loader: the `~tornado.template.BaseLoader` responsible for this template,
+            used to resolve ``{% include %}`` and ``{% extends %}``
+ directives.
+ :arg bool compress_whitespace: Deprecated since Tornado 4.3.
+ Equivalent to ``whitespace="single"`` if true and
+ ``whitespace="all"`` if false.
+ :arg str autoescape: The name of a function in the template
+ namespace, or ``None`` to disable escaping by default.
+ :arg str whitespace: A string specifying treatment of whitespace;
+ see `filter_whitespace` for options.
+
+ .. versionchanged:: 4.3
+ Added ``whitespace`` parameter; deprecated ``compress_whitespace``.
+ """
+ self.name = escape.native_str(name)
+
+ if compress_whitespace is not _UNSET:
+ # Convert deprecated compress_whitespace (bool) to whitespace (str).
+ if whitespace is not None:
+ raise Exception("cannot set both whitespace and compress_whitespace")
+ whitespace = "single" if compress_whitespace else "all"
+ if whitespace is None:
+ if loader and loader.whitespace:
+ whitespace = loader.whitespace
+ else:
+ # Whitespace defaults by filename.
+ if name.endswith(".html") or name.endswith(".js"):
+ whitespace = "single"
+ else:
+ whitespace = "all"
+ # Validate the whitespace setting.
+ filter_whitespace(whitespace, '')
+
+ if autoescape is not _UNSET:
+ self.autoescape = autoescape
+ elif loader:
+ self.autoescape = loader.autoescape
+ else:
+ self.autoescape = _DEFAULT_AUTOESCAPE
+
+ self.namespace = loader.namespace if loader else {}
+ reader = _TemplateReader(name, escape.native_str(template_string),
+ whitespace)
+ self.file = _File(self, _parse(reader, self))
+ self.code = self._generate_python(loader)
+ self.loader = loader
+ try:
+ # Under python2.5, the fake filename used here must match
+ # the module name used in __name__ below.
+ # The dont_inherit flag prevents template.py's future imports
+ # from being applied to the generated code.
+ self.compiled = compile(
+ escape.to_unicode(self.code),
+ "%s.generated.py" % self.name.replace('.', '_'),
+ "exec", dont_inherit=True)
+ except Exception:
+ formatted_code = _format_code(self.code).rstrip()
+ app_log.error("%s code:\n%s", self.name, formatted_code)
+ raise
+
+ def generate(self, **kwargs):
+ """Generate this template with the given arguments."""
+ namespace = {
+ "escape": escape.xhtml_escape,
+ "xhtml_escape": escape.xhtml_escape,
+ "url_escape": escape.url_escape,
+ "json_encode": escape.json_encode,
+ "squeeze": escape.squeeze,
+ "linkify": escape.linkify,
+ "datetime": datetime,
+ "_tt_utf8": escape.utf8, # for internal use
+ "_tt_string_types": (unicode_type, bytes),
+ # __name__ and __loader__ allow the traceback mechanism to find
+ # the generated source code.
+ "__name__": self.name.replace('.', '_'),
+ "__loader__": ObjectDict(get_source=lambda name: self.code),
+ }
+ namespace.update(self.namespace)
+ namespace.update(kwargs)
+ exec_in(self.compiled, namespace)
+ execute = namespace["_tt_execute"]
+ # Clear the traceback module's cache of source data now that
+ # we've generated a new template (mainly for this module's
+ # unittests, where different tests reuse the same name).
+ linecache.clearcache()
+ return execute()
+
+ def _generate_python(self, loader):
+ buffer = StringIO()
+ try:
+ # named_blocks maps from names to _NamedBlock objects
+ named_blocks = {}
+ ancestors = self._get_ancestors(loader)
+ ancestors.reverse()
+ for ancestor in ancestors:
+ ancestor.find_named_blocks(loader, named_blocks)
+ writer = _CodeWriter(buffer, named_blocks, loader,
+ ancestors[0].template)
+ ancestors[0].generate(writer)
+ return buffer.getvalue()
+ finally:
+ buffer.close()
+
+ def _get_ancestors(self, loader):
+ ancestors = [self.file]
+ for chunk in self.file.body.chunks:
+ if isinstance(chunk, _ExtendsBlock):
+ if not loader:
+ raise ParseError("{% extends %} block found, but no "
+ "template loader")
+ template = loader.load(chunk.name, self.name)
+ ancestors.extend(template._get_ancestors(loader))
+ return ancestors
+
+
+class BaseLoader(object):
+ """Base class for template loaders.
+
+ You must use a template loader to use template constructs like
+ ``{% extends %}`` and ``{% include %}``. The loader caches all
+ templates after they are loaded the first time.
+ """
+ def __init__(self, autoescape=_DEFAULT_AUTOESCAPE, namespace=None,
+ whitespace=None):
+ """Construct a template loader.
+
+ :arg str autoescape: The name of a function in the template
+ namespace, such as "xhtml_escape", or ``None`` to disable
+ autoescaping by default.
+ :arg dict namespace: A dictionary to be added to the default template
+ namespace, or ``None``.
+ :arg str whitespace: A string specifying default behavior for
+ whitespace in templates; see `filter_whitespace` for options.
+ Default is "single" for files ending in ".html" and ".js" and
+ "all" for other files.
+
+ .. versionchanged:: 4.3
+ Added ``whitespace`` parameter.
+ """
+ self.autoescape = autoescape
+ self.namespace = namespace or {}
+ self.whitespace = whitespace
+ self.templates = {}
+ # self.lock protects self.templates. It's a reentrant lock
+ # because templates may load other templates via `include` or
+ # `extends`. Note that thanks to the GIL this code would be safe
+ # even without the lock, but could lead to wasted work as multiple
+ # threads tried to compile the same template simultaneously.
+ self.lock = threading.RLock()
+
+ def reset(self):
+ """Resets the cache of compiled templates."""
+ with self.lock:
+ self.templates = {}
+
+ def resolve_path(self, name, parent_path=None):
+ """Converts a possibly-relative path to absolute (used internally)."""
+ raise NotImplementedError()
+
+ def load(self, name, parent_path=None):
+ """Loads a template."""
+ name = self.resolve_path(name, parent_path=parent_path)
+ with self.lock:
+ if name not in self.templates:
+ self.templates[name] = self._create_template(name)
+ return self.templates[name]
+
+ def _create_template(self, name):
+ raise NotImplementedError()
+
+
+class Loader(BaseLoader):
+ """A template loader that loads from a single root directory.
+ """
+ def __init__(self, root_directory, **kwargs):
+ super(Loader, self).__init__(**kwargs)
+ self.root = os.path.abspath(root_directory)
+
+ def resolve_path(self, name, parent_path=None):
+ if parent_path and not parent_path.startswith("<") and \
+ not parent_path.startswith("/") and \
+ not name.startswith("/"):
+ current_path = os.path.join(self.root, parent_path)
+ file_dir = os.path.dirname(os.path.abspath(current_path))
+ relative_path = os.path.abspath(os.path.join(file_dir, name))
+ if relative_path.startswith(self.root):
+ name = relative_path[len(self.root) + 1:]
+ return name
+
+ def _create_template(self, name):
+ path = os.path.join(self.root, name)
+ with open(path, "rb") as f:
+ template = Template(f.read(), name=name, loader=self)
+ return template
+
+
+class DictLoader(BaseLoader):
+ """A template loader that loads from a dictionary."""
+ def __init__(self, dict, **kwargs):
+ super(DictLoader, self).__init__(**kwargs)
+ self.dict = dict
+
+ def resolve_path(self, name, parent_path=None):
+ if parent_path and not parent_path.startswith("<") and \
+ not parent_path.startswith("/") and \
+ not name.startswith("/"):
+ file_dir = posixpath.dirname(parent_path)
+ name = posixpath.normpath(posixpath.join(file_dir, name))
+ return name
+
+ def _create_template(self, name):
+ return Template(self.dict[name], name=name, loader=self)
+
+
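+# Illustrative sketch (not part of the upstream module): ``{% extends %}`` and
+# ``{% block %}`` require a loader, which resolves and caches the referenced
+# templates. The function name is hypothetical.
+def _example_dict_loader():
+    loader = DictLoader({
+        "base.html": "<title>{% block title %}default{% end %}</title>",
+        "page.html": '{% extends "base.html" %}'
+                     '{% block title %}custom{% end %}',
+    })
+    # load() compiles page.html and its ancestor base.html on first use;
+    # the result is b'<title>custom</title>'.
+    return loader.load("page.html").generate()
+
+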
+class _Node(object):
+ def each_child(self):
+ return ()
+
+ def generate(self, writer):
+ raise NotImplementedError()
+
+ def find_named_blocks(self, loader, named_blocks):
+ for child in self.each_child():
+ child.find_named_blocks(loader, named_blocks)
+
+
+class _File(_Node):
+ def __init__(self, template, body):
+ self.template = template
+ self.body = body
+ self.line = 0
+
+ def generate(self, writer):
+ writer.write_line("def _tt_execute():", self.line)
+ with writer.indent():
+ writer.write_line("_tt_buffer = []", self.line)
+ writer.write_line("_tt_append = _tt_buffer.append", self.line)
+ self.body.generate(writer)
+ writer.write_line("return _tt_utf8('').join(_tt_buffer)", self.line)
+
+ def each_child(self):
+ return (self.body,)
+
+
+class _ChunkList(_Node):
+ def __init__(self, chunks):
+ self.chunks = chunks
+
+ def generate(self, writer):
+ for chunk in self.chunks:
+ chunk.generate(writer)
+
+ def each_child(self):
+ return self.chunks
+
+
+class _NamedBlock(_Node):
+ def __init__(self, name, body, template, line):
+ self.name = name
+ self.body = body
+ self.template = template
+ self.line = line
+
+ def each_child(self):
+ return (self.body,)
+
+ def generate(self, writer):
+ block = writer.named_blocks[self.name]
+ with writer.include(block.template, self.line):
+ block.body.generate(writer)
+
+ def find_named_blocks(self, loader, named_blocks):
+ named_blocks[self.name] = self
+ _Node.find_named_blocks(self, loader, named_blocks)
+
+
+class _ExtendsBlock(_Node):
+ def __init__(self, name):
+ self.name = name
+
+
+class _IncludeBlock(_Node):
+ def __init__(self, name, reader, line):
+ self.name = name
+ self.template_name = reader.name
+ self.line = line
+
+ def find_named_blocks(self, loader, named_blocks):
+ included = loader.load(self.name, self.template_name)
+ included.file.find_named_blocks(loader, named_blocks)
+
+ def generate(self, writer):
+ included = writer.loader.load(self.name, self.template_name)
+ with writer.include(included, self.line):
+ included.file.body.generate(writer)
+
+
+class _ApplyBlock(_Node):
+ def __init__(self, method, line, body=None):
+ self.method = method
+ self.line = line
+ self.body = body
+
+ def each_child(self):
+ return (self.body,)
+
+ def generate(self, writer):
+ method_name = "_tt_apply%d" % writer.apply_counter
+ writer.apply_counter += 1
+ writer.write_line("def %s():" % method_name, self.line)
+ with writer.indent():
+ writer.write_line("_tt_buffer = []", self.line)
+ writer.write_line("_tt_append = _tt_buffer.append", self.line)
+ self.body.generate(writer)
+ writer.write_line("return _tt_utf8('').join(_tt_buffer)", self.line)
+ writer.write_line("_tt_append(_tt_utf8(%s(%s())))" % (
+ self.method, method_name), self.line)
+
+
+class _ControlBlock(_Node):
+ def __init__(self, statement, line, body=None):
+ self.statement = statement
+ self.line = line
+ self.body = body
+
+ def each_child(self):
+ return (self.body,)
+
+ def generate(self, writer):
+ writer.write_line("%s:" % self.statement, self.line)
+ with writer.indent():
+ self.body.generate(writer)
+ # Just in case the body was empty
+ writer.write_line("pass", self.line)
+
+
+class _IntermediateControlBlock(_Node):
+ def __init__(self, statement, line):
+ self.statement = statement
+ self.line = line
+
+ def generate(self, writer):
+ # In case the previous block was empty
+ writer.write_line("pass", self.line)
+ writer.write_line("%s:" % self.statement, self.line, writer.indent_size() - 1)
+
+
+class _Statement(_Node):
+ def __init__(self, statement, line):
+ self.statement = statement
+ self.line = line
+
+ def generate(self, writer):
+ writer.write_line(self.statement, self.line)
+
+
+class _Expression(_Node):
+ def __init__(self, expression, line, raw=False):
+ self.expression = expression
+ self.line = line
+ self.raw = raw
+
+ def generate(self, writer):
+ writer.write_line("_tt_tmp = %s" % self.expression, self.line)
+ writer.write_line("if isinstance(_tt_tmp, _tt_string_types):"
+ " _tt_tmp = _tt_utf8(_tt_tmp)", self.line)
+ writer.write_line("else: _tt_tmp = _tt_utf8(str(_tt_tmp))", self.line)
+ if not self.raw and writer.current_template.autoescape is not None:
+ # In python3 functions like xhtml_escape return unicode,
+ # so we have to convert to utf8 again.
+ writer.write_line("_tt_tmp = _tt_utf8(%s(_tt_tmp))" %
+ writer.current_template.autoescape, self.line)
+ writer.write_line("_tt_append(_tt_tmp)", self.line)
+
+
+class _Module(_Expression):
+ def __init__(self, expression, line):
+ super(_Module, self).__init__("_tt_modules." + expression, line,
+ raw=True)
+
+
+class _Text(_Node):
+ def __init__(self, value, line, whitespace):
+ self.value = value
+ self.line = line
+ self.whitespace = whitespace
+
+ def generate(self, writer):
+ value = self.value
+
+ # Compress whitespace if requested, with a crude heuristic to avoid
+ # altering preformatted whitespace.
+ if "<pre>" not in value:
+ value = filter_whitespace(self.whitespace, value)
+
+ if value:
+ writer.write_line('_tt_append(%r)' % escape.utf8(value), self.line)
+
+
+class ParseError(Exception):
+ """Raised for template syntax errors.
+
+ ``ParseError`` instances have ``filename`` and ``lineno`` attributes
+ indicating the position of the error.
+
+ .. versionchanged:: 4.3
+ Added ``filename`` and ``lineno`` attributes.
+ """
+ def __init__(self, message, filename=None, lineno=0):
+ self.message = message
+ # The names "filename" and "lineno" are chosen for consistency
+ # with python SyntaxError.
+ self.filename = filename
+ self.lineno = lineno
+
+ def __str__(self):
+ return '%s at %s:%d' % (self.message, self.filename, self.lineno)
+
+
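+# Illustrative sketch (not part of the upstream module): ParseError reports
+# the template name and line, mirroring SyntaxError. Hypothetical helper name.
+def _example_parse_error():
+    try:
+        Template("{% if x %}unterminated", name="bad.html")
+    except ParseError as e:
+        # str(e) == 'Missing {% end %} block for if at bad.html:1'
+        return (e.filename, e.lineno)
+
+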
+class _CodeWriter(object):
+ def __init__(self, file, named_blocks, loader, current_template):
+ self.file = file
+ self.named_blocks = named_blocks
+ self.loader = loader
+ self.current_template = current_template
+ self.apply_counter = 0
+ self.include_stack = []
+ self._indent = 0
+
+ def indent_size(self):
+ return self._indent
+
+ def indent(self):
+ class Indenter(object):
+ def __enter__(_):
+ self._indent += 1
+ return self
+
+ def __exit__(_, *args):
+ assert self._indent > 0
+ self._indent -= 1
+
+ return Indenter()
+
+ def include(self, template, line):
+ self.include_stack.append((self.current_template, line))
+ self.current_template = template
+
+ class IncludeTemplate(object):
+ def __enter__(_):
+ return self
+
+ def __exit__(_, *args):
+ self.current_template = self.include_stack.pop()[0]
+
+ return IncludeTemplate()
+
+ def write_line(self, line, line_number, indent=None):
+ if indent is None:
+ indent = self._indent
+ line_comment = ' # %s:%d' % (self.current_template.name, line_number)
+ if self.include_stack:
+ ancestors = ["%s:%d" % (tmpl.name, lineno)
+ for (tmpl, lineno) in self.include_stack]
+ line_comment += ' (via %s)' % ', '.join(reversed(ancestors))
+ print(" " * indent + line + line_comment, file=self.file)
+
+
+class _TemplateReader(object):
+ def __init__(self, name, text, whitespace):
+ self.name = name
+ self.text = text
+ self.whitespace = whitespace
+ self.line = 1
+ self.pos = 0
+
+ def find(self, needle, start=0, end=None):
+ assert start >= 0, start
+ pos = self.pos
+ start += pos
+ if end is None:
+ index = self.text.find(needle, start)
+ else:
+ end += pos
+ assert end >= start
+ index = self.text.find(needle, start, end)
+ if index != -1:
+ index -= pos
+ return index
+
+ def consume(self, count=None):
+ if count is None:
+ count = len(self.text) - self.pos
+ newpos = self.pos + count
+ self.line += self.text.count("\n", self.pos, newpos)
+ s = self.text[self.pos:newpos]
+ self.pos = newpos
+ return s
+
+ def remaining(self):
+ return len(self.text) - self.pos
+
+ def __len__(self):
+ return self.remaining()
+
+    def __getitem__(self, key):
+        if type(key) is slice:
+            size = len(self)
+            # slice.indices() always returns non-None bounds clamped to
+            # [0, size], so they only need to be offset by the current
+            # read position.
+            start, stop, step = key.indices(size)
+            return self.text[slice(start + self.pos, stop + self.pos, step)]
+ elif key < 0:
+ return self.text[key]
+ else:
+ return self.text[self.pos + key]
+
+ def __str__(self):
+ return self.text[self.pos:]
+
+ def raise_parse_error(self, msg):
+ raise ParseError(msg, self.name, self.line)
+
+
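+# Illustrative sketch (not part of the upstream module): the reader keeps a
+# running line count as text is consumed, which is what raise_parse_error
+# reports. Hypothetical helper name.
+def _example_template_reader():
+    reader = _TemplateReader("<string>", "a\nb{{ x }}", "all")
+    # Consuming the text before the first directive advances reader.line to 2.
+    reader.consume(reader.find("{{"))
+    return reader.line
+
+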
+def _format_code(code):
+    lines = code.splitlines()
+    # Size the line-number column to fit the largest line number.
+    fmt = "%%%dd %%s\n" % len(repr(len(lines) + 1))
+    return "".join([fmt % (i + 1, line) for (i, line) in enumerate(lines)])
+
+
+def _parse(reader, template, in_block=None, in_loop=None):
+ body = _ChunkList([])
+ while True:
+ # Find next template directive
+ curly = 0
+ while True:
+ curly = reader.find("{", curly)
+ if curly == -1 or curly + 1 == reader.remaining():
+ # EOF
+ if in_block:
+ reader.raise_parse_error(
+ "Missing {%% end %%} block for %s" % in_block)
+ body.chunks.append(_Text(reader.consume(), reader.line,
+ reader.whitespace))
+ return body
+ # If the first curly brace is not the start of a special token,
+ # start searching from the character after it
+ if reader[curly + 1] not in ("{", "%", "#"):
+ curly += 1
+ continue
+            # When there are more than 2 curlies in a row, use the
+            # innermost ones.  This is useful when generating languages
+            # like LaTeX where curlies are also meaningful.
+ if (curly + 2 < reader.remaining() and
+ reader[curly + 1] == '{' and reader[curly + 2] == '{'):
+ curly += 1
+ continue
+ break
+
+ # Append any text before the special token
+ if curly > 0:
+ cons = reader.consume(curly)
+ body.chunks.append(_Text(cons, reader.line,
+ reader.whitespace))
+
+ start_brace = reader.consume(2)
+ line = reader.line
+
+ # Template directives may be escaped as "{{!" or "{%!".
+ # In this case output the braces and consume the "!".
+ # This is especially useful in conjunction with jquery templates,
+ # which also use double braces.
+ if reader.remaining() and reader[0] == "!":
+ reader.consume(1)
+ body.chunks.append(_Text(start_brace, line,
+ reader.whitespace))
+ continue
+
+ # Comment
+ if start_brace == "{#":
+ end = reader.find("#}")
+ if end == -1:
+ reader.raise_parse_error("Missing end comment #}")
+ contents = reader.consume(end).strip()
+ reader.consume(2)
+ continue
+
+ # Expression
+ if start_brace == "{{":
+ end = reader.find("}}")
+ if end == -1:
+ reader.raise_parse_error("Missing end expression }}")
+ contents = reader.consume(end).strip()
+ reader.consume(2)
+ if not contents:
+ reader.raise_parse_error("Empty expression")
+ body.chunks.append(_Expression(contents, line))
+ continue
+
+ # Block
+ assert start_brace == "{%", start_brace
+ end = reader.find("%}")
+ if end == -1:
+ reader.raise_parse_error("Missing end block %}")
+ contents = reader.consume(end).strip()
+ reader.consume(2)
+ if not contents:
+ reader.raise_parse_error("Empty block tag ({% %})")
+
+ operator, space, suffix = contents.partition(" ")
+ suffix = suffix.strip()
+
+ # Intermediate ("else", "elif", etc) blocks
+ intermediate_blocks = {
+ "else": set(["if", "for", "while", "try"]),
+ "elif": set(["if"]),
+ "except": set(["try"]),
+ "finally": set(["try"]),
+ }
+ allowed_parents = intermediate_blocks.get(operator)
+ if allowed_parents is not None:
+ if not in_block:
+ reader.raise_parse_error("%s outside %s block" %
+ (operator, allowed_parents))
+ if in_block not in allowed_parents:
+ reader.raise_parse_error(
+ "%s block cannot be attached to %s block" %
+ (operator, in_block))
+ body.chunks.append(_IntermediateControlBlock(contents, line))
+ continue
+
+ # End tag
+ elif operator == "end":
+ if not in_block:
+ reader.raise_parse_error("Extra {% end %} block")
+ return body
+
+ elif operator in ("extends", "include", "set", "import", "from",
+ "comment", "autoescape", "whitespace", "raw",
+ "module"):
+ if operator == "comment":
+ continue
+ if operator == "extends":
+ suffix = suffix.strip('"').strip("'")
+ if not suffix:
+ reader.raise_parse_error("extends missing file path")
+ block = _ExtendsBlock(suffix)
+ elif operator in ("import", "from"):
+ if not suffix:
+ reader.raise_parse_error("import missing statement")
+ block = _Statement(contents, line)
+ elif operator == "include":
+ suffix = suffix.strip('"').strip("'")
+ if not suffix:
+ reader.raise_parse_error("include missing file path")
+ block = _IncludeBlock(suffix, reader, line)
+ elif operator == "set":
+ if not suffix:
+ reader.raise_parse_error("set missing statement")
+ block = _Statement(suffix, line)
+ elif operator == "autoescape":
+ fn = suffix.strip()
+ if fn == "None":
+ fn = None
+ template.autoescape = fn
+ continue
+ elif operator == "whitespace":
+ mode = suffix.strip()
+ # Validate the selected mode
+ filter_whitespace(mode, '')
+ reader.whitespace = mode
+ continue
+ elif operator == "raw":
+ block = _Expression(suffix, line, raw=True)
+ elif operator == "module":
+ block = _Module(suffix, line)
+ body.chunks.append(block)
+ continue
+
+ elif operator in ("apply", "block", "try", "if", "for", "while"):
+ # parse inner body recursively
+ if operator in ("for", "while"):
+ block_body = _parse(reader, template, operator, operator)
+ elif operator == "apply":
+ # apply creates a nested function so syntactically it's not
+ # in the loop.
+ block_body = _parse(reader, template, operator, None)
+ else:
+ block_body = _parse(reader, template, operator, in_loop)
+
+ if operator == "apply":
+ if not suffix:
+ reader.raise_parse_error("apply missing method name")
+ block = _ApplyBlock(suffix, line, block_body)
+ elif operator == "block":
+ if not suffix:
+ reader.raise_parse_error("block missing name")
+ block = _NamedBlock(suffix, block_body, template, line)
+ else:
+ block = _ControlBlock(contents, line, block_body)
+ body.chunks.append(block)
+ continue
+
+ elif operator in ("break", "continue"):
+ if not in_loop:
+ reader.raise_parse_error("%s outside %s block" %
+ (operator, set(["for", "while"])))
+ body.chunks.append(_Statement(contents, line))
+ continue
+
+ else:
+ reader.raise_parse_error("unknown operator: %r" % operator)
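+
+
+# Illustrative sketch (not part of the upstream module): directives can be
+# escaped with "!" and comments are dropped entirely, as handled by _parse()
+# above. Hypothetical helper name.
+def _example_escaped_directives():
+    t = Template("{{! not_evaluated }} {# dropped #}{{ 1 + 1 }}")
+    # Returns b'{{ not_evaluated }} 2'.
+    return t.generate()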
diff --git a/contrib/python/tornado/tornado-4/tornado/testing.py b/contrib/python/tornado/tornado-4/tornado/testing.py
index 82a3b93732..762d3133a0 100644
--- a/contrib/python/tornado/tornado-4/tornado/testing.py
+++ b/contrib/python/tornado/tornado-4/tornado/testing.py
@@ -1,742 +1,742 @@
-#!/usr/bin/env python
-"""Support classes for automated testing.
-
-* `AsyncTestCase` and `AsyncHTTPTestCase`: Subclasses of unittest.TestCase
- with additional support for testing asynchronous (`.IOLoop`-based) code.
-
-* `ExpectLog` and `LogTrapTestCase`: Make test logs less spammy.
-
-* `main()`: A simple test runner (wrapper around unittest.main()) with support
- for the tornado.autoreload module to rerun the tests when code changes.
-"""
-
-from __future__ import absolute_import, division, print_function
-
-try:
- from tornado import gen
- from tornado.httpclient import AsyncHTTPClient
- from tornado.httpserver import HTTPServer
- from tornado.simple_httpclient import SimpleAsyncHTTPClient
- from tornado.ioloop import IOLoop, TimeoutError
- from tornado import netutil
- from tornado.process import Subprocess
-except ImportError:
- # These modules are not importable on app engine. Parts of this module
- # won't work, but e.g. LogTrapTestCase and main() will.
- AsyncHTTPClient = None # type: ignore
- gen = None # type: ignore
- HTTPServer = None # type: ignore
- IOLoop = None # type: ignore
- netutil = None # type: ignore
- SimpleAsyncHTTPClient = None # type: ignore
- Subprocess = None # type: ignore
-from tornado.log import gen_log, app_log
-from tornado.stack_context import ExceptionStackContext
-from tornado.util import raise_exc_info, basestring_type, PY3
-import functools
-import inspect
-import logging
-import os
-import re
-import signal
-import socket
-import sys
-
-if PY3:
- from io import StringIO
-else:
- from cStringIO import StringIO
-
-try:
- from collections.abc import Generator as GeneratorType # type: ignore
-except ImportError:
- from types import GeneratorType # type: ignore
-
-if sys.version_info >= (3, 5):
- iscoroutine = inspect.iscoroutine # type: ignore
- iscoroutinefunction = inspect.iscoroutinefunction # type: ignore
-else:
- iscoroutine = iscoroutinefunction = lambda f: False
-
-# Tornado's own test suite requires the updated unittest module
-# (either py27+ or unittest2) so tornado.test.util enforces
-# this requirement, but for other users of tornado.testing we want
-# to allow the older version if unittest2 is not available.
-if PY3:
- # On python 3, mixing unittest2 and unittest (including doctest)
- # doesn't seem to work, so always use unittest.
- import unittest
-else:
- # On python 2, prefer unittest2 when available.
- try:
- import unittest2 as unittest # type: ignore
- except ImportError:
- import unittest # type: ignore
-
-_next_port = 10000
-
-
-def get_unused_port():
- """Returns a (hopefully) unused port number.
-
- This function does not guarantee that the port it returns is available,
-    only that a series of get_unused_port calls in a single process returns
- distinct ports.
-
- .. deprecated::
- Use bind_unused_port instead, which is guaranteed to find an unused port.
- """
- global _next_port
- port = _next_port
- _next_port = _next_port + 1
- return port
-
-
-def bind_unused_port(reuse_port=False):
- """Binds a server socket to an available port on localhost.
-
- Returns a tuple (socket, port).
-
- .. versionchanged:: 4.4
- Always binds to ``127.0.0.1`` without resolving the name
- ``localhost``.
- """
- sock = netutil.bind_sockets(None, '127.0.0.1', family=socket.AF_INET,
- reuse_port=reuse_port)[0]
- port = sock.getsockname()[1]
- return sock, port
-
-
-def get_async_test_timeout():
- """Get the global timeout setting for async tests.
-
- Returns a float, the timeout in seconds.
-
- .. versionadded:: 3.1
- """
- try:
- return float(os.environ.get('ASYNC_TEST_TIMEOUT'))
- except (ValueError, TypeError):
-        return 5.0
-
-
-class _TestMethodWrapper(object):
- """Wraps a test method to raise an error if it returns a value.
-
- This is mainly used to detect undecorated generators (if a test
- method yields it must use a decorator to consume the generator),
- but will also detect other kinds of return values (these are not
- necessarily errors, but we alert anyway since there is no good
- reason to return a value from a test).
- """
- def __init__(self, orig_method):
- self.orig_method = orig_method
-
- def __call__(self, *args, **kwargs):
- result = self.orig_method(*args, **kwargs)
- if isinstance(result, GeneratorType) or iscoroutine(result):
- raise TypeError("Generator and coroutine test methods should be"
- " decorated with tornado.testing.gen_test")
- elif result is not None:
- raise ValueError("Return value from test method ignored: %r" %
- result)
-
- def __getattr__(self, name):
- """Proxy all unknown attributes to the original method.
-
- This is important for some of the decorators in the `unittest`
- module, such as `unittest.skipIf`.
- """
- return getattr(self.orig_method, name)
-
-
-class AsyncTestCase(unittest.TestCase):
- """`~unittest.TestCase` subclass for testing `.IOLoop`-based
- asynchronous code.
-
- The unittest framework is synchronous, so the test must be
- complete by the time the test method returns. This means that
- asynchronous code cannot be used in quite the same way as usual.
- To write test functions that use the same ``yield``-based patterns
- used with the `tornado.gen` module, decorate your test methods
- with `tornado.testing.gen_test` instead of
- `tornado.gen.coroutine`. This class also provides the `stop()`
- and `wait()` methods for a more manual style of testing. The test
- method itself must call ``self.wait()``, and asynchronous
- callbacks should call ``self.stop()`` to signal completion.
-
- By default, a new `.IOLoop` is constructed for each test and is available
- as ``self.io_loop``. This `.IOLoop` should be used in the construction of
- HTTP clients/servers, etc. If the code being tested requires a
- global `.IOLoop`, subclasses should override `get_new_ioloop` to return it.
-
- The `.IOLoop`'s ``start`` and ``stop`` methods should not be
- called directly. Instead, use `self.stop <stop>` and `self.wait
- <wait>`. Arguments passed to ``self.stop`` are returned from
- ``self.wait``. It is possible to have multiple ``wait``/``stop``
- cycles in the same test.
-
- Example::
-
- # This test uses coroutine style.
- class MyTestCase(AsyncTestCase):
- @tornado.testing.gen_test
- def test_http_fetch(self):
- client = AsyncHTTPClient(self.io_loop)
- response = yield client.fetch("http://www.tornadoweb.org")
- # Test contents of response
- self.assertIn("FriendFeed", response.body)
-
- # This test uses argument passing between self.stop and self.wait.
- class MyTestCase2(AsyncTestCase):
- def test_http_fetch(self):
- client = AsyncHTTPClient(self.io_loop)
- client.fetch("http://www.tornadoweb.org/", self.stop)
- response = self.wait()
- # Test contents of response
- self.assertIn("FriendFeed", response.body)
-
- # This test uses an explicit callback-based style.
- class MyTestCase3(AsyncTestCase):
- def test_http_fetch(self):
- client = AsyncHTTPClient(self.io_loop)
- client.fetch("http://www.tornadoweb.org/", self.handle_fetch)
- self.wait()
-
- def handle_fetch(self, response):
- # Test contents of response (failures and exceptions here
- # will cause self.wait() to throw an exception and end the
- # test).
- # Exceptions thrown here are magically propagated to
- # self.wait() in test_http_fetch() via stack_context.
- self.assertIn("FriendFeed", response.body)
- self.stop()
- """
- def __init__(self, methodName='runTest'):
- super(AsyncTestCase, self).__init__(methodName)
- self.__stopped = False
- self.__running = False
- self.__failure = None
- self.__stop_args = None
- self.__timeout = None
-
- # It's easy to forget the @gen_test decorator, but if you do
- # the test will silently be ignored because nothing will consume
- # the generator. Replace the test method with a wrapper that will
- # make sure it's not an undecorated generator.
- setattr(self, methodName, _TestMethodWrapper(getattr(self, methodName)))
-
- def setUp(self):
- super(AsyncTestCase, self).setUp()
- self.io_loop = self.get_new_ioloop()
- self.io_loop.make_current()
-
- def tearDown(self):
- # Clean up Subprocess, so it can be used again with a new ioloop.
- Subprocess.uninitialize()
- self.io_loop.clear_current()
- if (not IOLoop.initialized() or
- self.io_loop is not IOLoop.instance()):
- # Try to clean up any file descriptors left open in the ioloop.
- # This avoids leaks, especially when tests are run repeatedly
- # in the same process with autoreload (because curl does not
- # set FD_CLOEXEC on its file descriptors)
- self.io_loop.close(all_fds=True)
- super(AsyncTestCase, self).tearDown()
- # In case an exception escaped or the StackContext caught an exception
- # when there wasn't a wait() to re-raise it, do so here.
- # This is our last chance to raise an exception in a way that the
- # unittest machinery understands.
- self.__rethrow()
-
- def get_new_ioloop(self):
- """Creates a new `.IOLoop` for this test. May be overridden in
- subclasses for tests that require a specific `.IOLoop` (usually
- the singleton `.IOLoop.instance()`).
- """
- return IOLoop()
-
- def _handle_exception(self, typ, value, tb):
- if self.__failure is None:
- self.__failure = (typ, value, tb)
- else:
- app_log.error("multiple unhandled exceptions in test",
- exc_info=(typ, value, tb))
- self.stop()
- return True
-
- def __rethrow(self):
- if self.__failure is not None:
- failure = self.__failure
- self.__failure = None
- raise_exc_info(failure)
-
- def run(self, result=None):
- with ExceptionStackContext(self._handle_exception):
- super(AsyncTestCase, self).run(result)
- # As a last resort, if an exception escaped super.run() and wasn't
- # re-raised in tearDown, raise it here. This will cause the
- # unittest run to fail messily, but that's better than silently
- # ignoring an error.
- self.__rethrow()
-
- def stop(self, _arg=None, **kwargs):
- """Stops the `.IOLoop`, causing one pending (or future) call to `wait()`
- to return.
-
- Keyword arguments or a single positional argument passed to `stop()` are
- saved and will be returned by `wait()`.
- """
- assert _arg is None or not kwargs
- self.__stop_args = kwargs or _arg
- if self.__running:
- self.io_loop.stop()
- self.__running = False
- self.__stopped = True
-
- def wait(self, condition=None, timeout=None):
- """Runs the `.IOLoop` until stop is called or timeout has passed.
-
- In the event of a timeout, an exception will be thrown. The
- default timeout is 5 seconds; it may be overridden with a
- ``timeout`` keyword argument or globally with the
- ``ASYNC_TEST_TIMEOUT`` environment variable.
-
- If ``condition`` is not None, the `.IOLoop` will be restarted
- after `stop()` until ``condition()`` returns true.
-
- .. versionchanged:: 3.1
- Added the ``ASYNC_TEST_TIMEOUT`` environment variable.
- """
- if timeout is None:
- timeout = get_async_test_timeout()
-
- if not self.__stopped:
- if timeout:
- def timeout_func():
- try:
- raise self.failureException(
- 'Async operation timed out after %s seconds' %
- timeout)
- except Exception:
- self.__failure = sys.exc_info()
- self.stop()
- self.__timeout = self.io_loop.add_timeout(self.io_loop.time() + timeout, timeout_func)
- while True:
- self.__running = True
- self.io_loop.start()
- if (self.__failure is not None or
- condition is None or condition()):
- break
- if self.__timeout is not None:
- self.io_loop.remove_timeout(self.__timeout)
- self.__timeout = None
- assert self.__stopped
- self.__stopped = False
- self.__rethrow()
- result = self.__stop_args
- self.__stop_args = None
- return result
-
-
-class AsyncHTTPTestCase(AsyncTestCase):
- """A test case that starts up an HTTP server.
-
- Subclasses must override `get_app()`, which returns the
- `tornado.web.Application` (or other `.HTTPServer` callback) to be tested.
- Tests will typically use the provided ``self.http_client`` to fetch
- URLs from this server.
-
- Example, assuming the "Hello, world" example from the user guide is in
- ``hello.py``::
-
- import hello
-
- class TestHelloApp(AsyncHTTPTestCase):
- def get_app(self):
- return hello.make_app()
-
- def test_homepage(self):
- response = self.fetch('/')
- self.assertEqual(response.code, 200)
-                self.assertEqual(response.body, b'Hello, world')
-
- That call to ``self.fetch()`` is equivalent to ::
-
- self.http_client.fetch(self.get_url('/'), self.stop)
- response = self.wait()
-
- which illustrates how AsyncTestCase can turn an asynchronous operation,
- like ``http_client.fetch()``, into a synchronous operation. If you need
- to do other asynchronous operations in tests, you'll probably need to use
- ``stop()`` and ``wait()`` yourself.
- """
- def setUp(self):
- super(AsyncHTTPTestCase, self).setUp()
- sock, port = bind_unused_port()
- self.__port = port
-
- self.http_client = self.get_http_client()
- self._app = self.get_app()
- self.http_server = self.get_http_server()
- self.http_server.add_sockets([sock])
-
- def get_http_client(self):
- return AsyncHTTPClient(io_loop=self.io_loop)
-
- def get_http_server(self):
- return HTTPServer(self._app, io_loop=self.io_loop,
- **self.get_httpserver_options())
-
- def get_app(self):
- """Should be overridden by subclasses to return a
- `tornado.web.Application` or other `.HTTPServer` callback.
- """
- raise NotImplementedError()
-
- def fetch(self, path, **kwargs):
- """Convenience method to synchronously fetch a url.
-
- The given path will be appended to the local server's host and
- port. Any additional kwargs will be passed directly to
- `.AsyncHTTPClient.fetch` (and so could be used to pass
- ``method="POST"``, ``body="..."``, etc).
- """
- self.http_client.fetch(self.get_url(path), self.stop, **kwargs)
- return self.wait()
-
- def get_httpserver_options(self):
- """May be overridden by subclasses to return additional
- keyword arguments for the server.
- """
- return {}
-
- def get_http_port(self):
- """Returns the port used by the server.
-
- A new port is chosen for each test.
- """
- return self.__port
-
- def get_protocol(self):
- return 'http'
-
- def get_url(self, path):
- """Returns an absolute url for the given path on the test server."""
- return '%s://127.0.0.1:%s%s' % (self.get_protocol(),
- self.get_http_port(), path)
-
- def tearDown(self):
- self.http_server.stop()
- self.io_loop.run_sync(self.http_server.close_all_connections,
- timeout=get_async_test_timeout())
- if (not IOLoop.initialized() or
- self.http_client.io_loop is not IOLoop.instance()):
- self.http_client.close()
- super(AsyncHTTPTestCase, self).tearDown()
-
-
-class AsyncHTTPSTestCase(AsyncHTTPTestCase):
- """A test case that starts an HTTPS server.
-
- Interface is generally the same as `AsyncHTTPTestCase`.
- """
- def get_http_client(self):
- return AsyncHTTPClient(io_loop=self.io_loop, force_instance=True,
- defaults=dict(validate_cert=False))
-
- def get_httpserver_options(self):
- return dict(ssl_options=self.get_ssl_options())
-
- def get_ssl_options(self):
- """May be overridden by subclasses to select SSL options.
-
- By default includes a self-signed testing certificate.
- """
- # Testing keys were generated with:
- # openssl req -new -keyout tornado/test/test.key -out tornado/test/test.crt -nodes -days 3650 -x509
- module_dir = os.path.dirname(__file__)
- return dict(
- certfile=os.path.join(module_dir, 'test', 'test.crt'),
- keyfile=os.path.join(module_dir, 'test', 'test.key'))
-
- def get_protocol(self):
- return 'https'
-
-
-def gen_test(func=None, timeout=None):
- """Testing equivalent of ``@gen.coroutine``, to be applied to test methods.
-
- ``@gen.coroutine`` cannot be used on tests because the `.IOLoop` is not
- already running. ``@gen_test`` should be applied to test methods
- on subclasses of `AsyncTestCase`.
-
- Example::
-
- class MyTest(AsyncHTTPTestCase):
- @gen_test
- def test_something(self):
- response = yield gen.Task(self.fetch('/'))
-
- By default, ``@gen_test`` times out after 5 seconds. The timeout may be
- overridden globally with the ``ASYNC_TEST_TIMEOUT`` environment variable,
- or for each test with the ``timeout`` keyword argument::
-
- class MyTest(AsyncHTTPTestCase):
- @gen_test(timeout=10)
- def test_something_slow(self):
- response = yield gen.Task(self.fetch('/'))
-
- .. versionadded:: 3.1
- The ``timeout`` argument and ``ASYNC_TEST_TIMEOUT`` environment
- variable.
-
- .. versionchanged:: 4.0
- The wrapper now passes along ``*args, **kwargs`` so it can be used
- on functions with arguments.
- """
- if timeout is None:
- timeout = get_async_test_timeout()
-
- def wrap(f):
- # Stack up several decorators to allow us to access the generator
- # object itself. In the innermost wrapper, we capture the generator
- # and save it in an attribute of self. Next, we run the wrapped
- # function through @gen.coroutine. Finally, the coroutine is
- # wrapped again to make it synchronous with run_sync.
- #
- # This is a good case study arguing for either some sort of
- # extensibility in the gen decorators or cancellation support.
- @functools.wraps(f)
- def pre_coroutine(self, *args, **kwargs):
- result = f(self, *args, **kwargs)
- if isinstance(result, GeneratorType) or iscoroutine(result):
- self._test_generator = result
- else:
- self._test_generator = None
- return result
-
- if iscoroutinefunction(f):
- coro = pre_coroutine
- else:
- coro = gen.coroutine(pre_coroutine)
-
- @functools.wraps(coro)
- def post_coroutine(self, *args, **kwargs):
- try:
- return self.io_loop.run_sync(
- functools.partial(coro, self, *args, **kwargs),
- timeout=timeout)
- except TimeoutError as e:
- # run_sync raises an error with an unhelpful traceback.
- # Throw it back into the generator or coroutine so the stack
- # trace is replaced by the point where the test is stopped.
- self._test_generator.throw(e)
- # In case the test contains an overly broad except clause,
- # we may get back here. In this case re-raise the original
- # exception, which is better than nothing.
- raise
- return post_coroutine
-
- if func is not None:
- # Used like:
- # @gen_test
- # def f(self):
- # pass
- return wrap(func)
- else:
- # Used like @gen_test(timeout=10)
- return wrap
-
-
-# Without this attribute, nosetests will try to run gen_test as a test
-# anywhere it is imported.
-gen_test.__test__ = False # type: ignore
-
-
-class LogTrapTestCase(unittest.TestCase):
- """A test case that captures and discards all logging output
- if the test passes.
-
- Some libraries can produce a lot of logging output even when
- the test succeeds, so this class can be useful to minimize the noise.
- Simply use it as a base class for your test case. It is safe to combine
- with AsyncTestCase via multiple inheritance
- (``class MyTestCase(AsyncHTTPTestCase, LogTrapTestCase):``)
-
- This class assumes that only one log handler is configured and
- that it is a `~logging.StreamHandler`. This is true for both
- `logging.basicConfig` and the "pretty logging" configured by
- `tornado.options`. It is not compatible with other log buffering
- mechanisms, such as those provided by some test runners.
-
- .. deprecated:: 4.1
- Use the unittest module's ``--buffer`` option instead, or `.ExpectLog`.
- """
- def run(self, result=None):
- logger = logging.getLogger()
- if not logger.handlers:
- logging.basicConfig()
- handler = logger.handlers[0]
- if (len(logger.handlers) > 1 or
- not isinstance(handler, logging.StreamHandler)):
- # Logging has been configured in a way we don't recognize,
- # so just leave it alone.
- super(LogTrapTestCase, self).run(result)
- return
- old_stream = handler.stream
- try:
- handler.stream = StringIO()
- gen_log.info("RUNNING TEST: " + str(self))
- old_error_count = len(result.failures) + len(result.errors)
- super(LogTrapTestCase, self).run(result)
- new_error_count = len(result.failures) + len(result.errors)
- if new_error_count != old_error_count:
- old_stream.write(handler.stream.getvalue())
- finally:
- handler.stream = old_stream
-
-
-class ExpectLog(logging.Filter):
- """Context manager to capture and suppress expected log output.
-
- Useful to make tests of error conditions less noisy, while still
- leaving unexpected log entries visible. *Not thread safe.*
-
- The attribute ``logged_stack`` is set to true if any exception
- stack trace was logged.
-
- Usage::
-
- with ExpectLog('tornado.application', "Uncaught exception"):
- error_response = self.fetch("/some_page")
-
- .. versionchanged:: 4.3
- Added the ``logged_stack`` attribute.
- """
- def __init__(self, logger, regex, required=True):
- """Constructs an ExpectLog context manager.
-
- :param logger: Logger object (or name of logger) to watch. Pass
- an empty string to watch the root logger.
- :param regex: Regular expression to match. Any log entries on
- the specified logger that match this regex will be suppressed.
- :param required: If true, an exception will be raised if the end of
- the ``with`` statement is reached without matching any log entries.
- """
- if isinstance(logger, basestring_type):
- logger = logging.getLogger(logger)
- self.logger = logger
- self.regex = re.compile(regex)
- self.required = required
- self.matched = False
- self.logged_stack = False
-
- def filter(self, record):
- if record.exc_info:
- self.logged_stack = True
- message = record.getMessage()
- if self.regex.match(message):
- self.matched = True
- return False
- return True
-
- def __enter__(self):
- self.logger.addFilter(self)
- return self
-
- def __exit__(self, typ, value, tb):
- self.logger.removeFilter(self)
- if not typ and self.required and not self.matched:
- raise Exception("did not get expected log message")
-
-
-def main(**kwargs):
- """A simple test runner.
-
- This test runner is essentially equivalent to `unittest.main` from
- the standard library, but adds support for tornado-style option
- parsing and log formatting. It is *not* necessary to use this
- `main` function to run tests using `AsyncTestCase`; these tests
- are self-contained and can run with any test runner.
-
- The easiest way to run a test is via the command line::
-
- python -m tornado.testing tornado.test.stack_context_test
-
- See the standard library unittest module for ways in which tests can
- be specified.
-
- Projects with many tests may wish to define a test script like
- ``tornado/test/runtests.py``. This script should define a method
- ``all()`` which returns a test suite and then call
- `tornado.testing.main()`. Note that even when a test script is
- used, the ``all()`` test suite may be overridden by naming a
- single test on the command line::
-
- # Runs all tests
- python -m tornado.test.runtests
- # Runs one test
- python -m tornado.test.runtests tornado.test.stack_context_test
-
-    Additional keyword arguments are passed through to ``unittest.main()``.
- For example, use ``tornado.testing.main(verbosity=2)``
- to show many test details as they are run.
+#!/usr/bin/env python
+"""Support classes for automated testing.
+
+* `AsyncTestCase` and `AsyncHTTPTestCase`: Subclasses of unittest.TestCase
+ with additional support for testing asynchronous (`.IOLoop`-based) code.
+
+* `ExpectLog` and `LogTrapTestCase`: Make test logs less spammy.
+
+* `main()`: A simple test runner (wrapper around unittest.main()) with support
+ for the tornado.autoreload module to rerun the tests when code changes.
+"""
+
+from __future__ import absolute_import, division, print_function
+
+try:
+ from tornado import gen
+ from tornado.httpclient import AsyncHTTPClient
+ from tornado.httpserver import HTTPServer
+ from tornado.simple_httpclient import SimpleAsyncHTTPClient
+ from tornado.ioloop import IOLoop, TimeoutError
+ from tornado import netutil
+ from tornado.process import Subprocess
+except ImportError:
+ # These modules are not importable on app engine. Parts of this module
+ # won't work, but e.g. LogTrapTestCase and main() will.
+ AsyncHTTPClient = None # type: ignore
+ gen = None # type: ignore
+ HTTPServer = None # type: ignore
+ IOLoop = None # type: ignore
+ netutil = None # type: ignore
+ SimpleAsyncHTTPClient = None # type: ignore
+ Subprocess = None # type: ignore
+from tornado.log import gen_log, app_log
+from tornado.stack_context import ExceptionStackContext
+from tornado.util import raise_exc_info, basestring_type, PY3
+import functools
+import inspect
+import logging
+import os
+import re
+import signal
+import socket
+import sys
+
+if PY3:
+ from io import StringIO
+else:
+ from cStringIO import StringIO
+
+try:
+ from collections.abc import Generator as GeneratorType # type: ignore
+except ImportError:
+ from types import GeneratorType # type: ignore
+
+if sys.version_info >= (3, 5):
+ iscoroutine = inspect.iscoroutine # type: ignore
+ iscoroutinefunction = inspect.iscoroutinefunction # type: ignore
+else:
+ iscoroutine = iscoroutinefunction = lambda f: False
+
+# Tornado's own test suite requires the updated unittest module
+# (either py27+ or unittest2) so tornado.test.util enforces
+# this requirement, but for other users of tornado.testing we want
+# to allow the older version if unittest2 is not available.
+if PY3:
+ # On python 3, mixing unittest2 and unittest (including doctest)
+ # doesn't seem to work, so always use unittest.
+ import unittest
+else:
+ # On python 2, prefer unittest2 when available.
+ try:
+ import unittest2 as unittest # type: ignore
+ except ImportError:
+ import unittest # type: ignore
+
+_next_port = 10000
+
+
+def get_unused_port():
+ """Returns a (hopefully) unused port number.
+
+ This function does not guarantee that the port it returns is available,
+    only that a series of get_unused_port calls in a single process returns
+ distinct ports.
+
+ .. deprecated::
+ Use bind_unused_port instead, which is guaranteed to find an unused port.
+ """
+ global _next_port
+ port = _next_port
+ _next_port = _next_port + 1
+ return port
+
+
+def bind_unused_port(reuse_port=False):
+ """Binds a server socket to an available port on localhost.
+
+ Returns a tuple (socket, port).
+
+ .. versionchanged:: 4.4
+ Always binds to ``127.0.0.1`` without resolving the name
+ ``localhost``.
+ """
+ sock = netutil.bind_sockets(None, '127.0.0.1', family=socket.AF_INET,
+ reuse_port=reuse_port)[0]
+ port = sock.getsockname()[1]
+ return sock, port
+
+
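+# Illustrative sketch (not part of the upstream module): binding first and
+# then reading the kernel-assigned port leaves no window for another process
+# to claim it. Hypothetical helper name.
+def _example_bind_unused_port():
+    sock, port = bind_unused_port()
+    try:
+        # A server would take ownership via HTTPServer.add_sockets([sock]).
+        return port
+    finally:
+        sock.close()
+
+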
+def get_async_test_timeout():
+ """Get the global timeout setting for async tests.
+
+ Returns a float, the timeout in seconds.
+
+ .. versionadded:: 3.1
+ """
+ try:
+ return float(os.environ.get('ASYNC_TEST_TIMEOUT'))
+ except (ValueError, TypeError):
+        return 5.0
+
+
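+# Illustrative sketch (not part of the upstream module): the global timeout
+# can be raised for slow CI machines without touching individual tests. This
+# mutates the process environment and is for illustration only.
+def _example_async_test_timeout():
+    os.environ['ASYNC_TEST_TIMEOUT'] = '30'
+    # get_async_test_timeout() now returns 30.0 rather than the default
+    # of 5 seconds.
+    return get_async_test_timeout()
+
+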
+class _TestMethodWrapper(object):
+ """Wraps a test method to raise an error if it returns a value.
+
+ This is mainly used to detect undecorated generators (if a test
+ method yields it must use a decorator to consume the generator),
+ but will also detect other kinds of return values (these are not
+ necessarily errors, but we alert anyway since there is no good
+ reason to return a value from a test).
+ """
+ def __init__(self, orig_method):
+ self.orig_method = orig_method
+
+ def __call__(self, *args, **kwargs):
+ result = self.orig_method(*args, **kwargs)
+ if isinstance(result, GeneratorType) or iscoroutine(result):
+ raise TypeError("Generator and coroutine test methods should be"
+ " decorated with tornado.testing.gen_test")
+ elif result is not None:
+ raise ValueError("Return value from test method ignored: %r" %
+ result)
+
+ def __getattr__(self, name):
+ """Proxy all unknown attributes to the original method.
+
+ This is important for some of the decorators in the `unittest`
+ module, such as `unittest.skipIf`.
+ """
+ return getattr(self.orig_method, name)
+
+
+class AsyncTestCase(unittest.TestCase):
+ """`~unittest.TestCase` subclass for testing `.IOLoop`-based
+ asynchronous code.
+
+ The unittest framework is synchronous, so the test must be
+ complete by the time the test method returns. This means that
+ asynchronous code cannot be used in quite the same way as usual.
+ To write test functions that use the same ``yield``-based patterns
+ used with the `tornado.gen` module, decorate your test methods
+ with `tornado.testing.gen_test` instead of
+ `tornado.gen.coroutine`. This class also provides the `stop()`
+ and `wait()` methods for a more manual style of testing. The test
+ method itself must call ``self.wait()``, and asynchronous
+ callbacks should call ``self.stop()`` to signal completion.
+
+ By default, a new `.IOLoop` is constructed for each test and is available
+ as ``self.io_loop``. This `.IOLoop` should be used in the construction of
+ HTTP clients/servers, etc. If the code being tested requires a
+ global `.IOLoop`, subclasses should override `get_new_ioloop` to return it.
+
+ The `.IOLoop`'s ``start`` and ``stop`` methods should not be
+ called directly. Instead, use `self.stop <stop>` and `self.wait
+ <wait>`. Arguments passed to ``self.stop`` are returned from
+ ``self.wait``. It is possible to have multiple ``wait``/``stop``
+ cycles in the same test.
+
+ Example::
+
+ # This test uses coroutine style.
+ class MyTestCase(AsyncTestCase):
+ @tornado.testing.gen_test
+ def test_http_fetch(self):
+ client = AsyncHTTPClient(self.io_loop)
+ response = yield client.fetch("http://www.tornadoweb.org")
+ # Test contents of response
+ self.assertIn("FriendFeed", response.body)
+
+ # This test uses argument passing between self.stop and self.wait.
+ class MyTestCase2(AsyncTestCase):
+ def test_http_fetch(self):
+ client = AsyncHTTPClient(self.io_loop)
+ client.fetch("http://www.tornadoweb.org/", self.stop)
+ response = self.wait()
+ # Test contents of response
+ self.assertIn("FriendFeed", response.body)
+
+ # This test uses an explicit callback-based style.
+ class MyTestCase3(AsyncTestCase):
+ def test_http_fetch(self):
+ client = AsyncHTTPClient(self.io_loop)
+ client.fetch("http://www.tornadoweb.org/", self.handle_fetch)
+ self.wait()
+
+ def handle_fetch(self, response):
+ # Test contents of response (failures and exceptions here
+ # will cause self.wait() to throw an exception and end the
+ # test).
+ # Exceptions thrown here are magically propagated to
+ # self.wait() in test_http_fetch() via stack_context.
+ self.assertIn("FriendFeed", response.body)
+ self.stop()
+ """
+ def __init__(self, methodName='runTest'):
+ super(AsyncTestCase, self).__init__(methodName)
+ self.__stopped = False
+ self.__running = False
+ self.__failure = None
+ self.__stop_args = None
+ self.__timeout = None
+
+ # It's easy to forget the @gen_test decorator, but if you do
+ # the test will silently be ignored because nothing will consume
+ # the generator. Replace the test method with a wrapper that will
+ # make sure it's not an undecorated generator.
+ setattr(self, methodName, _TestMethodWrapper(getattr(self, methodName)))
+
+ def setUp(self):
+ super(AsyncTestCase, self).setUp()
+ self.io_loop = self.get_new_ioloop()
+ self.io_loop.make_current()
+
+ def tearDown(self):
+ # Clean up Subprocess, so it can be used again with a new ioloop.
+ Subprocess.uninitialize()
+ self.io_loop.clear_current()
+ if (not IOLoop.initialized() or
+ self.io_loop is not IOLoop.instance()):
+ # Try to clean up any file descriptors left open in the ioloop.
+ # This avoids leaks, especially when tests are run repeatedly
+ # in the same process with autoreload (because curl does not
+ # set FD_CLOEXEC on its file descriptors)
+ self.io_loop.close(all_fds=True)
+ super(AsyncTestCase, self).tearDown()
+ # In case an exception escaped or the StackContext caught an exception
+ # when there wasn't a wait() to re-raise it, do so here.
+ # This is our last chance to raise an exception in a way that the
+ # unittest machinery understands.
+ self.__rethrow()
+
+ def get_new_ioloop(self):
+ """Creates a new `.IOLoop` for this test. May be overridden in
+ subclasses for tests that require a specific `.IOLoop` (usually
+ the singleton `.IOLoop.instance()`).
+ """
+ return IOLoop()
+
+ def _handle_exception(self, typ, value, tb):
+ if self.__failure is None:
+ self.__failure = (typ, value, tb)
+ else:
+ app_log.error("multiple unhandled exceptions in test",
+ exc_info=(typ, value, tb))
+ self.stop()
+ return True
+
+ def __rethrow(self):
+ if self.__failure is not None:
+ failure = self.__failure
+ self.__failure = None
+ raise_exc_info(failure)
+
+ def run(self, result=None):
+ with ExceptionStackContext(self._handle_exception):
+ super(AsyncTestCase, self).run(result)
+ # As a last resort, if an exception escaped super.run() and wasn't
+ # re-raised in tearDown, raise it here. This will cause the
+ # unittest run to fail messily, but that's better than silently
+ # ignoring an error.
+ self.__rethrow()
+
+ def stop(self, _arg=None, **kwargs):
+ """Stops the `.IOLoop`, causing one pending (or future) call to `wait()`
+ to return.
+
+ Keyword arguments or a single positional argument passed to `stop()` are
+ saved and will be returned by `wait()`.
+ """
+ assert _arg is None or not kwargs
+ self.__stop_args = kwargs or _arg
+ if self.__running:
+ self.io_loop.stop()
+ self.__running = False
+ self.__stopped = True
+
+ def wait(self, condition=None, timeout=None):
+ """Runs the `.IOLoop` until stop is called or timeout has passed.
+
+ In the event of a timeout, an exception will be thrown. The
+ default timeout is 5 seconds; it may be overridden with a
+ ``timeout`` keyword argument or globally with the
+ ``ASYNC_TEST_TIMEOUT`` environment variable.
+
+ If ``condition`` is not None, the `.IOLoop` will be restarted
+ after `stop()` until ``condition()`` returns true.
+
+ .. versionchanged:: 3.1
+ Added the ``ASYNC_TEST_TIMEOUT`` environment variable.
+ """
+ if timeout is None:
+ timeout = get_async_test_timeout()
+
+ if not self.__stopped:
+ if timeout:
+ def timeout_func():
+ try:
+ raise self.failureException(
+ 'Async operation timed out after %s seconds' %
+ timeout)
+ except Exception:
+ self.__failure = sys.exc_info()
+ self.stop()
+ self.__timeout = self.io_loop.add_timeout(self.io_loop.time() + timeout, timeout_func)
+ while True:
+ self.__running = True
+ self.io_loop.start()
+ if (self.__failure is not None or
+ condition is None or condition()):
+ break
+ if self.__timeout is not None:
+ self.io_loop.remove_timeout(self.__timeout)
+ self.__timeout = None
+ assert self.__stopped
+ self.__stopped = False
+ self.__rethrow()
+ result = self.__stop_args
+ self.__stop_args = None
+ return result
+
+
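+# Illustrative sketch (not part of the upstream module): wait(condition=...)
+# restarts the IOLoop after each stop() until the condition holds. The class
+# name is hypothetical; __test__ keeps it out of test discovery.
+class _ExampleConditionTest(AsyncTestCase):
+    __test__ = False
+
+    def test_multiple_callbacks(self):
+        results = []
+        for i in range(3):
+            self.io_loop.add_callback(
+                lambda i=i: (results.append(i), self.stop()))
+        # Keep running until all three callbacks have fired.
+        self.wait(condition=lambda: len(results) == 3)
+        self.assertEqual(results, [0, 1, 2])
+
+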
+class AsyncHTTPTestCase(AsyncTestCase):
+ """A test case that starts up an HTTP server.
+
+ Subclasses must override `get_app()`, which returns the
+ `tornado.web.Application` (or other `.HTTPServer` callback) to be tested.
+ Tests will typically use the provided ``self.http_client`` to fetch
+ URLs from this server.
+
+ Example, assuming the "Hello, world" example from the user guide is in
+ ``hello.py``::
+
+ import hello
+
+ class TestHelloApp(AsyncHTTPTestCase):
+ def get_app(self):
+ return hello.make_app()
+
+ def test_homepage(self):
+ response = self.fetch('/')
+ self.assertEqual(response.code, 200)
+                self.assertEqual(response.body, b'Hello, world')
+
+ That call to ``self.fetch()`` is equivalent to ::
+
+ self.http_client.fetch(self.get_url('/'), self.stop)
+ response = self.wait()
+
+ which illustrates how AsyncTestCase can turn an asynchronous operation,
+ like ``http_client.fetch()``, into a synchronous operation. If you need
+ to do other asynchronous operations in tests, you'll probably need to use
+ ``stop()`` and ``wait()`` yourself.
+ """
+ def setUp(self):
+ super(AsyncHTTPTestCase, self).setUp()
+ sock, port = bind_unused_port()
+ self.__port = port
+
+ self.http_client = self.get_http_client()
+ self._app = self.get_app()
+ self.http_server = self.get_http_server()
+ self.http_server.add_sockets([sock])
+
+ def get_http_client(self):
+ return AsyncHTTPClient(io_loop=self.io_loop)
+
+ def get_http_server(self):
+ return HTTPServer(self._app, io_loop=self.io_loop,
+ **self.get_httpserver_options())
+
+ def get_app(self):
+ """Should be overridden by subclasses to return a
+ `tornado.web.Application` or other `.HTTPServer` callback.
+ """
+ raise NotImplementedError()
+
+ def fetch(self, path, **kwargs):
+ """Convenience method to synchronously fetch a url.
+
+ The given path will be appended to the local server's host and
+ port. Any additional kwargs will be passed directly to
+ `.AsyncHTTPClient.fetch` (and so could be used to pass
+ ``method="POST"``, ``body="..."``, etc).
+ """
+ self.http_client.fetch(self.get_url(path), self.stop, **kwargs)
+ return self.wait()
+
+ def get_httpserver_options(self):
+ """May be overridden by subclasses to return additional
+ keyword arguments for the server.
+ """
+ return {}
+
+ def get_http_port(self):
+ """Returns the port used by the server.
+
+ A new port is chosen for each test.
+ """
+ return self.__port
+
+ def get_protocol(self):
+ return 'http'
+
+ def get_url(self, path):
+ """Returns an absolute url for the given path on the test server."""
+ return '%s://127.0.0.1:%s%s' % (self.get_protocol(),
+ self.get_http_port(), path)
+
+ def tearDown(self):
+ self.http_server.stop()
+ self.io_loop.run_sync(self.http_server.close_all_connections,
+ timeout=get_async_test_timeout())
+ if (not IOLoop.initialized() or
+ self.http_client.io_loop is not IOLoop.instance()):
+ self.http_client.close()
+ super(AsyncHTTPTestCase, self).tearDown()
+
+
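+# Illustrative sketch (not part of the upstream module): extra fetch() kwargs
+# are forwarded to AsyncHTTPClient.fetch(), so POST bodies work directly. The
+# class and handler names are hypothetical.
+class _ExamplePostTest(AsyncHTTPTestCase):
+    __test__ = False
+
+    def get_app(self):
+        import tornado.web
+
+        class EchoHandler(tornado.web.RequestHandler):
+            def post(self):
+                self.write(self.request.body)
+
+        return tornado.web.Application([(r"/echo", EchoHandler)])
+
+    def test_post_echo(self):
+        response = self.fetch("/echo", method="POST", body="hello")
+        self.assertEqual(response.body, b"hello")
+
+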
+class AsyncHTTPSTestCase(AsyncHTTPTestCase):
+ """A test case that starts an HTTPS server.
+
+ Interface is generally the same as `AsyncHTTPTestCase`.
+ """
+ def get_http_client(self):
+ return AsyncHTTPClient(io_loop=self.io_loop, force_instance=True,
+ defaults=dict(validate_cert=False))
+
+ def get_httpserver_options(self):
+ return dict(ssl_options=self.get_ssl_options())
+
+ def get_ssl_options(self):
+ """May be overridden by subclasses to select SSL options.
+
+ By default includes a self-signed testing certificate.
+ """
+ # Testing keys were generated with:
+ # openssl req -new -keyout tornado/test/test.key -out tornado/test/test.crt -nodes -days 3650 -x509
+ module_dir = os.path.dirname(__file__)
+ return dict(
+ certfile=os.path.join(module_dir, 'test', 'test.crt'),
+ keyfile=os.path.join(module_dir, 'test', 'test.key'))
+
+ def get_protocol(self):
+ return 'https'
+
+
+def gen_test(func=None, timeout=None):
+ """Testing equivalent of ``@gen.coroutine``, to be applied to test methods.
+
+ ``@gen.coroutine`` cannot be used on tests because the `.IOLoop` is not
+ already running. ``@gen_test`` should be applied to test methods
+ on subclasses of `AsyncTestCase`.
+
+ Example::
+
+ class MyTest(AsyncHTTPTestCase):
+ @gen_test
+ def test_something(self):
+ response = yield gen.Task(self.fetch('/'))
+
+ By default, ``@gen_test`` times out after 5 seconds. The timeout may be
+ overridden globally with the ``ASYNC_TEST_TIMEOUT`` environment variable,
+ or for each test with the ``timeout`` keyword argument::
+
+ class MyTest(AsyncHTTPTestCase):
+ @gen_test(timeout=10)
+ def test_something_slow(self):
+ response = yield gen.Task(self.fetch('/'))
+
+ .. versionadded:: 3.1
+ The ``timeout`` argument and ``ASYNC_TEST_TIMEOUT`` environment
+ variable.
+
+ .. versionchanged:: 4.0
+ The wrapper now passes along ``*args, **kwargs`` so it can be used
+ on functions with arguments.
+ """
+ if timeout is None:
+ timeout = get_async_test_timeout()
+
+ def wrap(f):
+ # Stack up several decorators to allow us to access the generator
+ # object itself. In the innermost wrapper, we capture the generator
+ # and save it in an attribute of self. Next, we run the wrapped
+ # function through @gen.coroutine. Finally, the coroutine is
+ # wrapped again to make it synchronous with run_sync.
+ #
+ # This is a good case study arguing for either some sort of
+ # extensibility in the gen decorators or cancellation support.
+ @functools.wraps(f)
+ def pre_coroutine(self, *args, **kwargs):
+ result = f(self, *args, **kwargs)
+ if isinstance(result, GeneratorType) or iscoroutine(result):
+ self._test_generator = result
+ else:
+ self._test_generator = None
+ return result
+
+ if iscoroutinefunction(f):
+ coro = pre_coroutine
+ else:
+ coro = gen.coroutine(pre_coroutine)
+
+ @functools.wraps(coro)
+ def post_coroutine(self, *args, **kwargs):
+ try:
+ return self.io_loop.run_sync(
+ functools.partial(coro, self, *args, **kwargs),
+ timeout=timeout)
+ except TimeoutError as e:
+ # run_sync raises an error with an unhelpful traceback.
+ # Throw it back into the generator or coroutine so the stack
+ # trace is replaced by the point where the test is stopped.
+ self._test_generator.throw(e)
+ # In case the test contains an overly broad except clause,
+ # we may get back here. In this case re-raise the original
+ # exception, which is better than nothing.
+ raise
+ return post_coroutine
+
+ if func is not None:
+ # Used like:
+ # @gen_test
+ # def f(self):
+ # pass
+ return wrap(func)
+ else:
+ # Used like @gen_test(timeout=10)
+ return wrap
+
+
+# Without this attribute, nosetests will try to run gen_test as a test
+# anywhere it is imported.
+gen_test.__test__ = False # type: ignore
+
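The closing ``if func is not None`` branch of ``gen_test`` is the usual optional-argument decorator pattern; a minimal standalone sketch of the same idea (``retry`` and ``attempts`` are illustrative names, not Tornado APIs)::

    import functools

    def retry(func=None, attempts=3):
        def wrap(f):
            @functools.wraps(f)
            def wrapper(*args, **kwargs):
                for i in range(attempts):
                    try:
                        return f(*args, **kwargs)
                    except Exception:
                        if i == attempts - 1:
                            raise
            return wrapper
        if func is not None:
            return wrap(func)    # bare use: @retry
        return wrap              # parametrized use: @retry(attempts=5)
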
+
+class LogTrapTestCase(unittest.TestCase):
+ """A test case that captures and discards all logging output
+ if the test passes.
+
+ Some libraries can produce a lot of logging output even when
+ the test succeeds, so this class can be useful to minimize the noise.
+ Simply use it as a base class for your test case. It is safe to combine
+ with AsyncTestCase via multiple inheritance
+ (``class MyTestCase(AsyncHTTPTestCase, LogTrapTestCase):``).
+
+ This class assumes that only one log handler is configured and
+ that it is a `~logging.StreamHandler`. This is true for both
+ `logging.basicConfig` and the "pretty logging" configured by
+ `tornado.options`. It is not compatible with other log buffering
+ mechanisms, such as those provided by some test runners.
+
+ .. deprecated:: 4.1
+ Use the unittest module's ``--buffer`` option instead, or `.ExpectLog`.
+ """
+ def run(self, result=None):
+ logger = logging.getLogger()
+ if not logger.handlers:
+ logging.basicConfig()
+ handler = logger.handlers[0]
+ if (len(logger.handlers) > 1 or
+ not isinstance(handler, logging.StreamHandler)):
+ # Logging has been configured in a way we don't recognize,
+ # so just leave it alone.
+ super(LogTrapTestCase, self).run(result)
+ return
+ old_stream = handler.stream
+ try:
+ handler.stream = StringIO()
+ gen_log.info("RUNNING TEST: " + str(self))
+ old_error_count = len(result.failures) + len(result.errors)
+ super(LogTrapTestCase, self).run(result)
+ new_error_count = len(result.failures) + len(result.errors)
+ if new_error_count != old_error_count:
+ old_stream.write(handler.stream.getvalue())
+ finally:
+ handler.stream = old_stream
+
+
+class ExpectLog(logging.Filter):
+ """Context manager to capture and suppress expected log output.
+
+ Useful to make tests of error conditions less noisy, while still
+ leaving unexpected log entries visible. *Not thread safe.*
+
+ The attribute ``logged_stack`` is set to true if any exception
+ stack trace was logged.
+
+ Usage::
+
+ with ExpectLog('tornado.application', "Uncaught exception"):
+ error_response = self.fetch("/some_page")
+
+ .. versionchanged:: 4.3
+ Added the ``logged_stack`` attribute.
+ """
+ def __init__(self, logger, regex, required=True):
+ """Constructs an ExpectLog context manager.
+
+ :param logger: Logger object (or name of logger) to watch. Pass
+ an empty string to watch the root logger.
+ :param regex: Regular expression to match. Any log entries on
+ the specified logger that match this regex will be suppressed.
+ :param required: If true, an exception will be raised if the end of
+ the ``with`` statement is reached without matching any log entries.
+ """
+ if isinstance(logger, basestring_type):
+ logger = logging.getLogger(logger)
+ self.logger = logger
+ self.regex = re.compile(regex)
+ self.required = required
+ self.matched = False
+ self.logged_stack = False
+
+ def filter(self, record):
+ if record.exc_info:
+ self.logged_stack = True
+ message = record.getMessage()
+ if self.regex.match(message):
+ self.matched = True
+ return False
+ return True
+
+ def __enter__(self):
+ self.logger.addFilter(self)
+ return self
+
+ def __exit__(self, typ, value, tb):
+ self.logger.removeFilter(self)
+ if not typ and self.required and not self.matched:
+ raise Exception("did not get expected log message")
+
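A hedged sketch of the two knobs documented above, inside a hypothetical ``AsyncHTTPTestCase`` method (the endpoint names are illustrative)::

    with ExpectLog('tornado.application', "Uncaught exception") as expect:
        response = self.fetch("/broken")        # hypothetical failing page
    self.assertEqual(response.code, 500)
    self.assertTrue(expect.logged_stack)        # a traceback was logged

    # required=False: suppress the message if it appears, but don't
    # fail the test when it doesn't.
    with ExpectLog('tornado.general', "Connection reset", required=False):
        self.fetch("/maybe_flaky")              # hypothetical
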
+
+def main(**kwargs):
+ """A simple test runner.
+
+ This test runner is essentially equivalent to `unittest.main` from
+ the standard library, but adds support for tornado-style option
+ parsing and log formatting. It is *not* necessary to use this
+ `main` function to run tests using `AsyncTestCase`; these tests
+ are self-contained and can run with any test runner.
+
+ The easiest way to run a test is via the command line::
+
+ python -m tornado.testing tornado.test.stack_context_test
+
+ See the standard library unittest module for ways in which tests can
+ be specified.
+
+ Projects with many tests may wish to define a test script like
+ ``tornado/test/runtests.py``. This script should define a method
+ ``all()`` which returns a test suite and then call
+ `tornado.testing.main()`. Note that even when a test script is
+ used, the ``all()`` test suite may be overridden by naming a
+ single test on the command line::
+
+ # Runs all tests
+ python -m tornado.test.runtests
+ # Runs one test
+ python -m tornado.test.runtests tornado.test.stack_context_test
+
+    Additional keyword arguments are passed through to ``unittest.main()``.
+ For example, use ``tornado.testing.main(verbosity=2)``
+ to show many test details as they are run.
See http://docs.python.org/library/unittest.html#unittest.main
- for full argument list.
- """
- from tornado.options import define, options, parse_command_line
-
- define('exception_on_interrupt', type=bool, default=True,
- help=("If true (default), ctrl-c raises a KeyboardInterrupt "
- "exception. This prints a stack trace but cannot interrupt "
- "certain operations. If false, the process is more reliably "
- "killed, but does not print a stack trace."))
-
- # support the same options as unittest's command-line interface
- define('verbose', type=bool)
- define('quiet', type=bool)
- define('failfast', type=bool)
- define('catch', type=bool)
- define('buffer', type=bool)
-
- argv = [sys.argv[0]] + parse_command_line(sys.argv)
-
- if not options.exception_on_interrupt:
- signal.signal(signal.SIGINT, signal.SIG_DFL)
-
- if options.verbose is not None:
- kwargs['verbosity'] = 2
- if options.quiet is not None:
- kwargs['verbosity'] = 0
- if options.failfast is not None:
- kwargs['failfast'] = True
- if options.catch is not None:
- kwargs['catchbreak'] = True
- if options.buffer is not None:
- kwargs['buffer'] = True
-
- if __name__ == '__main__' and len(argv) == 1:
- print("No tests specified", file=sys.stderr)
- sys.exit(1)
- try:
- # In order to be able to run tests by their fully-qualified name
- # on the command line without importing all tests here,
- # module must be set to None. Python 3.2's unittest.main ignores
- # defaultTest if no module is given (it tries to do its own
- # test discovery, which is incompatible with auto2to3), so don't
- # set module if we're not asking for a specific test.
- if len(argv) > 1:
- unittest.main(module=None, argv=argv, **kwargs)
- else:
- unittest.main(defaultTest="all", argv=argv, **kwargs)
- except SystemExit as e:
- if e.code == 0:
- gen_log.info('PASS')
- else:
- gen_log.error('FAIL')
- raise
-
-
-if __name__ == '__main__':
- main()
+ for full argument list.
+ """
+ from tornado.options import define, options, parse_command_line
+
+ define('exception_on_interrupt', type=bool, default=True,
+ help=("If true (default), ctrl-c raises a KeyboardInterrupt "
+ "exception. This prints a stack trace but cannot interrupt "
+ "certain operations. If false, the process is more reliably "
+ "killed, but does not print a stack trace."))
+
+ # support the same options as unittest's command-line interface
+ define('verbose', type=bool)
+ define('quiet', type=bool)
+ define('failfast', type=bool)
+ define('catch', type=bool)
+ define('buffer', type=bool)
+
+ argv = [sys.argv[0]] + parse_command_line(sys.argv)
+
+ if not options.exception_on_interrupt:
+ signal.signal(signal.SIGINT, signal.SIG_DFL)
+
+ if options.verbose is not None:
+ kwargs['verbosity'] = 2
+ if options.quiet is not None:
+ kwargs['verbosity'] = 0
+ if options.failfast is not None:
+ kwargs['failfast'] = True
+ if options.catch is not None:
+ kwargs['catchbreak'] = True
+ if options.buffer is not None:
+ kwargs['buffer'] = True
+
+ if __name__ == '__main__' and len(argv) == 1:
+ print("No tests specified", file=sys.stderr)
+ sys.exit(1)
+ try:
+ # In order to be able to run tests by their fully-qualified name
+ # on the command line without importing all tests here,
+ # module must be set to None. Python 3.2's unittest.main ignores
+ # defaultTest if no module is given (it tries to do its own
+ # test discovery, which is incompatible with auto2to3), so don't
+ # set module if we're not asking for a specific test.
+ if len(argv) > 1:
+ unittest.main(module=None, argv=argv, **kwargs)
+ else:
+ unittest.main(defaultTest="all", argv=argv, **kwargs)
+ except SystemExit as e:
+ if e.code == 0:
+ gen_log.info('PASS')
+ else:
+ gen_log.error('FAIL')
+ raise
+
+
+if __name__ == '__main__':
+ main()
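A minimal project test script of the kind described in ``main``'s docstring might look like this (module names are illustrative)::

    # runtests.py (hypothetical)
    import unittest
    import tornado.testing

    TEST_MODULES = ['myproject.test.handlers_test']   # illustrative

    def all():
        return unittest.defaultTestLoader.loadTestsFromNames(TEST_MODULES)

    if __name__ == '__main__':
        tornado.testing.main()
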
diff --git a/contrib/python/tornado/tornado-4/tornado/util.py b/contrib/python/tornado/tornado-4/tornado/util.py
index 981b94c8ea..e0a1b280fa 100644
--- a/contrib/python/tornado/tornado-4/tornado/util.py
+++ b/contrib/python/tornado/tornado-4/tornado/util.py
@@ -1,475 +1,475 @@
-"""Miscellaneous utility functions and classes.
-
-This module is used internally by Tornado. It is not necessarily expected
-that the functions and classes defined here will be useful to other
-applications, but they are documented here in case they are.
-
-The one public-facing part of this module is the `Configurable` class
-and its `~Configurable.configure` method, which becomes a part of the
-interface of its subclasses, including `.AsyncHTTPClient`, `.IOLoop`,
-and `.Resolver`.
-"""
-
-from __future__ import absolute_import, division, print_function
-
-import array
-import atexit
-import os
-import re
-import sys
-import zlib
-
-PY3 = sys.version_info >= (3,)
-
-if PY3:
- xrange = range
-
-# inspect.getargspec() raises DeprecationWarnings in Python 3.5.
-# The two functions have compatible interfaces for the parts we need.
-if PY3:
- from inspect import getfullargspec as getargspec
-else:
- from inspect import getargspec
-
-# Aliases for types that are spelled differently in different Python
-# versions. bytes_type is deprecated and no longer used in Tornado
-# itself but is left in case anyone outside Tornado is using it.
-bytes_type = bytes
-if PY3:
- unicode_type = str
- basestring_type = str
-else:
- # The names unicode and basestring don't exist in py3 so silence flake8.
- unicode_type = unicode # noqa
- basestring_type = basestring # noqa
-
-
-try:
- import typing # noqa
- from typing import cast
-
- _ObjectDictBase = typing.Dict[str, typing.Any]
-except ImportError:
- _ObjectDictBase = dict
-
- def cast(typ, x):
- return x
-else:
- # More imports that are only needed in type comments.
- import datetime # noqa
- import types # noqa
- from typing import Any, AnyStr, Union, Optional, Dict, Mapping # noqa
- from typing import Tuple, Match, Callable # noqa
-
- if PY3:
- _BaseString = str
- else:
- _BaseString = Union[bytes, unicode_type]
-
-
-try:
- from sys import is_finalizing
-except ImportError:
- # Emulate it
- def _get_emulated_is_finalizing():
- L = []
- atexit.register(lambda: L.append(None))
-
- def is_finalizing():
- # Not referencing any globals here
- return L != []
-
- return is_finalizing
-
- is_finalizing = _get_emulated_is_finalizing()
-
-
-class ObjectDict(_ObjectDictBase):
- """Makes a dictionary behave like an object, with attribute-style access.
- """
- def __getattr__(self, name):
- # type: (str) -> Any
- try:
- return self[name]
- except KeyError:
- raise AttributeError(name)
-
- def __setattr__(self, name, value):
- # type: (str, Any) -> None
- self[name] = value
-
-
-class GzipDecompressor(object):
- """Streaming gzip decompressor.
-
-    The interface is like that of `zlib.decompressobj` (without some of the
-    optional arguments), but it understands gzip headers and checksums.
- """
- def __init__(self):
- # Magic parameter makes zlib module understand gzip header
- # http://stackoverflow.com/questions/1838699/how-can-i-decompress-a-gzip-stream-with-zlib
- # This works on cpython and pypy, but not jython.
- self.decompressobj = zlib.decompressobj(16 + zlib.MAX_WBITS)
-
- def decompress(self, value, max_length=None):
- # type: (bytes, Optional[int]) -> bytes
- """Decompress a chunk, returning newly-available data.
-
- Some data may be buffered for later processing; `flush` must
- be called when there is no more input data to ensure that
- all data was processed.
-
- If ``max_length`` is given, some input data may be left over
- in ``unconsumed_tail``; you must retrieve this value and pass
- it back to a future call to `decompress` if it is not empty.
- """
- return self.decompressobj.decompress(value, max_length)
-
- @property
- def unconsumed_tail(self):
- # type: () -> bytes
- """Returns the unconsumed portion left over
- """
- return self.decompressobj.unconsumed_tail
-
- def flush(self):
- # type: () -> bytes
- """Return any remaining buffered data not yet returned by decompress.
-
- Also checks for errors such as truncated input.
- No other methods may be called on this object after `flush`.
- """
- return self.decompressobj.flush()
-
-
-def import_object(name):
- # type: (_BaseString) -> Any
- """Imports an object by name.
-
- import_object('x') is equivalent to 'import x'.
- import_object('x.y.z') is equivalent to 'from x.y import z'.
-
- >>> import tornado.escape
- >>> import_object('tornado.escape') is tornado.escape
- True
- >>> import_object('tornado.escape.utf8') is tornado.escape.utf8
- True
- >>> import_object('tornado') is tornado
- True
- >>> import_object('tornado.missing_module')
- Traceback (most recent call last):
- ...
- ImportError: No module named missing_module
- """
- if not isinstance(name, str):
- # on python 2 a byte string is required.
- name = name.encode('utf-8')
- if name.count('.') == 0:
- return __import__(name, None, None)
-
- parts = name.split('.')
- obj = __import__('.'.join(parts[:-1]), None, None, [parts[-1]], 0)
- try:
- return getattr(obj, parts[-1])
- except AttributeError:
- raise ImportError("No module named %s" % parts[-1])
-
-
-# Stubs to make mypy happy (and later for actual type-checking).
-def raise_exc_info(exc_info):
- # type: (Tuple[type, BaseException, types.TracebackType]) -> None
- pass
-
-
-def exec_in(code, glob, loc=None):
- # type: (Any, Dict[str, Any], Optional[Mapping[str, Any]]) -> Any
- if isinstance(code, basestring_type):
- # exec(string) inherits the caller's future imports; compile
- # the string first to prevent that.
- code = compile(code, '<string>', 'exec', dont_inherit=True)
- exec(code, glob, loc)
-
-
-if PY3:
- exec("""
-def raise_exc_info(exc_info):
- try:
- raise exc_info[1].with_traceback(exc_info[2])
- finally:
- exc_info = None
-
-""")
-else:
- exec("""
-def raise_exc_info(exc_info):
- raise exc_info[0], exc_info[1], exc_info[2]
-""")
-
-
-def errno_from_exception(e):
- # type: (BaseException) -> Optional[int]
- """Provides the errno from an Exception object.
-
-    In some cases the errno attribute is not set, so we pull the errno
-    out of the args; but if someone instantiates an Exception without
-    any args, indexing into args raises an error. This function
-    abstracts all of that behavior to give you a safe way to get the
-    errno.
- """
-
- if hasattr(e, 'errno'):
- return e.errno # type: ignore
- elif e.args:
- return e.args[0]
- else:
- return None
-
-
-_alphanum = frozenset(
- "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789")
-
-
-def _re_unescape_replacement(match):
- # type: (Match[str]) -> str
- group = match.group(1)
- if group[0] in _alphanum:
- raise ValueError("cannot unescape '\\\\%s'" % group[0])
- return group
-
-
-_re_unescape_pattern = re.compile(r'\\(.)', re.DOTALL)
-
-
-def re_unescape(s):
- # type: (str) -> str
- """Unescape a string escaped by `re.escape`.
-
- May raise ``ValueError`` for regular expressions which could not
- have been produced by `re.escape` (for example, strings containing
- ``\d`` cannot be unescaped).
-
- .. versionadded:: 4.4
- """
- return _re_unescape_pattern.sub(_re_unescape_replacement, s)
-
-
-class Configurable(object):
- """Base class for configurable interfaces.
-
- A configurable interface is an (abstract) class whose constructor
- acts as a factory function for one of its implementation subclasses.
- The implementation subclass as well as optional keyword arguments to
- its initializer can be set globally at runtime with `configure`.
-
- By using the constructor as the factory method, the interface
- looks like a normal class, `isinstance` works as usual, etc. This
- pattern is most useful when the choice of implementation is likely
- to be a global decision (e.g. when `~select.epoll` is available,
- always use it instead of `~select.select`), or when a
- previously-monolithic class has been split into specialized
- subclasses.
-
- Configurable subclasses must define the class methods
- `configurable_base` and `configurable_default`, and use the instance
- method `initialize` instead of ``__init__``.
- """
- __impl_class = None # type: type
- __impl_kwargs = None # type: Dict[str, Any]
-
- def __new__(cls, *args, **kwargs):
- base = cls.configurable_base()
- init_kwargs = {}
- if cls is base:
- impl = cls.configured_class()
- if base.__impl_kwargs:
- init_kwargs.update(base.__impl_kwargs)
- else:
- impl = cls
- init_kwargs.update(kwargs)
- instance = super(Configurable, cls).__new__(impl)
- # initialize vs __init__ chosen for compatibility with AsyncHTTPClient
- # singleton magic. If we get rid of that we can switch to __init__
- # here too.
- instance.initialize(*args, **init_kwargs)
- return instance
-
- @classmethod
- def configurable_base(cls):
- # type: () -> Any
- # TODO: This class needs https://github.com/python/typing/issues/107
- # to be fully typeable.
- """Returns the base class of a configurable hierarchy.
-
-        This will normally return the class in which it is defined
-        (which is *not* necessarily the same as the ``cls`` classmethod parameter).
- """
- raise NotImplementedError()
-
- @classmethod
- def configurable_default(cls):
- # type: () -> type
- """Returns the implementation class to be used if none is configured."""
- raise NotImplementedError()
-
- def initialize(self):
- # type: () -> None
- """Initialize a `Configurable` subclass instance.
-
- Configurable classes should use `initialize` instead of ``__init__``.
-
- .. versionchanged:: 4.2
- Now accepts positional arguments in addition to keyword arguments.
- """
-
- @classmethod
- def configure(cls, impl, **kwargs):
- # type: (Any, **Any) -> None
- """Sets the class to use when the base class is instantiated.
-
- Keyword arguments will be saved and added to the arguments passed
- to the constructor. This can be used to set global defaults for
- some parameters.
- """
- base = cls.configurable_base()
- if isinstance(impl, (str, unicode_type)):
- impl = import_object(impl)
- if impl is not None and not issubclass(impl, cls):
- raise ValueError("Invalid subclass of %s" % cls)
- base.__impl_class = impl
- base.__impl_kwargs = kwargs
-
- @classmethod
- def configured_class(cls):
- # type: () -> type
- """Returns the currently configured class."""
- base = cls.configurable_base()
- if cls.__impl_class is None:
- base.__impl_class = cls.configurable_default()
- return base.__impl_class
-
- @classmethod
- def _save_configuration(cls):
- # type: () -> Tuple[type, Dict[str, Any]]
- base = cls.configurable_base()
- return (base.__impl_class, base.__impl_kwargs)
-
- @classmethod
- def _restore_configuration(cls, saved):
- # type: (Tuple[type, Dict[str, Any]]) -> None
- base = cls.configurable_base()
- base.__impl_class = saved[0]
- base.__impl_kwargs = saved[1]
-
-
-class ArgReplacer(object):
- """Replaces one value in an ``args, kwargs`` pair.
-
- Inspects the function signature to find an argument by name
- whether it is passed by position or keyword. For use in decorators
- and similar wrappers.
- """
- def __init__(self, func, name):
- # type: (Callable, str) -> None
- self.name = name
- try:
- self.arg_pos = self._getargnames(func).index(name)
- except ValueError:
- # Not a positional parameter
- self.arg_pos = None
-
- def _getargnames(self, func):
- # type: (Callable) -> List[str]
- try:
- return getargspec(func).args
- except TypeError:
- if hasattr(func, 'func_code'):
- # Cython-generated code has all the attributes needed
- # by inspect.getargspec, but the inspect module only
- # works with ordinary functions. Inline the portion of
- # getargspec that we need here. Note that for static
- # functions the @cython.binding(True) decorator must
- # be used (for methods it works out of the box).
- code = func.func_code # type: ignore
- return code.co_varnames[:code.co_argcount]
- raise
-
- def get_old_value(self, args, kwargs, default=None):
- # type: (List[Any], Dict[str, Any], Any) -> Any
- """Returns the old value of the named argument without replacing it.
-
- Returns ``default`` if the argument is not present.
- """
- if self.arg_pos is not None and len(args) > self.arg_pos:
- return args[self.arg_pos]
- else:
- return kwargs.get(self.name, default)
-
- def replace(self, new_value, args, kwargs):
- # type: (Any, List[Any], Dict[str, Any]) -> Tuple[Any, List[Any], Dict[str, Any]]
- """Replace the named argument in ``args, kwargs`` with ``new_value``.
-
- Returns ``(old_value, args, kwargs)``. The returned ``args`` and
- ``kwargs`` objects may not be the same as the input objects, or
- the input objects may be mutated.
-
- If the named argument was not found, ``new_value`` will be added
- to ``kwargs`` and None will be returned as ``old_value``.
- """
- if self.arg_pos is not None and len(args) > self.arg_pos:
- # The arg to replace is passed positionally
- old_value = args[self.arg_pos]
- args = list(args) # *args is normally a tuple
- args[self.arg_pos] = new_value
- else:
- # The arg to replace is either omitted or passed by keyword.
- old_value = kwargs.get(self.name)
- kwargs[self.name] = new_value
- return old_value, args, kwargs
-
-
-def timedelta_to_seconds(td):
- # type: (datetime.timedelta) -> float
- """Equivalent to td.total_seconds() (introduced in python 2.7)."""
- return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10 ** 6) / float(10 ** 6)
-
-
-def _websocket_mask_python(mask, data):
- # type: (bytes, bytes) -> bytes
- """Websocket masking function.
-
- `mask` is a `bytes` object of length 4; `data` is a `bytes` object of any length.
- Returns a `bytes` object of the same length as `data` with the mask applied
- as specified in section 5.3 of RFC 6455.
-
- This pure-python implementation may be replaced by an optimized version when available.
- """
- mask_arr = array.array("B", mask)
- unmasked_arr = array.array("B", data)
- for i in xrange(len(data)):
- unmasked_arr[i] = unmasked_arr[i] ^ mask_arr[i % 4]
- if PY3:
- # tostring was deprecated in py32. It hasn't been removed,
- # but since we turn on deprecation warnings in our tests
- # we need to use the right one.
- return unmasked_arr.tobytes()
- else:
- return unmasked_arr.tostring()
-
-
-if (os.environ.get('TORNADO_NO_EXTENSION') or
- os.environ.get('TORNADO_EXTENSION') == '0'):
- # These environment variables exist to make it easier to do performance
- # comparisons; they are not guaranteed to remain supported in the future.
- _websocket_mask = _websocket_mask_python
-else:
- try:
- from tornado.speedups import websocket_mask as _websocket_mask
- except ImportError:
- if os.environ.get('TORNADO_EXTENSION') == '1':
- raise
- _websocket_mask = _websocket_mask_python
-
-
-def doctests():
- import doctest
- return doctest.DocTestSuite()
+"""Miscellaneous utility functions and classes.
+
+This module is used internally by Tornado. It is not necessarily expected
+that the functions and classes defined here will be useful to other
+applications, but they are documented here in case they are.
+
+The one public-facing part of this module is the `Configurable` class
+and its `~Configurable.configure` method, which becomes a part of the
+interface of its subclasses, including `.AsyncHTTPClient`, `.IOLoop`,
+and `.Resolver`.
+"""
+
+from __future__ import absolute_import, division, print_function
+
+import array
+import atexit
+import os
+import re
+import sys
+import zlib
+
+PY3 = sys.version_info >= (3,)
+
+if PY3:
+ xrange = range
+
+# inspect.getargspec() raises DeprecationWarnings in Python 3.5.
+# The two functions have compatible interfaces for the parts we need.
+if PY3:
+ from inspect import getfullargspec as getargspec
+else:
+ from inspect import getargspec
+
+# Aliases for types that are spelled differently in different Python
+# versions. bytes_type is deprecated and no longer used in Tornado
+# itself but is left in case anyone outside Tornado is using it.
+bytes_type = bytes
+if PY3:
+ unicode_type = str
+ basestring_type = str
+else:
+ # The names unicode and basestring don't exist in py3 so silence flake8.
+ unicode_type = unicode # noqa
+ basestring_type = basestring # noqa
+
+
+try:
+ import typing # noqa
+ from typing import cast
+
+ _ObjectDictBase = typing.Dict[str, typing.Any]
+except ImportError:
+ _ObjectDictBase = dict
+
+ def cast(typ, x):
+ return x
+else:
+ # More imports that are only needed in type comments.
+ import datetime # noqa
+ import types # noqa
+ from typing import Any, AnyStr, Union, Optional, Dict, Mapping # noqa
+ from typing import Tuple, Match, Callable # noqa
+
+ if PY3:
+ _BaseString = str
+ else:
+ _BaseString = Union[bytes, unicode_type]
+
+
+try:
+ from sys import is_finalizing
+except ImportError:
+ # Emulate it
+ def _get_emulated_is_finalizing():
+ L = []
+ atexit.register(lambda: L.append(None))
+
+ def is_finalizing():
+ # Not referencing any globals here
+ return L != []
+
+ return is_finalizing
+
+ is_finalizing = _get_emulated_is_finalizing()
+
+
+class ObjectDict(_ObjectDictBase):
+ """Makes a dictionary behave like an object, with attribute-style access.
+ """
+ def __getattr__(self, name):
+ # type: (str) -> Any
+ try:
+ return self[name]
+ except KeyError:
+ raise AttributeError(name)
+
+ def __setattr__(self, name, value):
+ # type: (str, Any) -> None
+ self[name] = value
+
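A quick illustration of the attribute-style access (a sketch, not part of the module)::

    from tornado.util import ObjectDict

    od = ObjectDict(name="tornado")
    od.version = 4                      # same as od['version'] = 4
    assert od.name == "tornado" and od["version"] == 4
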
+
+class GzipDecompressor(object):
+ """Streaming gzip decompressor.
+
+    The interface is like that of `zlib.decompressobj` (without some of the
+    optional arguments), but it understands gzip headers and checksums.
+ """
+ def __init__(self):
+ # Magic parameter makes zlib module understand gzip header
+ # http://stackoverflow.com/questions/1838699/how-can-i-decompress-a-gzip-stream-with-zlib
+ # This works on cpython and pypy, but not jython.
+ self.decompressobj = zlib.decompressobj(16 + zlib.MAX_WBITS)
+
+ def decompress(self, value, max_length=None):
+ # type: (bytes, Optional[int]) -> bytes
+ """Decompress a chunk, returning newly-available data.
+
+ Some data may be buffered for later processing; `flush` must
+ be called when there is no more input data to ensure that
+ all data was processed.
+
+ If ``max_length`` is given, some input data may be left over
+ in ``unconsumed_tail``; you must retrieve this value and pass
+ it back to a future call to `decompress` if it is not empty.
+ """
+ return self.decompressobj.decompress(value, max_length)
+
+ @property
+ def unconsumed_tail(self):
+ # type: () -> bytes
+ """Returns the unconsumed portion left over
+ """
+ return self.decompressobj.unconsumed_tail
+
+ def flush(self):
+ # type: () -> bytes
+ """Return any remaining buffered data not yet returned by decompress.
+
+ Also checks for errors such as truncated input.
+ No other methods may be called on this object after `flush`.
+ """
+ return self.decompressobj.flush()
+
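A small round-trip sketch, using only the standard library for the compression side (Python 3)::

    import gzip
    from tornado.util import GzipDecompressor

    decomp = GzipDecompressor()
    data = decomp.decompress(gzip.compress(b"hello world"))
    data += decomp.flush()              # collect any buffered remainder
    assert data == b"hello world"
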
+
+def import_object(name):
+ # type: (_BaseString) -> Any
+ """Imports an object by name.
+
+ import_object('x') is equivalent to 'import x'.
+ import_object('x.y.z') is equivalent to 'from x.y import z'.
+
+ >>> import tornado.escape
+ >>> import_object('tornado.escape') is tornado.escape
+ True
+ >>> import_object('tornado.escape.utf8') is tornado.escape.utf8
+ True
+ >>> import_object('tornado') is tornado
+ True
+ >>> import_object('tornado.missing_module')
+ Traceback (most recent call last):
+ ...
+ ImportError: No module named missing_module
+ """
+ if not isinstance(name, str):
+ # on python 2 a byte string is required.
+ name = name.encode('utf-8')
+ if name.count('.') == 0:
+ return __import__(name, None, None)
+
+ parts = name.split('.')
+ obj = __import__('.'.join(parts[:-1]), None, None, [parts[-1]], 0)
+ try:
+ return getattr(obj, parts[-1])
+ except AttributeError:
+ raise ImportError("No module named %s" % parts[-1])
+
+
+# Stubs to make mypy happy (and later for actual type-checking).
+def raise_exc_info(exc_info):
+ # type: (Tuple[type, BaseException, types.TracebackType]) -> None
+ pass
+
+
+def exec_in(code, glob, loc=None):
+ # type: (Any, Dict[str, Any], Optional[Mapping[str, Any]]) -> Any
+ if isinstance(code, basestring_type):
+ # exec(string) inherits the caller's future imports; compile
+ # the string first to prevent that.
+ code = compile(code, '<string>', 'exec', dont_inherit=True)
+ exec(code, glob, loc)
+
+
+if PY3:
+ exec("""
+def raise_exc_info(exc_info):
+ try:
+ raise exc_info[1].with_traceback(exc_info[2])
+ finally:
+ exc_info = None
+
+""")
+else:
+ exec("""
+def raise_exc_info(exc_info):
+ raise exc_info[0], exc_info[1], exc_info[2]
+""")
+
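``exec_in`` is essentially ``exec`` with future-import isolation; a one-glance sketch::

    from tornado.util import exec_in

    namespace = {}
    exec_in("x = 1 + 1", namespace)
    assert namespace["x"] == 2
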
+
+def errno_from_exception(e):
+ # type: (BaseException) -> Optional[int]
+ """Provides the errno from an Exception object.
+
+    In some cases the errno attribute is not set, so we pull the errno
+    out of the args; but if someone instantiates an Exception without
+    any args, indexing into args raises an error. This function
+    abstracts all of that behavior to give you a safe way to get the
+    errno.
+ """
+
+ if hasattr(e, 'errno'):
+ return e.errno # type: ignore
+ elif e.args:
+ return e.args[0]
+ else:
+ return None
+
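A typical call site, sketched (the path is illustrative)::

    import errno
    from tornado.util import errno_from_exception

    try:
        open("/no/such/file")
    except EnvironmentError as e:       # OSError/IOError on py2 and py3
        if errno_from_exception(e) == errno.ENOENT:
            pass                        # handle "file not found" specifically
        else:
            raise
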
+
+_alphanum = frozenset(
+ "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789")
+
+
+def _re_unescape_replacement(match):
+ # type: (Match[str]) -> str
+ group = match.group(1)
+ if group[0] in _alphanum:
+ raise ValueError("cannot unescape '\\\\%s'" % group[0])
+ return group
+
+
+_re_unescape_pattern = re.compile(r'\\(.)', re.DOTALL)
+
+
+def re_unescape(s):
+ # type: (str) -> str
+ """Unescape a string escaped by `re.escape`.
+
+ May raise ``ValueError`` for regular expressions which could not
+ have been produced by `re.escape` (for example, strings containing
+ ``\d`` cannot be unescaped).
+
+ .. versionadded:: 4.4
+ """
+ return _re_unescape_pattern.sub(_re_unescape_replacement, s)
+
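The intended round trip, in one line::

    from tornado.util import re_unescape
    assert re_unescape(re.escape("1+1=2")) == "1+1=2"
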
+
+class Configurable(object):
+ """Base class for configurable interfaces.
+
+ A configurable interface is an (abstract) class whose constructor
+ acts as a factory function for one of its implementation subclasses.
+ The implementation subclass as well as optional keyword arguments to
+ its initializer can be set globally at runtime with `configure`.
+
+ By using the constructor as the factory method, the interface
+ looks like a normal class, `isinstance` works as usual, etc. This
+ pattern is most useful when the choice of implementation is likely
+ to be a global decision (e.g. when `~select.epoll` is available,
+ always use it instead of `~select.select`), or when a
+ previously-monolithic class has been split into specialized
+ subclasses.
+
+ Configurable subclasses must define the class methods
+ `configurable_base` and `configurable_default`, and use the instance
+ method `initialize` instead of ``__init__``.
+ """
+ __impl_class = None # type: type
+ __impl_kwargs = None # type: Dict[str, Any]
+
+ def __new__(cls, *args, **kwargs):
+ base = cls.configurable_base()
+ init_kwargs = {}
+ if cls is base:
+ impl = cls.configured_class()
+ if base.__impl_kwargs:
+ init_kwargs.update(base.__impl_kwargs)
+ else:
+ impl = cls
+ init_kwargs.update(kwargs)
+ instance = super(Configurable, cls).__new__(impl)
+ # initialize vs __init__ chosen for compatibility with AsyncHTTPClient
+ # singleton magic. If we get rid of that we can switch to __init__
+ # here too.
+ instance.initialize(*args, **init_kwargs)
+ return instance
+
+ @classmethod
+ def configurable_base(cls):
+ # type: () -> Any
+ # TODO: This class needs https://github.com/python/typing/issues/107
+ # to be fully typeable.
+ """Returns the base class of a configurable hierarchy.
+
+        This will normally return the class in which it is defined
+        (which is *not* necessarily the same as the ``cls`` classmethod parameter).
+ """
+ raise NotImplementedError()
+
+ @classmethod
+ def configurable_default(cls):
+ # type: () -> type
+ """Returns the implementation class to be used if none is configured."""
+ raise NotImplementedError()
+
+ def initialize(self):
+ # type: () -> None
+ """Initialize a `Configurable` subclass instance.
+
+ Configurable classes should use `initialize` instead of ``__init__``.
+
+ .. versionchanged:: 4.2
+ Now accepts positional arguments in addition to keyword arguments.
+ """
+
+ @classmethod
+ def configure(cls, impl, **kwargs):
+ # type: (Any, **Any) -> None
+ """Sets the class to use when the base class is instantiated.
+
+ Keyword arguments will be saved and added to the arguments passed
+ to the constructor. This can be used to set global defaults for
+ some parameters.
+ """
+ base = cls.configurable_base()
+ if isinstance(impl, (str, unicode_type)):
+ impl = import_object(impl)
+ if impl is not None and not issubclass(impl, cls):
+ raise ValueError("Invalid subclass of %s" % cls)
+ base.__impl_class = impl
+ base.__impl_kwargs = kwargs
+
+ @classmethod
+ def configured_class(cls):
+ # type: () -> type
+ """Returns the currently configured class."""
+ base = cls.configurable_base()
+ if cls.__impl_class is None:
+ base.__impl_class = cls.configurable_default()
+ return base.__impl_class
+
+ @classmethod
+ def _save_configuration(cls):
+ # type: () -> Tuple[type, Dict[str, Any]]
+ base = cls.configurable_base()
+ return (base.__impl_class, base.__impl_kwargs)
+
+ @classmethod
+ def _restore_configuration(cls, saved):
+ # type: (Tuple[type, Dict[str, Any]]) -> None
+ base = cls.configurable_base()
+ base.__impl_class = saved[0]
+ base.__impl_kwargs = saved[1]
+
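A self-contained sketch of a hypothetical configurable hierarchy (``Transport`` and ``TcpTransport`` are illustrative names, not Tornado classes)::

    from tornado.util import Configurable

    class Transport(Configurable):
        @classmethod
        def configurable_base(cls):
            return Transport

        @classmethod
        def configurable_default(cls):
            return TcpTransport

        def initialize(self, timeout=10):    # use initialize, not __init__
            self.timeout = timeout

    class TcpTransport(Transport):
        pass

    Transport.configure(TcpTransport, timeout=30)
    t = Transport()                          # factory picks the impl class
    assert isinstance(t, TcpTransport) and t.timeout == 30
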
+
+class ArgReplacer(object):
+ """Replaces one value in an ``args, kwargs`` pair.
+
+ Inspects the function signature to find an argument by name
+ whether it is passed by position or keyword. For use in decorators
+ and similar wrappers.
+ """
+ def __init__(self, func, name):
+ # type: (Callable, str) -> None
+ self.name = name
+ try:
+ self.arg_pos = self._getargnames(func).index(name)
+ except ValueError:
+ # Not a positional parameter
+ self.arg_pos = None
+
+ def _getargnames(self, func):
+ # type: (Callable) -> List[str]
+ try:
+ return getargspec(func).args
+ except TypeError:
+ if hasattr(func, 'func_code'):
+ # Cython-generated code has all the attributes needed
+ # by inspect.getargspec, but the inspect module only
+ # works with ordinary functions. Inline the portion of
+ # getargspec that we need here. Note that for static
+ # functions the @cython.binding(True) decorator must
+ # be used (for methods it works out of the box).
+ code = func.func_code # type: ignore
+ return code.co_varnames[:code.co_argcount]
+ raise
+
+ def get_old_value(self, args, kwargs, default=None):
+ # type: (List[Any], Dict[str, Any], Any) -> Any
+ """Returns the old value of the named argument without replacing it.
+
+ Returns ``default`` if the argument is not present.
+ """
+ if self.arg_pos is not None and len(args) > self.arg_pos:
+ return args[self.arg_pos]
+ else:
+ return kwargs.get(self.name, default)
+
+ def replace(self, new_value, args, kwargs):
+ # type: (Any, List[Any], Dict[str, Any]) -> Tuple[Any, List[Any], Dict[str, Any]]
+ """Replace the named argument in ``args, kwargs`` with ``new_value``.
+
+ Returns ``(old_value, args, kwargs)``. The returned ``args`` and
+ ``kwargs`` objects may not be the same as the input objects, or
+ the input objects may be mutated.
+
+ If the named argument was not found, ``new_value`` will be added
+ to ``kwargs`` and None will be returned as ``old_value``.
+ """
+ if self.arg_pos is not None and len(args) > self.arg_pos:
+ # The arg to replace is passed positionally
+ old_value = args[self.arg_pos]
+ args = list(args) # *args is normally a tuple
+ args[self.arg_pos] = new_value
+ else:
+ # The arg to replace is either omitted or passed by keyword.
+ old_value = kwargs.get(self.name)
+ kwargs[self.name] = new_value
+ return old_value, args, kwargs
+
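A worked example of ``replace`` on a hypothetical function::

    from tornado.util import ArgReplacer

    def greet(name, greeting="hello"):       # illustrative
        return "%s, %s" % (greeting, name)

    replacer = ArgReplacer(greet, "greeting")
    old, args, kwargs = replacer.replace("hi", ["world"], {})
    assert old is None                       # argument was omitted
    assert greet(*args, **kwargs) == "hi, world"
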
+
+def timedelta_to_seconds(td):
+ # type: (datetime.timedelta) -> float
+ """Equivalent to td.total_seconds() (introduced in python 2.7)."""
+ return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10 ** 6) / float(10 ** 6)
+
+
+def _websocket_mask_python(mask, data):
+ # type: (bytes, bytes) -> bytes
+ """Websocket masking function.
+
+ `mask` is a `bytes` object of length 4; `data` is a `bytes` object of any length.
+ Returns a `bytes` object of the same length as `data` with the mask applied
+ as specified in section 5.3 of RFC 6455.
+
+ This pure-python implementation may be replaced by an optimized version when available.
+ """
+ mask_arr = array.array("B", mask)
+ unmasked_arr = array.array("B", data)
+ for i in xrange(len(data)):
+ unmasked_arr[i] = unmasked_arr[i] ^ mask_arr[i % 4]
+ if PY3:
+ # tostring was deprecated in py32. It hasn't been removed,
+ # but since we turn on deprecation warnings in our tests
+ # we need to use the right one.
+ return unmasked_arr.tobytes()
+ else:
+ return unmasked_arr.tostring()
+
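Since the mask is a bytewise XOR, applying it twice restores the payload; a quick check::

    masked = _websocket_mask_python(b"\x01\x02\x03\x04", b"hello websocket")
    assert _websocket_mask_python(b"\x01\x02\x03\x04", masked) == b"hello websocket"
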
+
+if (os.environ.get('TORNADO_NO_EXTENSION') or
+ os.environ.get('TORNADO_EXTENSION') == '0'):
+ # These environment variables exist to make it easier to do performance
+ # comparisons; they are not guaranteed to remain supported in the future.
+ _websocket_mask = _websocket_mask_python
+else:
+ try:
+ from tornado.speedups import websocket_mask as _websocket_mask
+ except ImportError:
+ if os.environ.get('TORNADO_EXTENSION') == '1':
+ raise
+ _websocket_mask = _websocket_mask_python
+
+
+def doctests():
+ import doctest
+ return doctest.DocTestSuite()
diff --git a/contrib/python/tornado/tornado-4/tornado/web.py b/contrib/python/tornado/tornado-4/tornado/web.py
index e8d102b50e..e67a867684 100644
--- a/contrib/python/tornado/tornado-4/tornado/web.py
+++ b/contrib/python/tornado/tornado-4/tornado/web.py
@@ -1,3286 +1,3286 @@
-#!/usr/bin/env python
-#
-# Copyright 2009 Facebook
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""``tornado.web`` provides a simple web framework with asynchronous
-features that allow it to scale to large numbers of open connections,
-making it ideal for `long polling
-<http://en.wikipedia.org/wiki/Push_technology#Long_polling>`_.
-
-Here is a simple "Hello, world" example app:
-
-.. testcode::
-
- import tornado.ioloop
- import tornado.web
-
- class MainHandler(tornado.web.RequestHandler):
- def get(self):
- self.write("Hello, world")
-
- if __name__ == "__main__":
- application = tornado.web.Application([
- (r"/", MainHandler),
- ])
- application.listen(8888)
- tornado.ioloop.IOLoop.current().start()
-
-.. testoutput::
- :hide:
-
-
-See the :doc:`guide` for additional information.
-
-Thread-safety notes
--------------------
-
-In general, methods on `RequestHandler` and elsewhere in Tornado are
-not thread-safe. In particular, methods such as
-`~RequestHandler.write()`, `~RequestHandler.finish()`, and
-`~RequestHandler.flush()` must only be called from the main thread. If
-you use multiple threads it is important to use `.IOLoop.add_callback`
-to transfer control back to the main thread before finishing the
-request.
-
-"""
-
-from __future__ import absolute_import, division, print_function
-
-import base64
-import binascii
-import datetime
-import email.utils
-import functools
-import gzip
-import hashlib
-import hmac
-import mimetypes
-import numbers
-import os.path
-import re
-import stat
-import sys
-import threading
-import time
-import tornado
-import traceback
-import types
-from inspect import isclass
-from io import BytesIO
-
-from tornado.concurrent import Future
-from tornado import escape
-from tornado import gen
-from tornado import httputil
-from tornado import iostream
-from tornado import locale
-from tornado.log import access_log, app_log, gen_log
-from tornado import stack_context
-from tornado import template
-from tornado.escape import utf8, _unicode
-from tornado.routing import (AnyMatches, DefaultHostMatches, HostMatches,
- ReversibleRouter, Rule, ReversibleRuleRouter,
- URLSpec)
-from tornado.util import (ObjectDict, raise_exc_info,
- unicode_type, _websocket_mask, PY3)
-
-url = URLSpec
-
-if PY3:
- import http.cookies as Cookie
- import urllib.parse as urlparse
- from urllib.parse import urlencode
-else:
- import Cookie
- import urlparse
- from urllib import urlencode
-
-try:
- import typing # noqa
-
- # The following types are accepted by RequestHandler.set_header
- # and related methods.
- _HeaderTypes = typing.Union[bytes, unicode_type,
- numbers.Integral, datetime.datetime]
-except ImportError:
- pass
-
-
-MIN_SUPPORTED_SIGNED_VALUE_VERSION = 1
-"""The oldest signed value version supported by this version of Tornado.
-
-Signed values older than this version cannot be decoded.
-
-.. versionadded:: 3.2.1
-"""
-
-MAX_SUPPORTED_SIGNED_VALUE_VERSION = 2
-"""The newest signed value version supported by this version of Tornado.
-
-Signed values newer than this version cannot be decoded.
-
-.. versionadded:: 3.2.1
-"""
-
-DEFAULT_SIGNED_VALUE_VERSION = 2
-"""The signed value version produced by `.RequestHandler.create_signed_value`.
-
-May be overridden by passing a ``version`` keyword argument.
-
-.. versionadded:: 3.2.1
-"""
-
-DEFAULT_SIGNED_VALUE_MIN_VERSION = 1
-"""The oldest signed value accepted by `.RequestHandler.get_secure_cookie`.
-
-May be overridden by passing a ``min_version`` keyword argument.
-
-.. versionadded:: 3.2.1
-"""
-
-
-class RequestHandler(object):
- """Base class for HTTP request handlers.
-
- Subclasses must define at least one of the methods defined in the
- "Entry points" section below.
- """
- SUPPORTED_METHODS = ("GET", "HEAD", "POST", "DELETE", "PATCH", "PUT",
- "OPTIONS")
-
- _template_loaders = {} # type: typing.Dict[str, template.BaseLoader]
- _template_loader_lock = threading.Lock()
- _remove_control_chars_regex = re.compile(r"[\x00-\x08\x0e-\x1f]")
-
- def __init__(self, application, request, **kwargs):
- super(RequestHandler, self).__init__()
-
- self.application = application
- self.request = request
- self._headers_written = False
- self._finished = False
- self._auto_finish = True
- self._transforms = None # will be set in _execute
- self._prepared_future = None
- self._headers = None # type: httputil.HTTPHeaders
- self.path_args = None
- self.path_kwargs = None
- self.ui = ObjectDict((n, self._ui_method(m)) for n, m in
- application.ui_methods.items())
- # UIModules are available as both `modules` and `_tt_modules` in the
- # template namespace. Historically only `modules` was available
- # but could be clobbered by user additions to the namespace.
- # The template {% module %} directive looks in `_tt_modules` to avoid
- # possible conflicts.
- self.ui["_tt_modules"] = _UIModuleNamespace(self,
- application.ui_modules)
- self.ui["modules"] = self.ui["_tt_modules"]
- self.clear()
- self.request.connection.set_close_callback(self.on_connection_close)
- self.initialize(**kwargs)
-
- def initialize(self):
- """Hook for subclass initialization. Called for each request.
-
- A dictionary passed as the third argument of a url spec will be
- supplied as keyword arguments to initialize().
-
- Example::
-
- class ProfileHandler(RequestHandler):
- def initialize(self, database):
- self.database = database
-
- def get(self, username):
- ...
-
- app = Application([
- (r'/user/(.*)', ProfileHandler, dict(database=database)),
- ])
- """
- pass
-
- @property
- def settings(self):
- """An alias for `self.application.settings <Application.settings>`."""
- return self.application.settings
-
- def head(self, *args, **kwargs):
- raise HTTPError(405)
-
- def get(self, *args, **kwargs):
- raise HTTPError(405)
-
- def post(self, *args, **kwargs):
- raise HTTPError(405)
-
- def delete(self, *args, **kwargs):
- raise HTTPError(405)
-
- def patch(self, *args, **kwargs):
- raise HTTPError(405)
-
- def put(self, *args, **kwargs):
- raise HTTPError(405)
-
- def options(self, *args, **kwargs):
- raise HTTPError(405)
-
- def prepare(self):
- """Called at the beginning of a request before `get`/`post`/etc.
-
- Override this method to perform common initialization regardless
- of the request method.
-
- Asynchronous support: Decorate this method with `.gen.coroutine`
- or `.return_future` to make it asynchronous (the
- `asynchronous` decorator cannot be used on `prepare`).
- If this method returns a `.Future` execution will not proceed
- until the `.Future` is done.
-
- .. versionadded:: 3.1
- Asynchronous support.
- """
- pass
-
- def on_finish(self):
- """Called after the end of a request.
-
- Override this method to perform cleanup, logging, etc.
- This method is a counterpart to `prepare`. ``on_finish`` may
- not produce any output, as it is called after the response
- has been sent to the client.
- """
- pass
-
- def on_connection_close(self):
- """Called in async handlers if the client closed the connection.
-
- Override this to clean up resources associated with
- long-lived connections. Note that this method is called only if
- the connection was closed during asynchronous processing; if you
- need to do cleanup after every request override `on_finish`
- instead.
-
- Proxies may keep a connection open for a time (perhaps
- indefinitely) after the client has gone away, so this method
- may not be called promptly after the end user closes their
- connection.
- """
- if _has_stream_request_body(self.__class__):
- if not self.request.body.done():
- self.request.body.set_exception(iostream.StreamClosedError())
- self.request.body.exception()
-
- def clear(self):
- """Resets all headers and content for this response."""
- self._headers = httputil.HTTPHeaders({
- "Server": "TornadoServer/%s" % tornado.version,
- "Content-Type": "text/html; charset=UTF-8",
- "Date": httputil.format_timestamp(time.time()),
- })
- self.set_default_headers()
- self._write_buffer = []
- self._status_code = 200
- self._reason = httputil.responses[200]
-
- def set_default_headers(self):
- """Override this to set HTTP headers at the beginning of the request.
-
- For example, this is the place to set a custom ``Server`` header.
- Note that setting such headers in the normal flow of request
- processing may not do what you want, since headers may be reset
- during error handling.
- """
- pass
-
- def set_status(self, status_code, reason=None):
- """Sets the status code for our response.
-
- :arg int status_code: Response status code. If ``reason`` is ``None``,
- it must be present in `httplib.responses <http.client.responses>`.
- :arg string reason: Human-readable reason phrase describing the status
- code. If ``None``, it will be filled in from
- `httplib.responses <http.client.responses>`.
- """
- self._status_code = status_code
- if reason is not None:
- self._reason = escape.native_str(reason)
- else:
- try:
- self._reason = httputil.responses[status_code]
- except KeyError:
- raise ValueError("unknown status code %d" % status_code)
-
- def get_status(self):
- """Returns the status code for our response."""
- return self._status_code
-
- def set_header(self, name, value):
- # type: (str, _HeaderTypes) -> None
- """Sets the given response header name and value.
-
- If a datetime is given, we automatically format it according to the
- HTTP specification. If the value is not a string, we convert it to
- a string. All header values are then encoded as UTF-8.
- """
- self._headers[name] = self._convert_header_value(value)
-
- def add_header(self, name, value):
- # type: (str, _HeaderTypes) -> None
- """Adds the given response header and value.
-
- Unlike `set_header`, `add_header` may be called multiple times
- to return multiple values for the same header.
- """
- self._headers.add(name, self._convert_header_value(value))
-
- def clear_header(self, name):
- """Clears an outgoing header, undoing a previous `set_header` call.
-
- Note that this method does not apply to multi-valued headers
- set by `add_header`.
- """
- if name in self._headers:
- del self._headers[name]
-
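A hedged sketch of the header conversions described above, inside a hypothetical handler::

    import datetime
    import tornado.web

    class HeaderDemoHandler(tornado.web.RequestHandler):   # illustrative
        def get(self):
            self.set_header("X-Request-Id", 42)            # int -> "42"
            self.set_header("Expires",
                            datetime.datetime.utcnow())    # HTTP date format
            self.add_header("Via", "proxy-a")
            self.add_header("Via", "proxy-b")              # both values sent
            self.write("ok")
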
- _INVALID_HEADER_CHAR_RE = re.compile(r"[\x00-\x1f]")
-
- def _convert_header_value(self, value):
- # type: (_HeaderTypes) -> str
-
- # Convert the input value to a str. This type check is a bit
- # subtle: The bytes case only executes on python 3, and the
- # unicode case only executes on python 2, because the other
- # cases are covered by the first match for str.
- if isinstance(value, str):
- retval = value
- elif isinstance(value, bytes): # py3
- # Non-ascii characters in headers are not well supported,
- # but if you pass bytes, use latin1 so they pass through as-is.
- retval = value.decode('latin1')
- elif isinstance(value, unicode_type): # py2
- # TODO: This is inconsistent with the use of latin1 above,
- # but it's been that way for a long time. Should it change?
- retval = escape.utf8(value)
- elif isinstance(value, numbers.Integral):
- # return immediately since we know the converted value will be safe
- return str(value)
- elif isinstance(value, datetime.datetime):
- return httputil.format_timestamp(value)
- else:
- raise TypeError("Unsupported header value %r" % value)
- # If \n is allowed into the header, it is possible to inject
- # additional headers or split the request.
- if RequestHandler._INVALID_HEADER_CHAR_RE.search(retval):
- raise ValueError("Unsafe header value %r", retval)
- return retval
-
- _ARG_DEFAULT = object()
-
- def get_argument(self, name, default=_ARG_DEFAULT, strip=True):
- """Returns the value of the argument with the given name.
-
- If default is not provided, the argument is considered to be
- required, and we raise a `MissingArgumentError` if it is missing.
-
- If the argument appears in the url more than once, we return the
- last value.
-
- The returned value is always unicode.
- """
- return self._get_argument(name, default, self.request.arguments, strip)
-
- def get_arguments(self, name, strip=True):
- """Returns a list of the arguments with the given name.
-
- If the argument is not present, returns an empty list.
-
- The returned values are always unicode.
- """
-
- # Make sure `get_arguments` isn't accidentally being called with a
- # positional argument that's assumed to be a default (like in
-        # `get_argument`).
- assert isinstance(strip, bool)
-
- return self._get_arguments(name, self.request.arguments, strip)
-
- def get_body_argument(self, name, default=_ARG_DEFAULT, strip=True):
- """Returns the value of the argument with the given name
- from the request body.
-
- If default is not provided, the argument is considered to be
- required, and we raise a `MissingArgumentError` if it is missing.
-
- If the argument appears in the url more than once, we return the
- last value.
-
- The returned value is always unicode.
-
- .. versionadded:: 3.2
- """
- return self._get_argument(name, default, self.request.body_arguments,
- strip)
-
- def get_body_arguments(self, name, strip=True):
- """Returns a list of the body arguments with the given name.
-
- If the argument is not present, returns an empty list.
-
- The returned values are always unicode.
-
- .. versionadded:: 3.2
- """
- return self._get_arguments(name, self.request.body_arguments, strip)
-
- def get_query_argument(self, name, default=_ARG_DEFAULT, strip=True):
- """Returns the value of the argument with the given name
- from the request query string.
-
- If default is not provided, the argument is considered to be
- required, and we raise a `MissingArgumentError` if it is missing.
-
- If the argument appears in the url more than once, we return the
- last value.
-
- The returned value is always unicode.
-
- .. versionadded:: 3.2
- """
- return self._get_argument(name, default,
- self.request.query_arguments, strip)
-
- def get_query_arguments(self, name, strip=True):
- """Returns a list of the query arguments with the given name.
-
- If the argument is not present, returns an empty list.
-
- The returned values are always unicode.
-
- .. versionadded:: 3.2
- """
- return self._get_arguments(name, self.request.query_arguments, strip)
-
- def _get_argument(self, name, default, source, strip=True):
- args = self._get_arguments(name, source, strip=strip)
- if not args:
- if default is self._ARG_DEFAULT:
- raise MissingArgumentError(name)
- return default
- return args[-1]
-
- def _get_arguments(self, name, source, strip=True):
- values = []
- for v in source.get(name, []):
- v = self.decode_argument(v, name=name)
- if isinstance(v, unicode_type):
- # Get rid of any weird control chars (unless decoding gave
- # us bytes, in which case leave it alone)
- v = RequestHandler._remove_control_chars_regex.sub(" ", v)
- if strip:
- v = v.strip()
- values.append(v)
- return values
-
- def decode_argument(self, value, name=None):
- """Decodes an argument from the request.
-
- The argument has been percent-decoded and is now a byte string.
- By default, this method decodes the argument as utf-8 and returns
- a unicode string, but this may be overridden in subclasses.
-
- This method is used as a filter for both `get_argument()` and for
- values extracted from the url and passed to `get()`/`post()`/etc.
-
- The name of the argument is provided if known, but may be None
- (e.g. for unnamed groups in the url regex).
- """
- try:
- return _unicode(value)
- except UnicodeDecodeError:
- raise HTTPError(400, "Invalid unicode in %s: %r" %
- (name or "url", value[:40]))
-
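Sketching the argument accessors in a hypothetical handler (names are illustrative)::

    import tornado.web

    class SearchHandler(tornado.web.RequestHandler):       # illustrative
        def get(self):
            q = self.get_argument("q", default="")         # last value, stripped
            tags = self.get_arguments("tag")               # possibly empty list
            self.write({"q": q, "tags": tags})             # rendered as JSON
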
- @property
- def cookies(self):
- """An alias for
- `self.request.cookies <.httputil.HTTPServerRequest.cookies>`."""
- return self.request.cookies
-
- def get_cookie(self, name, default=None):
- """Gets the value of the cookie with the given name, else default."""
- if self.request.cookies is not None and name in self.request.cookies:
- return self.request.cookies[name].value
- return default
-
- def set_cookie(self, name, value, domain=None, expires=None, path="/",
- expires_days=None, **kwargs):
- """Sets the given cookie name/value with the given options.
-
- Additional keyword arguments are set on the Cookie.Morsel
- directly.
- See https://docs.python.org/2/library/cookie.html#Cookie.Morsel
- for available attributes.
- """
- # The cookie library only accepts type str, in both python 2 and 3
- name = escape.native_str(name)
- value = escape.native_str(value)
- if re.search(r"[\x00-\x20]", name + value):
- # Don't let us accidentally inject bad stuff
- raise ValueError("Invalid cookie %r: %r" % (name, value))
- if not hasattr(self, "_new_cookie"):
- self._new_cookie = Cookie.SimpleCookie()
- if name in self._new_cookie:
- del self._new_cookie[name]
- self._new_cookie[name] = value
- morsel = self._new_cookie[name]
- if domain:
- morsel["domain"] = domain
- if expires_days is not None and not expires:
- expires = datetime.datetime.utcnow() + datetime.timedelta(
- days=expires_days)
- if expires:
- morsel["expires"] = httputil.format_timestamp(expires)
- if path:
- morsel["path"] = path
- for k, v in kwargs.items():
- if k == 'max_age':
- k = 'max-age'
-
- # skip falsy values for httponly and secure flags because
- # SimpleCookie sets them regardless
- if k in ['httponly', 'secure'] and not v:
- continue
-
- morsel[k] = v
-
- def clear_cookie(self, name, path="/", domain=None):
- """Deletes the cookie with the given name.
-
- Due to limitations of the cookie protocol, you must pass the same
- path and domain to clear a cookie as were used when that cookie
- was set (but there is no way to find out on the server side
- which values were used for a given cookie).
- """
- expires = datetime.datetime.utcnow() - datetime.timedelta(days=365)
- self.set_cookie(name, value="", path=path, expires=expires,
- domain=domain)
-
- def clear_all_cookies(self, path="/", domain=None):
- """Deletes all the cookies the user sent with this request.
-
- See `clear_cookie` for more information on the path and domain
- parameters.
-
- .. versionchanged:: 3.2
-
- Added the ``path`` and ``domain`` parameters.
- """
- for name in self.request.cookies:
- self.clear_cookie(name, path=path, domain=domain)
-
- def set_secure_cookie(self, name, value, expires_days=30, version=None,
- **kwargs):
- """Signs and timestamps a cookie so it cannot be forged.
-
- You must specify the ``cookie_secret`` setting in your Application
- to use this method. It should be a long, random sequence of bytes
- to be used as the HMAC secret for the signature.
-
- To read a cookie set with this method, use `get_secure_cookie()`.
-
- Note that the ``expires_days`` parameter sets the lifetime of the
- cookie in the browser, but is independent of the ``max_age_days``
- parameter to `get_secure_cookie`.
-
- Secure cookies may contain arbitrary byte values, not just unicode
- strings (unlike regular cookies).
-
- .. versionchanged:: 3.2.1
-
- Added the ``version`` argument. Introduced cookie version 2
- and made it the default.
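-
- A minimal sketch of the set/read round trip (the cookie name and
- value are illustrative)::
-
-     self.set_secure_cookie("user_id", "42")
-     # in a later request:
-     user_id = self.get_secure_cookie("user_id")  # b"42", or None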
- """
- self.set_cookie(name, self.create_signed_value(name, value,
- version=version),
- expires_days=expires_days, **kwargs)
-
- def create_signed_value(self, name, value, version=None):
- """Signs and timestamps a string so it cannot be forged.
-
- Normally used via set_secure_cookie, but provided as a separate
- method for non-cookie uses. To decode a value not stored
- as a cookie use the optional value argument to get_secure_cookie.
-
- .. versionchanged:: 3.2.1
-
- Added the ``version`` argument. Introduced cookie version 2
- and made it the default.
- """
- self.require_setting("cookie_secret", "secure cookies")
- secret = self.application.settings["cookie_secret"]
- key_version = None
- if isinstance(secret, dict):
- if self.application.settings.get("key_version") is None:
- raise Exception("key_version setting must be used for secret_key dicts")
- key_version = self.application.settings["key_version"]
-
- return create_signed_value(secret, name, value, version=version,
- key_version=key_version)
-
- def get_secure_cookie(self, name, value=None, max_age_days=31,
- min_version=None):
- """Returns the given signed cookie if it validates, or None.
-
- The decoded cookie value is returned as a byte string (unlike
- `get_cookie`).
-
- .. versionchanged:: 3.2.1
-
- Added the ``min_version`` argument. Introduced cookie version 2;
- both versions 1 and 2 are accepted by default.
- """
- self.require_setting("cookie_secret", "secure cookies")
- if value is None:
- value = self.get_cookie(name)
- return decode_signed_value(self.application.settings["cookie_secret"],
- name, value, max_age_days=max_age_days,
- min_version=min_version)
-
- def get_secure_cookie_key_version(self, name, value=None):
- """Returns the signing key version of the secure cookie.
-
- The version is returned as an int.
- """
- self.require_setting("cookie_secret", "secure cookies")
- if value is None:
- value = self.get_cookie(name)
- return get_signature_key_version(value)
-
- def redirect(self, url, permanent=False, status=None):
- """Sends a redirect to the given (optionally relative) URL.
-
- If the ``status`` argument is specified, that value is used as the
- HTTP status code; otherwise either 301 (permanent) or 302
- (temporary) is chosen based on the ``permanent`` argument.
- The default is 302 (temporary).
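-
- For example, either of the following (the paths are illustrative)::
-
-     self.redirect("/login")                     # 302
-     self.redirect("/new-home", permanent=True)  # 301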
- """
- if self._headers_written:
- raise Exception("Cannot redirect after headers have been written")
- if status is None:
- status = 301 if permanent else 302
- else:
- assert isinstance(status, int) and 300 <= status <= 399
- self.set_status(status)
- self.set_header("Location", utf8(url))
- self.finish()
-
- def write(self, chunk):
- """Writes the given chunk to the output buffer.
-
- To write the output to the network, use the flush() method below.
-
- If the given chunk is a dictionary, we write it as JSON and set
- the Content-Type of the response to be ``application/json``.
- (if you want to send JSON as a different ``Content-Type``, call
- set_header *after* calling write()).
-
- Note that lists are not converted to JSON because of a potential
- cross-site security vulnerability. All JSON output should be
- wrapped in a dictionary. More details at
- http://haacked.com/archive/2009/06/25/json-hijacking.aspx/ and
- https://github.com/facebook/tornado/issues/1009
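-
- For example, a dict is written as a JSON response (the payload is
- illustrative)::
-
-     self.write({"status": "ok", "count": 3})
-     # sends Content-Type: application/json; charset=UTF-8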
- """
- if self._finished:
- raise RuntimeError("Cannot write() after finish()")
- if not isinstance(chunk, (bytes, unicode_type, dict)):
- message = "write() only accepts bytes, unicode, and dict objects"
- if isinstance(chunk, list):
- message += ". Lists not accepted for security reasons; see http://www.tornadoweb.org/en/stable/web.html#tornado.web.RequestHandler.write"
- raise TypeError(message)
- if isinstance(chunk, dict):
- chunk = escape.json_encode(chunk)
- self.set_header("Content-Type", "application/json; charset=UTF-8")
- chunk = utf8(chunk)
- self._write_buffer.append(chunk)
-
- def render(self, template_name, **kwargs):
- """Renders the template with the given arguments as the response."""
- if self._finished:
- raise RuntimeError("Cannot render() after finish()")
- html = self.render_string(template_name, **kwargs)
-
- # Insert the additional JS and CSS added by the modules on the page
- js_embed = []
- js_files = []
- css_embed = []
- css_files = []
- html_heads = []
- html_bodies = []
- for module in getattr(self, "_active_modules", {}).values():
- embed_part = module.embedded_javascript()
- if embed_part:
- js_embed.append(utf8(embed_part))
- file_part = module.javascript_files()
- if file_part:
- if isinstance(file_part, (unicode_type, bytes)):
- js_files.append(file_part)
- else:
- js_files.extend(file_part)
- embed_part = module.embedded_css()
- if embed_part:
- css_embed.append(utf8(embed_part))
- file_part = module.css_files()
- if file_part:
- if isinstance(file_part, (unicode_type, bytes)):
- css_files.append(file_part)
- else:
- css_files.extend(file_part)
- head_part = module.html_head()
- if head_part:
- html_heads.append(utf8(head_part))
- body_part = module.html_body()
- if body_part:
- html_bodies.append(utf8(body_part))
-
- if js_files:
- # Maintain order of JavaScript files given by modules
- js = self.render_linked_js(js_files)
- sloc = html.rindex(b'</body>')
- html = html[:sloc] + utf8(js) + b'\n' + html[sloc:]
- if js_embed:
- js = self.render_embed_js(js_embed)
- sloc = html.rindex(b'</body>')
- html = html[:sloc] + js + b'\n' + html[sloc:]
- if css_files:
- css = self.render_linked_css(css_files)
- hloc = html.index(b'</head>')
- html = html[:hloc] + utf8(css) + b'\n' + html[hloc:]
- if css_embed:
- css = self.render_embed_css(css_embed)
- hloc = html.index(b'</head>')
- html = html[:hloc] + css + b'\n' + html[hloc:]
- if html_heads:
- hloc = html.index(b'</head>')
- html = html[:hloc] + b''.join(html_heads) + b'\n' + html[hloc:]
- if html_bodies:
- hloc = html.index(b'</body>')
- html = html[:hloc] + b''.join(html_bodies) + b'\n' + html[hloc:]
- self.finish(html)
-
- def render_linked_js(self, js_files):
- """Default method used to render the final js links for the
- rendered webpage.
-
- Override this method in a sub-classed controller to change the output.
- """
- paths = []
- unique_paths = set()
-
- for path in js_files:
- if not is_absolute(path):
- path = self.static_url(path)
- if path not in unique_paths:
- paths.append(path)
- unique_paths.add(path)
-
- return ''.join('<script src="' + escape.xhtml_escape(p) +
- '" type="text/javascript"></script>'
- for p in paths)
-
- def render_embed_js(self, js_embed):
- """Default method used to render the final embedded js for the
- rendered webpage.
-
- Override this method in a sub-classed controller to change the output.
- """
- return b'<script type="text/javascript">\n//<![CDATA[\n' + \
- b'\n'.join(js_embed) + b'\n//]]>\n</script>'
-
- def render_linked_css(self, css_files):
- """Default method used to render the final css links for the
- rendered webpage.
-
- Override this method in a sub-classed controller to change the output.
- """
- paths = []
- unique_paths = set()
-
- for path in css_files:
- if not is_absolute(path):
- path = self.static_url(path)
- if path not in unique_paths:
- paths.append(path)
- unique_paths.add(path)
-
- return ''.join('<link href="' + escape.xhtml_escape(p) + '" '
- 'type="text/css" rel="stylesheet"/>'
- for p in paths)
-
- def render_embed_css(self, css_embed):
- """Default method used to render the final embedded css for the
- rendered webpage.
-
- Override this method in a sub-classed controller to change the output.
- """
- return b'<style type="text/css">\n' + b'\n'.join(css_embed) + \
- b'\n</style>'
-
- def render_string(self, template_name, **kwargs):
- """Generate the given template with the given arguments.
-
- We return the generated byte string (in utf8). To generate and
- write a template as a response, use render() above.
- """
- # If no template_path is specified, use the path of the calling file
- template_path = self.get_template_path()
- if not template_path:
- frame = sys._getframe(0)
- web_file = frame.f_code.co_filename
- while frame.f_code.co_filename == web_file:
- frame = frame.f_back
- template_path = os.path.dirname(frame.f_code.co_filename)
- with RequestHandler._template_loader_lock:
- if template_path not in RequestHandler._template_loaders:
- loader = self.create_template_loader(template_path)
- RequestHandler._template_loaders[template_path] = loader
- else:
- loader = RequestHandler._template_loaders[template_path]
- t = loader.load(template_name)
- namespace = self.get_template_namespace()
- namespace.update(kwargs)
- return t.generate(**namespace)
-
- def get_template_namespace(self):
- """Returns a dictionary to be used as the default template namespace.
-
- May be overridden by subclasses to add or modify values.
-
- The results of this method will be combined with additional
- defaults in the `tornado.template` module and keyword arguments
- to `render` or `render_string`.
- """
- namespace = dict(
- handler=self,
- request=self.request,
- current_user=self.current_user,
- locale=self.locale,
- _=self.locale.translate,
- pgettext=self.locale.pgettext,
- static_url=self.static_url,
- xsrf_form_html=self.xsrf_form_html,
- reverse_url=self.reverse_url
- )
- namespace.update(self.ui)
- return namespace
-
- def create_template_loader(self, template_path):
- """Returns a new template loader for the given path.
-
- May be overridden by subclasses. By default returns a
- directory-based loader on the given path, using the
- ``autoescape`` and ``template_whitespace`` application
- settings. If a ``template_loader`` application setting is
- supplied, uses that instead.
- """
- settings = self.application.settings
- if "template_loader" in settings:
- return settings["template_loader"]
- kwargs = {}
- if "autoescape" in settings:
- # autoescape=None means "no escaping", so we have to be sure
- # to only pass this kwarg if the user asked for it.
- kwargs["autoescape"] = settings["autoescape"]
- if "template_whitespace" in settings:
- kwargs["whitespace"] = settings["template_whitespace"]
- return template.Loader(template_path, **kwargs)
-
- def flush(self, include_footers=False, callback=None):
- """Flushes the current output buffer to the network.
-
- The ``callback`` argument, if given, can be used for flow control:
- it will be run when all flushed data has been written to the socket.
- Note that only one flush callback can be outstanding at a time;
- if another flush occurs before the previous flush's callback
- has been run, the previous callback will be discarded.
-
- .. versionchanged:: 4.0
- Now returns a `.Future` if no callback is given.
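-
- For example, a coroutine can use the returned `.Future` for flow
- control (``generate_chunks`` is a hypothetical helper)::
-
-     @gen.coroutine
-     def get(self):
-         for chunk in generate_chunks():
-             self.write(chunk)
-             yield self.flush()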
- """
- chunk = b"".join(self._write_buffer)
- self._write_buffer = []
- if not self._headers_written:
- self._headers_written = True
- for transform in self._transforms:
- self._status_code, self._headers, chunk = \
- transform.transform_first_chunk(
- self._status_code, self._headers,
- chunk, include_footers)
- # Ignore the chunk and only write the headers for HEAD requests
- if self.request.method == "HEAD":
- chunk = None
-
- # Finalize the cookie headers (which have been stored in a side
- # object so an outgoing cookie could be overwritten before it
- # is sent).
- if hasattr(self, "_new_cookie"):
- for cookie in self._new_cookie.values():
- self.add_header("Set-Cookie", cookie.OutputString(None))
-
- start_line = httputil.ResponseStartLine('',
- self._status_code,
- self._reason)
- return self.request.connection.write_headers(
- start_line, self._headers, chunk, callback=callback)
- else:
- for transform in self._transforms:
- chunk = transform.transform_chunk(chunk, include_footers)
- # Ignore the chunk and only write the headers for HEAD requests
- if self.request.method != "HEAD":
- return self.request.connection.write(chunk, callback=callback)
- else:
- future = Future()
- future.set_result(None)
- return future
-
- def finish(self, chunk=None):
- """Finishes this response, ending the HTTP request."""
- if self._finished:
- raise RuntimeError("finish() called twice")
-
- if chunk is not None:
- self.write(chunk)
-
- # Automatically support ETags and add the Content-Length header if
- # we have not flushed any content yet.
- if not self._headers_written:
- if (self._status_code == 200 and
- self.request.method in ("GET", "HEAD") and
- "Etag" not in self._headers):
- self.set_etag_header()
- if self.check_etag_header():
- self._write_buffer = []
- self.set_status(304)
- if (self._status_code in (204, 304) or
- (self._status_code >= 100 and self._status_code < 200)):
- assert not self._write_buffer, "Cannot send body with %s" % self._status_code
- self._clear_headers_for_304()
- elif "Content-Length" not in self._headers:
- content_length = sum(len(part) for part in self._write_buffer)
- self.set_header("Content-Length", content_length)
-
- if hasattr(self.request, "connection"):
- # Now that the request is finished, clear the callback we
- # set on the HTTPConnection (which would otherwise prevent the
- # garbage collection of the RequestHandler when there
- # are keepalive connections)
- self.request.connection.set_close_callback(None)
-
- self.flush(include_footers=True)
- self.request.finish()
- self._log()
- self._finished = True
- self.on_finish()
- self._break_cycles()
-
- def _break_cycles(self):
- # Break up a reference cycle between this handler and the
- # _ui_module closures to allow for faster GC on CPython.
- self.ui = None
-
- def send_error(self, status_code=500, **kwargs):
- """Sends the given HTTP error code to the browser.
-
- If `flush()` has already been called, it is not possible to send
- an error, so this method will simply terminate the response.
- If output has been written but not yet flushed, it will be discarded
- and replaced with the error page.
-
- Override `write_error()` to customize the error page that is returned.
- Additional keyword arguments are passed through to `write_error`.
- """
- if self._headers_written:
- gen_log.error("Cannot send error response after headers written")
- if not self._finished:
- # If we get an error between writing headers and finishing,
- # we are unlikely to be able to finish due to a
- # Content-Length mismatch. Try anyway to release the
- # socket.
- try:
- self.finish()
- except Exception:
- gen_log.error("Failed to flush partial response",
- exc_info=True)
- return
- self.clear()
-
- reason = kwargs.get('reason')
- if 'exc_info' in kwargs:
- exception = kwargs['exc_info'][1]
- if isinstance(exception, HTTPError) and exception.reason:
- reason = exception.reason
- self.set_status(status_code, reason=reason)
- try:
- self.write_error(status_code, **kwargs)
- except Exception:
- app_log.error("Uncaught exception in write_error", exc_info=True)
- if not self._finished:
- self.finish()
-
- def write_error(self, status_code, **kwargs):
- """Override to implement custom error pages.
-
- ``write_error`` may call `write`, `render`, `set_header`, etc
- to produce output as usual.
-
- If this error was caused by an uncaught exception (including
- HTTPError), an ``exc_info`` triple will be available as
- ``kwargs["exc_info"]``. Note that this exception may not be
- the "current" exception for purposes of methods like
- ``sys.exc_info()`` or ``traceback.format_exc``.
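-
- A sketch of an override that renders a template instead of the
- default page (the template name is illustrative)::
-
-     def write_error(self, status_code, **kwargs):
-         self.render("error.html", status_code=status_code,
-                     reason=self._reason)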
- """
- if self.settings.get("serve_traceback") and "exc_info" in kwargs:
- # in debug mode, try to send a traceback
- self.set_header('Content-Type', 'text/plain')
- for line in traceback.format_exception(*kwargs["exc_info"]):
- self.write(line)
- self.finish()
- else:
- self.finish("<html><title>%(code)d: %(message)s</title>"
- "<body>%(code)d: %(message)s</body></html>" % {
- "code": status_code,
- "message": self._reason,
- })
-
- @property
- def locale(self):
- """The locale for the current session.
-
- Determined by either `get_user_locale`, which you can override to
- set the locale based on, e.g., a user preference stored in a
- database, or `get_browser_locale`, which uses the ``Accept-Language``
- header.
-
- .. versionchanged:: 4.1
- Added a property setter.
- """
- if not hasattr(self, "_locale"):
- self._locale = self.get_user_locale()
- if not self._locale:
- self._locale = self.get_browser_locale()
- assert self._locale
- return self._locale
-
- @locale.setter
- def locale(self, value):
- self._locale = value
-
- def get_user_locale(self):
- """Override to determine the locale from the authenticated user.
-
- If None is returned, we fall back to `get_browser_locale()`.
-
- This method should return a `tornado.locale.Locale` object,
- most likely obtained via a call like ``tornado.locale.get("en")``.
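-
- A sketch of an override backed by a stored user preference, assuming
- ``tornado.locale`` is imported as ``locale`` and ``current_user`` is
- a dict (both are illustrative)::
-
-     def get_user_locale(self):
-         if self.current_user and "locale" in self.current_user:
-             return locale.get(self.current_user["locale"])
-         return None  # fall back to get_browser_locale()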
- """
- return None
-
- def get_browser_locale(self, default="en_US"):
- """Determines the user's locale from ``Accept-Language`` header.
-
- See http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.4
- """
- if "Accept-Language" in self.request.headers:
- languages = self.request.headers["Accept-Language"].split(",")
- locales = []
- for language in languages:
- parts = language.strip().split(";")
- if len(parts) > 1 and parts[1].startswith("q="):
- try:
- score = float(parts[1][2:])
- except (ValueError, TypeError):
- score = 0.0
- else:
- score = 1.0
- locales.append((parts[0], score))
- if locales:
- locales.sort(key=lambda pair: pair[1], reverse=True)
- codes = [l[0] for l in locales]
- return locale.get(*codes)
- return locale.get(default)
-
- @property
- def current_user(self):
- """The authenticated user for this request.
-
- This is set in one of two ways:
-
- * A subclass may override `get_current_user()`, which will be called
- automatically the first time ``self.current_user`` is accessed.
- `get_current_user()` will only be called once per request,
- and is cached for future access::
-
- def get_current_user(self):
- user_cookie = self.get_secure_cookie("user")
- if user_cookie:
- return json.loads(user_cookie)
- return None
-
- * It may be set as a normal variable, typically from an overridden
- `prepare()`::
-
- @gen.coroutine
- def prepare(self):
- user_id_cookie = self.get_secure_cookie("user_id")
- if user_id_cookie:
- self.current_user = yield load_user(user_id_cookie)
-
- Note that `prepare()` may be a coroutine while `get_current_user()`
- may not, so the latter form is necessary if loading the user requires
- asynchronous operations.
-
- The user object may be any type of the application's choosing.
- """
- if not hasattr(self, "_current_user"):
- self._current_user = self.get_current_user()
- return self._current_user
-
- @current_user.setter
- def current_user(self, value):
- self._current_user = value
-
- def get_current_user(self):
- """Override to determine the current user from, e.g., a cookie.
-
- This method may not be a coroutine.
- """
- return None
-
- def get_login_url(self):
- """Override to customize the login URL based on the request.
-
- By default, we use the ``login_url`` application setting.
- """
- self.require_setting("login_url", "@tornado.web.authenticated")
- return self.application.settings["login_url"]
-
- def get_template_path(self):
- """Override to customize template path for each handler.
-
- By default, we use the ``template_path`` application setting.
- Return None to load templates relative to the calling file.
- """
- return self.application.settings.get("template_path")
-
- @property
- def xsrf_token(self):
- """The XSRF-prevention token for the current user/session.
-
- To prevent cross-site request forgery, we set an '_xsrf' cookie
- and include the same '_xsrf' value as an argument with all POST
- requests. If the two do not match, we reject the form submission
- as a potential forgery.
-
- See http://en.wikipedia.org/wiki/Cross-site_request_forgery
-
- .. versionchanged:: 3.2.2
- The xsrf token will now have a random mask applied in every
- request, which makes it safe to include the token in pages
- that are compressed. See http://breachattack.com for more
- information on the issue fixed by this change. Old (version 1)
- cookies will be converted to version 2 when this method is called
- unless the ``xsrf_cookie_version`` `Application` setting is
- set to 1.
-
- .. versionchanged:: 4.3
- The ``xsrf_cookie_kwargs`` `Application` setting may be
- used to supply additional cookie options (which will be
- passed directly to `set_cookie`). For example,
- ``xsrf_cookie_kwargs=dict(httponly=True, secure=True)``
- will set the ``secure`` and ``httponly`` flags on the
- ``_xsrf`` cookie.
- """
- if not hasattr(self, "_xsrf_token"):
- version, token, timestamp = self._get_raw_xsrf_token()
- output_version = self.settings.get("xsrf_cookie_version", 2)
- cookie_kwargs = self.settings.get("xsrf_cookie_kwargs", {})
- if output_version == 1:
- self._xsrf_token = binascii.b2a_hex(token)
- elif output_version == 2:
- mask = os.urandom(4)
- self._xsrf_token = b"|".join([
- b"2",
- binascii.b2a_hex(mask),
- binascii.b2a_hex(_websocket_mask(mask, token)),
- utf8(str(int(timestamp)))])
- else:
- raise ValueError("unknown xsrf cookie version %d",
- output_version)
- if version is None:
- expires_days = 30 if self.current_user else None
- self.set_cookie("_xsrf", self._xsrf_token,
- expires_days=expires_days,
- **cookie_kwargs)
- return self._xsrf_token
-
- def _get_raw_xsrf_token(self):
- """Read or generate the xsrf token in its raw form.
-
- The raw_xsrf_token is a tuple containing:
-
- * version: the version of the cookie from which this token was read,
- or None if we generated a new token in this request.
- * token: the raw token data; random (non-ascii) bytes.
- * timestamp: the time this token was generated (will not be accurate
- for version 1 cookies)
- """
- if not hasattr(self, '_raw_xsrf_token'):
- cookie = self.get_cookie("_xsrf")
- if cookie:
- version, token, timestamp = self._decode_xsrf_token(cookie)
- else:
- version, token, timestamp = None, None, None
- if token is None:
- version = None
- token = os.urandom(16)
- timestamp = time.time()
- self._raw_xsrf_token = (version, token, timestamp)
- return self._raw_xsrf_token
-
- def _decode_xsrf_token(self, cookie):
- """Convert a cookie string into a the tuple form returned by
- _get_raw_xsrf_token.
- """
-
- try:
- m = _signed_value_version_re.match(utf8(cookie))
-
- if m:
- version = int(m.group(1))
- if version == 2:
- _, mask, masked_token, timestamp = cookie.split("|")
-
- mask = binascii.a2b_hex(utf8(mask))
- token = _websocket_mask(
- mask, binascii.a2b_hex(utf8(masked_token)))
- timestamp = int(timestamp)
- return version, token, timestamp
- else:
- # Treat unknown versions as not present instead of failing.
- raise Exception("Unknown xsrf cookie version")
- else:
- version = 1
- try:
- token = binascii.a2b_hex(utf8(cookie))
- except (binascii.Error, TypeError):
- token = utf8(cookie)
- # We don't have a usable timestamp in older versions.
- timestamp = int(time.time())
- return (version, token, timestamp)
- except Exception:
- # Catch exceptions and return nothing instead of failing.
- gen_log.debug("Uncaught exception in _decode_xsrf_token",
- exc_info=True)
- return None, None, None
-
- def check_xsrf_cookie(self):
- """Verifies that the ``_xsrf`` cookie matches the ``_xsrf`` argument.
-
- To prevent cross-site request forgery, we set an ``_xsrf``
- cookie and include the same value as a non-cookie
- field with all ``POST`` requests. If the two do not match, we
- reject the form submission as a potential forgery.
-
- The ``_xsrf`` value may be set as either a form field named ``_xsrf``
- or in a custom HTTP header named ``X-XSRFToken`` or ``X-CSRFToken``
- (the latter is accepted for compatibility with Django).
-
- See http://en.wikipedia.org/wiki/Cross-site_request_forgery
-
- Prior to release 1.1.1, this check was ignored if the HTTP header
- ``X-Requested-With: XMLHttpRequest`` was present. This exception
- has been shown to be insecure and has been removed. For more
- information please see
- http://www.djangoproject.com/weblog/2011/feb/08/security/
- http://weblog.rubyonrails.org/2011/2/8/csrf-protection-bypass-in-ruby-on-rails
-
- .. versionchanged:: 3.2.2
- Added support for cookie version 2. Both versions 1 and 2 are
- supported.
- """
- token = (self.get_argument("_xsrf", None) or
- self.request.headers.get("X-Xsrftoken") or
- self.request.headers.get("X-Csrftoken"))
- if not token:
- raise HTTPError(403, "'_xsrf' argument missing from POST")
- _, token, _ = self._decode_xsrf_token(token)
- _, expected_token, _ = self._get_raw_xsrf_token()
- if not token:
- raise HTTPError(403, "'_xsrf' argument has invalid format")
- if not _time_independent_equals(utf8(token), utf8(expected_token)):
- raise HTTPError(403, "XSRF cookie does not match POST argument")
-
- def xsrf_form_html(self):
- """An HTML ``<input/>`` element to be included with all POST forms.
-
- It defines the ``_xsrf`` input value, which we check on all POST
- requests to prevent cross-site request forgery. If you have set
- the ``xsrf_cookies`` application setting, you must include this
- HTML within all of your HTML forms.
-
- In a template, this method should be called with ``{% module
- xsrf_form_html() %}``
-
- See `check_xsrf_cookie()` above for more information.
- """
- return '<input type="hidden" name="_xsrf" value="' + \
- escape.xhtml_escape(self.xsrf_token) + '"/>'
-
- def static_url(self, path, include_host=None, **kwargs):
- """Returns a static URL for the given relative static file path.
-
- This method requires you set the ``static_path`` setting in your
- application (which specifies the root directory of your static
- files).
-
- This method returns a versioned url (by default appending
- ``?v=<signature>``), which allows the static files to be
- cached indefinitely. This can be disabled by passing
- ``include_version=False`` (in the default implementation;
- other static file implementations are not required to support
- this, but they may support other options).
-
- By default this method returns URLs relative to the current
- host, but if ``include_host`` is true the URL returned will be
- absolute. If this handler has an ``include_host`` attribute,
- that value will be used as the default for all `static_url`
- calls that do not pass ``include_host`` as a keyword argument.
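-
- For example, in a template (the file path is illustrative)::
-
-     <link rel="stylesheet" href="{{ static_url("css/site.css") }}">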
-
- """
- self.require_setting("static_path", "static_url")
- get_url = self.settings.get("static_handler_class",
- StaticFileHandler).make_static_url
-
- if include_host is None:
- include_host = getattr(self, "include_host", False)
-
- if include_host:
- base = self.request.protocol + "://" + self.request.host
- else:
- base = ""
-
- return base + get_url(self.settings, path, **kwargs)
-
- def require_setting(self, name, feature="this feature"):
- """Raises an exception if the given app setting is not defined."""
- if not self.application.settings.get(name):
- raise Exception("You must define the '%s' setting in your "
- "application to use %s" % (name, feature))
-
- def reverse_url(self, name, *args):
- """Alias for `Application.reverse_url`."""
- return self.application.reverse_url(name, *args)
-
- def compute_etag(self):
- """Computes the etag header to be used for this request.
-
- By default uses a hash of the content written so far.
-
- May be overridden to provide custom etag implementations,
- or may return None to disable tornado's default etag support.
- """
- hasher = hashlib.sha1()
- for part in self._write_buffer:
- hasher.update(part)
- return '"%s"' % hasher.hexdigest()
-
- def set_etag_header(self):
- """Sets the response's Etag header using ``self.compute_etag()``.
-
- Note: no header will be set if ``compute_etag()`` returns ``None``.
-
- This method is called automatically when the request is finished.
- """
- etag = self.compute_etag()
- if etag is not None:
- self.set_header("Etag", etag)
-
- def check_etag_header(self):
- """Checks the ``Etag`` header against requests's ``If-None-Match``.
-
- Returns ``True`` if the request's Etag matches and a 304 should be
- returned. For example::
-
- self.set_etag_header()
- if self.check_etag_header():
- self.set_status(304)
- return
-
- This method is called automatically when the request is finished,
- but may be called earlier for applications that override
- `compute_etag` and want to do an early check for ``If-None-Match``
- before completing the request. The ``Etag`` header should be set
- (perhaps with `set_etag_header`) before calling this method.
- """
- computed_etag = utf8(self._headers.get("Etag", ""))
- # Find all weak and strong etag values from If-None-Match header
- # because RFC 7232 allows multiple etag values in a single header.
- etags = re.findall(
- br'\*|(?:W/)?"[^"]*"',
- utf8(self.request.headers.get("If-None-Match", ""))
- )
- if not computed_etag or not etags:
- return False
-
- match = False
- if etags[0] == b'*':
- match = True
- else:
- # Use a weak comparison when comparing entity-tags.
- def val(x):
- return x[2:] if x.startswith(b'W/') else x
-
- for etag in etags:
- if val(etag) == val(computed_etag):
- match = True
- break
- return match
-
- def _stack_context_handle_exception(self, type, value, traceback):
- try:
- # For historical reasons _handle_request_exception only takes
- # the exception value instead of the full triple,
- # so re-raise the exception to ensure that it's in
- # sys.exc_info()
- raise_exc_info((type, value, traceback))
- except Exception:
- self._handle_request_exception(value)
- return True
-
- @gen.coroutine
- def _execute(self, transforms, *args, **kwargs):
- """Executes this request with the given output transforms."""
- self._transforms = transforms
- try:
- if self.request.method not in self.SUPPORTED_METHODS:
- raise HTTPError(405)
- self.path_args = [self.decode_argument(arg) for arg in args]
- self.path_kwargs = dict((k, self.decode_argument(v, name=k))
- for (k, v) in kwargs.items())
- # If XSRF cookies are turned on, reject form submissions without
- # the proper cookie
- if self.request.method not in ("GET", "HEAD", "OPTIONS") and \
- self.application.settings.get("xsrf_cookies"):
- self.check_xsrf_cookie()
-
- result = self.prepare()
- if result is not None:
- result = yield result
- if self._prepared_future is not None:
- # Tell the Application we've finished with prepare()
- # and are ready for the body to arrive.
- self._prepared_future.set_result(None)
- if self._finished:
- return
-
- if _has_stream_request_body(self.__class__):
- # In streaming mode request.body is a Future that signals
- # the body has been completely received. The Future has no
- # result; the data has been passed to self.data_received
- # instead.
- try:
- yield self.request.body
- except iostream.StreamClosedError:
- return
-
- method = getattr(self, self.request.method.lower())
- result = method(*self.path_args, **self.path_kwargs)
- if result is not None:
- result = yield result
- if self._auto_finish and not self._finished:
- self.finish()
- except Exception as e:
- try:
- self._handle_request_exception(e)
- except Exception:
- app_log.error("Exception in exception handler", exc_info=True)
- if (self._prepared_future is not None and
- not self._prepared_future.done()):
- # In case we failed before setting _prepared_future, do it
- # now (to unblock the HTTP server). Note that this is not
- # in a finally block to avoid GC issues prior to Python 3.4.
- self._prepared_future.set_result(None)
-
- def data_received(self, chunk):
- """Implement this method to handle streamed request data.
-
- Requires the `.stream_request_body` decorator.
- """
- raise NotImplementedError()
-
- def _log(self):
- """Logs the current request.
-
- Sort of deprecated since this functionality was moved to the
- Application, but left in place for the benefit of existing apps
- that have overridden this method.
- """
- self.application.log_request(self)
-
- def _request_summary(self):
- return "%s %s (%s)" % (self.request.method, self.request.uri,
- self.request.remote_ip)
-
- def _handle_request_exception(self, e):
- if isinstance(e, Finish):
- # Not an error; just finish the request without logging.
- if not self._finished:
- self.finish(*e.args)
- return
- try:
- self.log_exception(*sys.exc_info())
- except Exception:
- # An error here should still get a best-effort send_error()
- # to avoid leaking the connection.
- app_log.error("Error in exception logger", exc_info=True)
- if self._finished:
- # Extra errors after the request has been finished should
- # be logged, but there is no reason to continue to try and
- # send a response.
- return
- if isinstance(e, HTTPError):
- if e.status_code not in httputil.responses and not e.reason:
- gen_log.error("Bad HTTP status code: %d", e.status_code)
- self.send_error(500, exc_info=sys.exc_info())
- else:
- self.send_error(e.status_code, exc_info=sys.exc_info())
- else:
- self.send_error(500, exc_info=sys.exc_info())
-
- def log_exception(self, typ, value, tb):
- """Override to customize logging of uncaught exceptions.
-
- By default logs instances of `HTTPError` as warnings without
- stack traces (on the ``tornado.general`` logger), and all
- other exceptions as errors with stack traces (on the
- ``tornado.application`` logger).
-
- .. versionadded:: 3.1
- """
- if isinstance(value, HTTPError):
- if value.log_message:
- format = "%d %s: " + value.log_message
- args = ([value.status_code, self._request_summary()] +
- list(value.args))
- gen_log.warning(format, *args)
- else:
- app_log.error("Uncaught exception %s\n%r", self._request_summary(),
- self.request, exc_info=(typ, value, tb))
-
- def _ui_module(self, name, module):
- def render(*args, **kwargs):
- if not hasattr(self, "_active_modules"):
- self._active_modules = {}
- if name not in self._active_modules:
- self._active_modules[name] = module(self)
- rendered = self._active_modules[name].render(*args, **kwargs)
- return rendered
- return render
-
- def _ui_method(self, method):
- return lambda *args, **kwargs: method(self, *args, **kwargs)
-
- def _clear_headers_for_304(self):
- # 304 responses should not contain entity headers (defined in
- # http://www.w3.org/Protocols/rfc2616/rfc2616-sec7.html#sec7.1)
- # not explicitly allowed by
- # http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3.5
- headers = ["Allow", "Content-Encoding", "Content-Language",
- "Content-Length", "Content-MD5", "Content-Range",
- "Content-Type", "Last-Modified"]
- for h in headers:
- self.clear_header(h)
-
-
-def asynchronous(method):
- """Wrap request handler methods with this if they are asynchronous.
-
- This decorator is for callback-style asynchronous methods; for
- coroutines, use the ``@gen.coroutine`` decorator without
- ``@asynchronous``. (It is legal for legacy reasons to use the two
- decorators together provided ``@asynchronous`` is first, but
- ``@asynchronous`` will be ignored in this case)
-
- This decorator should only be applied to the :ref:`HTTP verb
- methods <verbs>`; its behavior is undefined for any other method.
- This decorator does not *make* a method asynchronous; it tells
- the framework that the method *is* asynchronous. For this decorator
- to be useful the method must (at least sometimes) do something
- asynchronous.
-
- If this decorator is given, the response is not finished when the
- method returns. It is up to the request handler to call
- `self.finish() <RequestHandler.finish>` to finish the HTTP
- request. Without this decorator, the request is automatically
- finished when the ``get()`` or ``post()`` method returns. Example:
-
- .. testcode::
-
- class MyRequestHandler(RequestHandler):
- @asynchronous
- def get(self):
- http = httpclient.AsyncHTTPClient()
- http.fetch("http://friendfeed.com/", self._on_download)
-
- def _on_download(self, response):
- self.write("Downloaded!")
- self.finish()
-
- .. testoutput::
- :hide:
-
- .. versionchanged:: 3.1
- The ability to use ``@gen.coroutine`` without ``@asynchronous``.
-
- .. versionchanged:: 4.3 Returning anything but ``None`` or a
- yieldable object from a method decorated with ``@asynchronous``
- is an error. Such return values were previously ignored silently.
- """
- # Delay the IOLoop import because it's not available on app engine.
- from tornado.ioloop import IOLoop
-
- @functools.wraps(method)
- def wrapper(self, *args, **kwargs):
- self._auto_finish = False
- with stack_context.ExceptionStackContext(
- self._stack_context_handle_exception):
- result = method(self, *args, **kwargs)
- if result is not None:
- result = gen.convert_yielded(result)
-
- # If @asynchronous is used with @gen.coroutine, (but
- # not @gen.engine), we can automatically finish the
- # request when the future resolves. Additionally,
- # the Future will swallow any exceptions so we need
- # to throw them back out to the stack context to finish
- # the request.
- def future_complete(f):
- f.result()
- if not self._finished:
- self.finish()
- IOLoop.current().add_future(result, future_complete)
- # Once we have done this, hide the Future from our
- # caller (i.e. RequestHandler._when_complete), which
- # would otherwise set up its own callback and
- # exception handler (resulting in exceptions being
- # logged twice).
- return None
- return result
- return wrapper
-
-
-def stream_request_body(cls):
- """Apply to `RequestHandler` subclasses to enable streaming body support.
-
- This decorator implies the following changes:
-
- * `.HTTPServerRequest.body` is undefined, and body arguments will not
- be included in `RequestHandler.get_argument`.
- * `RequestHandler.prepare` is called when the request headers have been
- read instead of after the entire body has been read.
- * The subclass must define a method ``data_received(self, data):``, which
- will be called zero or more times as data is available. Note that
- if the request has an empty body, ``data_received`` may not be called.
- * ``prepare`` and ``data_received`` may return Futures (such as via
- ``@gen.coroutine``), in which case the next method will not be called
- until those futures have completed.
- * The regular HTTP method (``post``, ``put``, etc) will be called after
- the entire body has been read.
-
- See the `file receiver demo <https://github.com/tornadoweb/tornado/tree/master/demos/file_upload/>`_
- for example usage.
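-
- A minimal sketch of such a handler (the class name and in-memory
- buffering are illustrative)::
-
-     @stream_request_body
-     class UploadHandler(RequestHandler):
-         def prepare(self):
-             self.chunks = []
-
-         def data_received(self, chunk):
-             self.chunks.append(chunk)
-
-         def post(self):
-             self.write("received %d bytes" % len(b"".join(self.chunks)))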
- """
- if not issubclass(cls, RequestHandler):
- raise TypeError("expected subclass of RequestHandler, got %r", cls)
- cls._stream_request_body = True
- return cls
-
-
-def _has_stream_request_body(cls):
- if not issubclass(cls, RequestHandler):
- raise TypeError("expected subclass of RequestHandler, got %r", cls)
- return getattr(cls, '_stream_request_body', False)
-
-
-def removeslash(method):
- """Use this decorator to remove trailing slashes from the request path.
-
- For example, a request to ``/foo/`` would redirect to ``/foo`` with this
- decorator. Your request handler mapping should use a regular expression
- like ``r'/foo/*'`` in conjunction with using the decorator.
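-
- For example (the handler and pattern are illustrative)::
-
-     class FooHandler(RequestHandler):
-         @removeslash
-         def get(self):
-             self.write("foo")
-
-     application = web.Application([(r"/foo/*", FooHandler)])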
- """
- @functools.wraps(method)
- def wrapper(self, *args, **kwargs):
- if self.request.path.endswith("/"):
- if self.request.method in ("GET", "HEAD"):
- uri = self.request.path.rstrip("/")
- if uri: # don't try to redirect '/' to ''
- if self.request.query:
- uri += "?" + self.request.query
- self.redirect(uri, permanent=True)
- return
- else:
- raise HTTPError(404)
- return method(self, *args, **kwargs)
- return wrapper
-
-
-def addslash(method):
- """Use this decorator to add a missing trailing slash to the request path.
-
- For example, a request to ``/foo`` would redirect to ``/foo/`` with this
- decorator. Your request handler mapping should use a regular expression
- like ``r'/foo/?'`` in conjunction with using the decorator.
- """
- @functools.wraps(method)
- def wrapper(self, *args, **kwargs):
- if not self.request.path.endswith("/"):
- if self.request.method in ("GET", "HEAD"):
- uri = self.request.path + "/"
- if self.request.query:
- uri += "?" + self.request.query
- self.redirect(uri, permanent=True)
- return
- raise HTTPError(404)
- return method(self, *args, **kwargs)
- return wrapper
-
-
-class _ApplicationRouter(ReversibleRuleRouter):
- """Routing implementation used internally by `Application`.
-
- Provides a binding between `Application` and `RequestHandler`.
- This implementation extends `~.routing.ReversibleRuleRouter` in a couple of ways:
-
- * it allows `RequestHandler` subclasses to be used as `~.routing.Rule` targets and
- * it allows a list/tuple of rules to be used as a `~.routing.Rule` target;
-   the ``process_rule`` implementation will substitute such a list with an
-   appropriate `_ApplicationRouter` instance.
- """
-
- def __init__(self, application, rules=None):
- assert isinstance(application, Application)
- self.application = application
- super(_ApplicationRouter, self).__init__(rules)
-
- def process_rule(self, rule):
- rule = super(_ApplicationRouter, self).process_rule(rule)
-
- if isinstance(rule.target, (list, tuple)):
- rule.target = _ApplicationRouter(self.application, rule.target)
-
- return rule
-
- def get_target_delegate(self, target, request, **target_params):
- if isclass(target) and issubclass(target, RequestHandler):
- return self.application.get_handler_delegate(request, target, **target_params)
-
- return super(_ApplicationRouter, self).get_target_delegate(target, request, **target_params)
-
-
-class Application(ReversibleRouter):
- """A collection of request handlers that make up a web application.
-
- Instances of this class are callable and can be passed directly to
- HTTPServer to serve the application::
-
- application = web.Application([
- (r"/", MainPageHandler),
- ])
- http_server = httpserver.HTTPServer(application)
- http_server.listen(8080)
- ioloop.IOLoop.current().start()
-
- The constructor for this class takes in a list of `~.routing.Rule`
- objects or tuples of values corresponding to the arguments of
- `~.routing.Rule` constructor: ``(matcher, target, [target_kwargs], [name])``,
- the values in square brackets being optional. The default matcher is
- `~.routing.PathMatches`, so ``(regexp, target)`` tuples can also be used
- instead of ``(PathMatches(regexp), target)``.
-
- A common routing target is a `RequestHandler` subclass, but you can also
- use lists of rules as a target, which create a nested routing configuration::
-
- application = web.Application([
- (HostMatches("example.com"), [
- (r"/", MainPageHandler),
- (r"/feed", FeedHandler),
- ]),
- ])
-
- In addition to this you can use nested `~.routing.Router` instances,
- `~.httputil.HTTPMessageDelegate` subclasses and callables as routing targets
- (see `~.routing` module docs for more information).
-
- When we receive requests, we iterate over the list in order and
- instantiate the first request class whose regexp
- matches the request path. The request class can be specified as
- either a class object or a (fully-qualified) name.
-
- A dictionary may be passed as the third element (``target_kwargs``)
- of the tuple, which will be used as keyword arguments to the handler's
- constructor and `~RequestHandler.initialize` method. This pattern
- is used for the `StaticFileHandler` in this example (note that a
- `StaticFileHandler` can be installed automatically with the
- static_path setting described below)::
-
- application = web.Application([
- (r"/static/(.*)", web.StaticFileHandler, {"path": "/var/www"}),
- ])
-
- We support virtual hosts with the `add_handlers` method, which takes in
- a host regular expression as the first argument::
-
- application.add_handlers(r"www\.myhost\.com", [
- (r"/article/([0-9]+)", ArticleHandler),
- ])
-
- If there's no match for the current request's host, then the
- ``default_host`` parameter value is matched against host regular expressions.
-
- You can serve static files by passing the ``static_path`` setting
- as a keyword argument. We will serve those files from the
- ``/static/`` URI (this is configurable with the
- ``static_url_prefix`` setting), and we will serve ``/favicon.ico``
- and ``/robots.txt`` from the same directory. A custom subclass of
- `StaticFileHandler` can be specified with the
- ``static_handler_class`` setting.
-
- .. versionchanged:: 4.5
- Integration with the new `tornado.routing` module.
- """
- def __init__(self, handlers=None, default_host=None, transforms=None,
- **settings):
- if transforms is None:
- self.transforms = []
- if settings.get("compress_response") or settings.get("gzip"):
- self.transforms.append(GZipContentEncoding)
- else:
- self.transforms = transforms
- self.default_host = default_host
- self.settings = settings
- self.ui_modules = {'linkify': _linkify,
- 'xsrf_form_html': _xsrf_form_html,
- 'Template': TemplateModule,
- }
- self.ui_methods = {}
- self._load_ui_modules(settings.get("ui_modules", {}))
- self._load_ui_methods(settings.get("ui_methods", {}))
- if self.settings.get("static_path"):
- path = self.settings["static_path"]
- handlers = list(handlers or [])
- static_url_prefix = settings.get("static_url_prefix",
- "/static/")
- static_handler_class = settings.get("static_handler_class",
- StaticFileHandler)
- static_handler_args = settings.get("static_handler_args", {})
- static_handler_args['path'] = path
- for pattern in [re.escape(static_url_prefix) + r"(.*)",
- r"/(favicon\.ico)", r"/(robots\.txt)"]:
- handlers.insert(0, (pattern, static_handler_class,
- static_handler_args))
-
- if self.settings.get('debug'):
- self.settings.setdefault('autoreload', True)
- self.settings.setdefault('compiled_template_cache', False)
- self.settings.setdefault('static_hash_cache', False)
- self.settings.setdefault('serve_traceback', True)
-
- self.wildcard_router = _ApplicationRouter(self, handlers)
- self.default_router = _ApplicationRouter(self, [
- Rule(AnyMatches(), self.wildcard_router)
- ])
-
- # Automatically reload modified modules
- if self.settings.get('autoreload'):
- from tornado import autoreload
- autoreload.start()
-
- def listen(self, port, address="", **kwargs):
- """Starts an HTTP server for this application on the given port.
-
- This is a convenience alias for creating an `.HTTPServer`
- object and calling its listen method. Keyword arguments not
- supported by `HTTPServer.listen <.TCPServer.listen>` are passed to the
- `.HTTPServer` constructor. For advanced uses
- (e.g. multi-process mode), do not use this method; create an
- `.HTTPServer` and call its
- `.TCPServer.bind`/`.TCPServer.start` methods directly.
-
- Note that after calling this method you still need to call
- ``IOLoop.current().start()`` to start the server.
-
- Returns the `.HTTPServer` object.
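-
- For example (the port is illustrative)::
-
-     application.listen(8888)
-     ioloop.IOLoop.current().start()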
-
- .. versionchanged:: 4.3
- Now returns the `.HTTPServer` object.
- """
- # import is here rather than top level because HTTPServer
- # is not importable on appengine
- from tornado.httpserver import HTTPServer
- server = HTTPServer(self, **kwargs)
- server.listen(port, address)
- return server
-
- def add_handlers(self, host_pattern, host_handlers):
- """Appends the given handlers to our handler list.
-
- Host patterns are processed sequentially in the order they were
- added. All matching patterns will be considered.
- """
- host_matcher = HostMatches(host_pattern)
- rule = Rule(host_matcher, _ApplicationRouter(self, host_handlers))
-
- self.default_router.rules.insert(-1, rule)
-
- if self.default_host is not None:
- self.wildcard_router.add_rules([(
- DefaultHostMatches(self, host_matcher.host_pattern),
- host_handlers
- )])
-
- def add_transform(self, transform_class):
- self.transforms.append(transform_class)
-
- def _load_ui_methods(self, methods):
- if isinstance(methods, types.ModuleType):
- self._load_ui_methods(dict((n, getattr(methods, n))
- for n in dir(methods)))
- elif isinstance(methods, list):
- for m in methods:
- self._load_ui_methods(m)
- else:
- for name, fn in methods.items():
- if not name.startswith("_") and hasattr(fn, "__call__") \
- and name[0].lower() == name[0]:
- self.ui_methods[name] = fn
-
- def _load_ui_modules(self, modules):
- if isinstance(modules, types.ModuleType):
- self._load_ui_modules(dict((n, getattr(modules, n))
- for n in dir(modules)))
- elif isinstance(modules, list):
- for m in modules:
- self._load_ui_modules(m)
- else:
- assert isinstance(modules, dict)
- for name, cls in modules.items():
- try:
- if issubclass(cls, UIModule):
- self.ui_modules[name] = cls
- except TypeError:
- pass
-
- def __call__(self, request):
- # Legacy HTTPServer interface
- dispatcher = self.find_handler(request)
- return dispatcher.execute()
-
- def find_handler(self, request, **kwargs):
- route = self.default_router.find_handler(request)
- if route is not None:
- return route
-
- if self.settings.get('default_handler_class'):
- return self.get_handler_delegate(
- request,
- self.settings['default_handler_class'],
- self.settings.get('default_handler_args', {}))
-
- return self.get_handler_delegate(
- request, ErrorHandler, {'status_code': 404})
-
- def get_handler_delegate(self, request, target_class, target_kwargs=None,
- path_args=None, path_kwargs=None):
- """Returns `~.httputil.HTTPMessageDelegate` that can serve a request
- for application and `RequestHandler` subclass.
-
- :arg httputil.HTTPServerRequest request: current HTTP request.
- :arg RequestHandler target_class: a `RequestHandler` class.
- :arg dict target_kwargs: keyword arguments for ``target_class`` constructor.
- :arg list path_args: positional arguments for ``target_class`` HTTP method that
- will be executed while handling a request (``get``, ``post`` or any other).
- :arg dict path_kwargs: keyword arguments for ``target_class`` HTTP method.
- """
- return _HandlerDelegate(
- self, request, target_class, target_kwargs, path_args, path_kwargs)
-
- def reverse_url(self, name, *args):
- """Returns a URL path for handler named ``name``
-
- The handler must be added to the application as a named `URLSpec`.
-
- Args will be substituted for capturing groups in the `URLSpec` regex.
- They will be converted to strings if necessary, encoded as utf8,
- and url-escaped.
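-
- A sketch using the ``url`` (`URLSpec`) helper to name a handler (the
- name and pattern are illustrative)::
-
-     application = web.Application([
-         url(r"/user/([0-9]+)", UserHandler, name="user"),
-     ])
-     application.reverse_url("user", 42)  # -> "/user/42"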
- """
- reversed_url = self.default_router.reverse_url(name, *args)
- if reversed_url is not None:
- return reversed_url
-
- raise KeyError("%s not found in named urls" % name)
-
- def log_request(self, handler):
- """Writes a completed HTTP request to the logs.
-
- By default writes to the ``tornado.access`` logger. To change
- this behavior either subclass Application and override this method,
- or pass a function in the application settings dictionary as
- ``log_function``.
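-
- A sketch of a custom ``log_function`` setting (the function body is
- illustrative)::
-
-     def log_function(handler):
-         print(handler.get_status(), handler._request_summary())
-
-     application = web.Application(handlers, log_function=log_function)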
- """
- if "log_function" in self.settings:
- self.settings["log_function"](handler)
- return
- if handler.get_status() < 400:
- log_method = access_log.info
- elif handler.get_status() < 500:
- log_method = access_log.warning
- else:
- log_method = access_log.error
- request_time = 1000.0 * handler.request.request_time()
- log_method("%d %s %.2fms", handler.get_status(),
- handler._request_summary(), request_time)
-
-
-class _HandlerDelegate(httputil.HTTPMessageDelegate):
- def __init__(self, application, request, handler_class, handler_kwargs,
- path_args, path_kwargs):
- self.application = application
- self.connection = request.connection
- self.request = request
- self.handler_class = handler_class
- self.handler_kwargs = handler_kwargs or {}
- self.path_args = path_args or []
- self.path_kwargs = path_kwargs or {}
- self.chunks = []
- self.stream_request_body = _has_stream_request_body(self.handler_class)
-
- def headers_received(self, start_line, headers):
- if self.stream_request_body:
- self.request.body = Future()
- return self.execute()
-
- def data_received(self, data):
- if self.stream_request_body:
- return self.handler.data_received(data)
- else:
- self.chunks.append(data)
-
- def finish(self):
- if self.stream_request_body:
- self.request.body.set_result(None)
- else:
- self.request.body = b''.join(self.chunks)
- self.request._parse_body()
- self.execute()
-
- def on_connection_close(self):
- if self.stream_request_body:
- self.handler.on_connection_close()
- else:
- self.chunks = None
-
- def execute(self):
- # If template cache is disabled (usually in the debug mode),
- # re-compile templates and reload static files on every
- # request so you don't need to restart to see changes
- if not self.application.settings.get("compiled_template_cache", True):
- with RequestHandler._template_loader_lock:
- for loader in RequestHandler._template_loaders.values():
- loader.reset()
- if not self.application.settings.get('static_hash_cache', True):
- StaticFileHandler.reset()
-
- self.handler = self.handler_class(self.application, self.request,
- **self.handler_kwargs)
- transforms = [t(self.request) for t in self.application.transforms]
-
- if self.stream_request_body:
- self.handler._prepared_future = Future()
- # Note that if an exception escapes handler._execute it will be
- # trapped in the Future it returns (which we are ignoring here,
- # leaving it to be logged when the Future is GC'd).
- # However, that shouldn't happen because _execute has a blanket
- # except handler, and we cannot easily access the IOLoop here to
- # call add_future (because of the requirement to remain compatible
- # with WSGI)
- self.handler._execute(transforms, *self.path_args,
- **self.path_kwargs)
- # If we are streaming the request body, then execute() is finished
- # when the handler has prepared to receive the body. If not,
- # it doesn't matter when execute() finishes (so we return None)
- return self.handler._prepared_future
-
-
-class HTTPError(Exception):
- """An exception that will turn into an HTTP error response.
-
- Raising an `HTTPError` is a convenient alternative to calling
- `RequestHandler.send_error` since it automatically ends the
- current function.
-
- To customize the response sent with an `HTTPError`, override
- `RequestHandler.write_error`.
-
- :arg int status_code: HTTP status code. Must be listed in
- `httplib.responses <http.client.responses>` unless the ``reason``
- keyword argument is given.
- :arg string log_message: Message to be written to the log for this error
- (will not be shown to the user unless the `Application` is in debug
- mode). May contain ``%s``-style placeholders, which will be filled
- in with remaining positional parameters.
- :arg string reason: Keyword-only argument. The HTTP "reason" phrase
- to pass in the status line along with ``status_code``. Normally
- determined automatically from ``status_code``, but can be set
- explicitly when using a non-standard numeric code.
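-
- For example (the message and argument are illustrative)::
-
-     raise HTTPError(404, "unknown user %s", user_id)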
- """
- def __init__(self, status_code=500, log_message=None, *args, **kwargs):
- self.status_code = status_code
- self.log_message = log_message
- self.args = args
- self.reason = kwargs.get('reason', None)
- if log_message and not args:
- self.log_message = log_message.replace('%', '%%')
-
- def __str__(self):
- message = "HTTP %d: %s" % (
- self.status_code,
- self.reason or httputil.responses.get(self.status_code, 'Unknown'))
- if self.log_message:
- return message + " (" + (self.log_message % self.args) + ")"
- else:
- return message
-
-
-class Finish(Exception):
- """An exception that ends the request without producing an error response.
-
- When `Finish` is raised in a `RequestHandler`, the request will
- end (calling `RequestHandler.finish` if it hasn't already been
- called), but the error-handling methods (including
- `RequestHandler.write_error`) will not be called.
-
- If `Finish()` was created with no arguments, the pending response
- will be sent as-is. If `Finish()` was given an argument, that
- argument will be passed to `RequestHandler.finish()`.
-
- This can be a more convenient way to implement custom error pages
- than overriding ``write_error`` (especially in library code)::
-
- if self.current_user is None:
- self.set_status(401)
- self.set_header('WWW-Authenticate', 'Basic realm="something"')
- raise Finish()
-
- .. versionchanged:: 4.3
- Arguments passed to ``Finish()`` will be passed on to
- `RequestHandler.finish`.
- """
- pass
-
-
-class MissingArgumentError(HTTPError):
- """Exception raised by `RequestHandler.get_argument`.
-
- This is a subclass of `HTTPError`, so if it is uncaught a 400 response
- code will be used instead of 500 (and a stack trace will not be logged).
-
- .. versionadded:: 3.1
- """
- def __init__(self, arg_name):
- super(MissingArgumentError, self).__init__(
- 400, 'Missing argument %s' % arg_name)
- self.arg_name = arg_name
-
-
-class ErrorHandler(RequestHandler):
- """Generates an error response with ``status_code`` for all requests."""
- def initialize(self, status_code):
- self.set_status(status_code)
-
- def prepare(self):
- raise HTTPError(self._status_code)
-
- def check_xsrf_cookie(self):
- # POSTs to an ErrorHandler don't actually have side effects,
- # so we don't need to check the xsrf token. This allows POSTs
- # to the wrong url to return a 404 instead of 403.
- pass
-
-
-class RedirectHandler(RequestHandler):
- """Redirects the client to the given URL for all GET requests.
-
- You should provide the keyword argument ``url`` to the handler, e.g.::
-
- application = web.Application([
- (r"/oldpath", web.RedirectHandler, {"url": "/newpath"}),
- ])
-
- `RedirectHandler` supports regular expression substitutions. E.g., to
- swap the first and second parts of a path while preserving the remainder::
-
- application = web.Application([
- (r"/(.*?)/(.*?)/(.*)", web.RedirectHandler, {"url": "/{1}/{0}/{2}"}),
- ])
-
- The final URL is formatted with `str.format` and the substrings that match
- the capturing groups. In the above example, a request to "/a/b/c" would be
- formatted like::
-
- str.format("/{1}/{0}/{2}", "a", "b", "c") # -> "/b/a/c"
-
- Use Python's :ref:`format string syntax <formatstrings>` to customize how
- values are substituted.
-
- .. versionchanged:: 4.5
- Added support for substitutions into the destination URL.
- """
- def initialize(self, url, permanent=True):
- self._url = url
- self._permanent = permanent
-
- def get(self, *args):
- self.redirect(self._url.format(*args), permanent=self._permanent)
-
-
-class StaticFileHandler(RequestHandler):
- """A simple handler that can serve static content from a directory.
-
- A `StaticFileHandler` is configured automatically if you pass the
- ``static_path`` keyword argument to `Application`. This handler
- can be customized with the ``static_url_prefix``, ``static_handler_class``,
- and ``static_handler_args`` settings.
-
- To map an additional path to this handler for a static data directory
- you would add a line to your application like::
-
- application = web.Application([
- (r"/content/(.*)", web.StaticFileHandler, {"path": "/var/www"}),
- ])
-
- The handler constructor requires a ``path`` argument, which specifies the
- local root directory of the content to be served.
-
- Note that a capture group in the regex is required to parse the value for
-    the ``path`` argument to the get() method (different from the constructor
- argument above); see `URLSpec` for details.
-
- To serve a file like ``index.html`` automatically when a directory is
- requested, set ``static_handler_args=dict(default_filename="index.html")``
- in your application settings, or add ``default_filename`` as an initializer
- argument for your ``StaticFileHandler``.
-
- To maximize the effectiveness of browser caching, this class supports
- versioned urls (by default using the argument ``?v=``). If a version
- is given, we instruct the browser to cache this file indefinitely.
- `make_static_url` (also available as `RequestHandler.static_url`) can
- be used to construct a versioned url.
-
- This handler is intended primarily for use in development and light-duty
- file serving; for heavy traffic it will be more efficient to use
- a dedicated static file server (such as nginx or Apache). We support
- the HTTP ``Accept-Ranges`` mechanism to return partial content (because
- some browsers require this functionality to be present to seek in
- HTML5 audio or video).
-
- **Subclassing notes**
-
- This class is designed to be extensible by subclassing, but because
- of the way static urls are generated with class methods rather than
- instance methods, the inheritance patterns are somewhat unusual.
- Be sure to use the ``@classmethod`` decorator when overriding a
-    class method. Instance methods may use the attributes ``self.path``,
- ``self.absolute_path``, and ``self.modified``.
-
- Subclasses should only override methods discussed in this section;
- overriding other methods is error-prone. Overriding
- ``StaticFileHandler.get`` is particularly problematic due to the
- tight coupling with ``compute_etag`` and other methods.
-
- To change the way static urls are generated (e.g. to match the behavior
- of another server or CDN), override `make_static_url`, `parse_url_path`,
- `get_cache_time`, and/or `get_version`.
-
- To replace all interaction with the filesystem (e.g. to serve
- static content from a database), override `get_content`,
- `get_content_size`, `get_modified_time`, `get_absolute_path`, and
- `validate_absolute_path`.
-
- .. versionchanged:: 3.1
- Many of the methods for subclasses were added in Tornado 3.1.
- """
- CACHE_MAX_AGE = 86400 * 365 * 10 # 10 years
-
- _static_hashes = {} # type: typing.Dict
- _lock = threading.Lock() # protects _static_hashes
-
- def initialize(self, path, default_filename=None):
- self.root = path
- self.default_filename = default_filename
-
- @classmethod
- def reset(cls):
- with cls._lock:
- cls._static_hashes = {}
-
- def head(self, path):
- return self.get(path, include_body=False)
-
- @gen.coroutine
- def get(self, path, include_body=True):
- # Set up our path instance variables.
- self.path = self.parse_url_path(path)
- del path # make sure we don't refer to path instead of self.path again
- absolute_path = self.get_absolute_path(self.root, self.path)
- self.absolute_path = self.validate_absolute_path(
- self.root, absolute_path)
- if self.absolute_path is None:
- return
-
- self.modified = self.get_modified_time()
- self.set_headers()
-
- if self.should_return_304():
- self.set_status(304)
- return
-
- request_range = None
- range_header = self.request.headers.get("Range")
- if range_header:
- # As per RFC 2616 14.16, if an invalid Range header is specified,
- # the request will be treated as if the header didn't exist.
- request_range = httputil._parse_request_range(range_header)
-
- size = self.get_content_size()
- if request_range:
- start, end = request_range
- if (start is not None and start >= size) or end == 0:
-                # As per RFC 2616 14.35.1, a range is not satisfiable
-                # only if the first requested byte is equal to or greater
-                # than the content length, or if a suffix with length 0
-                # is specified.
- self.set_status(416) # Range Not Satisfiable
- self.set_header("Content-Type", "text/plain")
- self.set_header("Content-Range", "bytes */%s" % (size, ))
- return
- if start is not None and start < 0:
- start += size
- if end is not None and end > size:
- # Clients sometimes blindly use a large range to limit their
- # download size; cap the endpoint at the actual file size.
- end = size
- # Note: only return HTTP 206 if less than the entire range has been
- # requested. Not only is this semantically correct, but Chrome
- # refuses to play audio if it gets an HTTP 206 in response to
- # ``Range: bytes=0-``.
- if size != (end or size) - (start or 0):
- self.set_status(206) # Partial Content
- self.set_header("Content-Range",
- httputil._get_content_range(start, end, size))
- else:
- start = end = None
-
- if start is not None and end is not None:
- content_length = end - start
- elif end is not None:
- content_length = end
- elif start is not None:
- content_length = size - start
- else:
- content_length = size
- self.set_header("Content-Length", content_length)
-
- if include_body:
- content = self.get_content(self.absolute_path, start, end)
- if isinstance(content, bytes):
- content = [content]
- for chunk in content:
- try:
- self.write(chunk)
- yield self.flush()
- except iostream.StreamClosedError:
- return
- else:
- assert self.request.method == "HEAD"
-
- def compute_etag(self):
- """Sets the ``Etag`` header based on static url version.
-
- This allows efficient ``If-None-Match`` checks against cached
- versions, and sends the correct ``Etag`` for a partial response
- (i.e. the same ``Etag`` as the full file).
-
- .. versionadded:: 3.1
- """
- version_hash = self._get_cached_version(self.absolute_path)
- if not version_hash:
- return None
- return '"%s"' % (version_hash, )
-
- def set_headers(self):
- """Sets the content and caching headers on the response.
-
- .. versionadded:: 3.1
- """
- self.set_header("Accept-Ranges", "bytes")
- self.set_etag_header()
-
- if self.modified is not None:
- self.set_header("Last-Modified", self.modified)
-
- content_type = self.get_content_type()
- if content_type:
- self.set_header("Content-Type", content_type)
-
- cache_time = self.get_cache_time(self.path, self.modified,
- content_type)
- if cache_time > 0:
- self.set_header("Expires", datetime.datetime.utcnow() +
- datetime.timedelta(seconds=cache_time))
- self.set_header("Cache-Control", "max-age=" + str(cache_time))
-
- self.set_extra_headers(self.path)
-
- def should_return_304(self):
- """Returns True if the headers indicate that we should return 304.
-
- .. versionadded:: 3.1
- """
- if self.check_etag_header():
- return True
-
- # Check the If-Modified-Since, and don't send the result if the
- # content has not been modified
- ims_value = self.request.headers.get("If-Modified-Since")
- if ims_value is not None:
- date_tuple = email.utils.parsedate(ims_value)
- if date_tuple is not None:
- if_since = datetime.datetime(*date_tuple[:6])
- if if_since >= self.modified:
- return True
-
- return False
-
- @classmethod
- def get_absolute_path(cls, root, path):
- """Returns the absolute location of ``path`` relative to ``root``.
-
- ``root`` is the path configured for this `StaticFileHandler`
- (in most cases the ``static_path`` `Application` setting).
-
- This class method may be overridden in subclasses. By default
- it returns a filesystem path, but other strings may be used
- as long as they are unique and understood by the subclass's
- overridden `get_content`.
-
- .. versionadded:: 3.1
- """
- abspath = os.path.abspath(os.path.join(root, path))
- return abspath
-
- def validate_absolute_path(self, root, absolute_path):
- """Validate and return the absolute path.
-
- ``root`` is the configured path for the `StaticFileHandler`,
-        and ``path`` is the result of `get_absolute_path`.
-
- This is an instance method called during request processing,
- so it may raise `HTTPError` or use methods like
- `RequestHandler.redirect` (return None after redirecting to
- halt further processing). This is where 404 errors for missing files
- are generated.
-
- This method may modify the path before returning it, but note that
- any such modifications will not be understood by `make_static_url`.
-
- In instance methods, this method's result is available as
- ``self.absolute_path``.
-
- .. versionadded:: 3.1
- """
- # os.path.abspath strips a trailing /.
- # We must add it back to `root` so that we only match files
- # in a directory named `root` instead of files starting with
- # that prefix.
- root = os.path.abspath(root)
- if not root.endswith(os.path.sep):
- # abspath always removes a trailing slash, except when
- # root is '/'. This is an unusual case, but several projects
- # have independently discovered this technique to disable
- # Tornado's path validation and (hopefully) do their own,
- # so we need to support it.
- root += os.path.sep
- # The trailing slash also needs to be temporarily added back
-        # to the requested path so that a request to root/ will match.
- if not (absolute_path + os.path.sep).startswith(root):
- raise HTTPError(403, "%s is not in root static directory",
- self.path)
- if (os.path.isdir(absolute_path) and
- self.default_filename is not None):
- # need to look at the request.path here for when path is empty
- # but there is some prefix to the path that was already
- # trimmed by the routing
- if not self.request.path.endswith("/"):
- self.redirect(self.request.path + "/", permanent=True)
- return
- absolute_path = os.path.join(absolute_path, self.default_filename)
- if not os.path.exists(absolute_path):
- raise HTTPError(404)
- if not os.path.isfile(absolute_path):
- raise HTTPError(403, "%s is not a file", self.path)
- return absolute_path
-
- @classmethod
- def get_content(cls, abspath, start=None, end=None):
- """Retrieve the content of the requested resource which is located
- at the given absolute path.
-
- This class method may be overridden by subclasses. Note that its
- signature is different from other overridable class methods
- (no ``settings`` argument); this is deliberate to ensure that
- ``abspath`` is able to stand on its own as a cache key.
-
- This method should either return a byte string or an iterator
- of byte strings. The latter is preferred for large files
- as it helps reduce memory fragmentation.
-
- .. versionadded:: 3.1
- """
- with open(abspath, "rb") as file:
- if start is not None:
- file.seek(start)
- if end is not None:
- remaining = end - (start or 0)
- else:
- remaining = None
- while True:
- chunk_size = 64 * 1024
- if remaining is not None and remaining < chunk_size:
- chunk_size = remaining
- chunk = file.read(chunk_size)
- if chunk:
- if remaining is not None:
- remaining -= len(chunk)
- yield chunk
- else:
- if remaining is not None:
- assert remaining == 0
- return
-
- @classmethod
- def get_content_version(cls, abspath):
- """Returns a version string for the resource at the given path.
-
- This class method may be overridden by subclasses. The
- default implementation is a hash of the file's contents.
-
- .. versionadded:: 3.1
- """
- data = cls.get_content(abspath)
- hasher = hashlib.md5()
- if isinstance(data, bytes):
- hasher.update(data)
- else:
- for chunk in data:
- hasher.update(chunk)
- return hasher.hexdigest()
-
- def _stat(self):
- if not hasattr(self, '_stat_result'):
- self._stat_result = os.stat(self.absolute_path)
- return self._stat_result
-
- def get_content_size(self):
- """Retrieve the total size of the resource at the given path.
-
- This method may be overridden by subclasses.
-
- .. versionadded:: 3.1
-
- .. versionchanged:: 4.0
- This method is now always called, instead of only when
- partial results are requested.
- """
- stat_result = self._stat()
- return stat_result[stat.ST_SIZE]
-
- def get_modified_time(self):
- """Returns the time that ``self.absolute_path`` was last modified.
-
- May be overridden in subclasses. Should return a `~datetime.datetime`
- object or None.
-
- .. versionadded:: 3.1
- """
- stat_result = self._stat()
- modified = datetime.datetime.utcfromtimestamp(
- stat_result[stat.ST_MTIME])
- return modified
-
- def get_content_type(self):
- """Returns the ``Content-Type`` header to be used for this request.
-
- .. versionadded:: 3.1
- """
- mime_type, encoding = mimetypes.guess_type(self.absolute_path)
- # per RFC 6713, use the appropriate type for a gzip compressed file
- if encoding == "gzip":
- return "application/gzip"
- # As of 2015-07-21 there is no bzip2 encoding defined at
- # http://www.iana.org/assignments/media-types/media-types.xhtml
- # So for that (and any other encoding), use octet-stream.
- elif encoding is not None:
- return "application/octet-stream"
- elif mime_type is not None:
- return mime_type
- # if mime_type not detected, use application/octet-stream
- else:
- return "application/octet-stream"
-
- def set_extra_headers(self, path):
- """For subclass to add extra headers to the response"""
- pass
-
- def get_cache_time(self, path, modified, mime_type):
- """Override to customize cache control behavior.
-
- Return a positive number of seconds to make the result
- cacheable for that amount of time or 0 to mark resource as
- cacheable for an unspecified amount of time (subject to
- browser heuristics).
-
-        By default returns a cache expiry of 10 years for resources
-        requested with the ``v`` argument.
- """
- return self.CACHE_MAX_AGE if "v" in self.request.arguments else 0
-
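-    # Override sketch (illustrative only): cache every response for an
-    # hour regardless of the ``v`` argument.
-    #
-    #     def get_cache_time(self, path, modified, mime_type):
-    #         return 3600
-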
- @classmethod
- def make_static_url(cls, settings, path, include_version=True):
- """Constructs a versioned url for the given path.
-
- This method may be overridden in subclasses (but note that it
- is a class method rather than an instance method). Subclasses
- are only required to implement the signature
- ``make_static_url(cls, settings, path)``; other keyword
- arguments may be passed through `~RequestHandler.static_url`
- but are not standard.
-
- ``settings`` is the `Application.settings` dictionary. ``path``
- is the static path being requested. The url returned should be
- relative to the current host.
-
- ``include_version`` determines whether the generated URL should
- include the query string containing the version hash of the
- file corresponding to the given ``path``.
-
- """
- url = settings.get('static_url_prefix', '/static/') + path
- if not include_version:
- return url
-
- version_hash = cls.get_version(settings, path)
- if not version_hash:
- return url
-
- return '%s?v=%s' % (url, version_hash)
-
- def parse_url_path(self, url_path):
- """Converts a static URL path into a filesystem path.
-
- ``url_path`` is the path component of the URL with
-        ``static_url_prefix`` removed. The return value should be a
-        filesystem path relative to ``static_path``.
-
- This is the inverse of `make_static_url`.
- """
- if os.path.sep != "/":
- url_path = url_path.replace("/", os.path.sep)
- return url_path
-
- @classmethod
- def get_version(cls, settings, path):
- """Generate the version string to be used in static URLs.
-
- ``settings`` is the `Application.settings` dictionary and ``path``
- is the relative location of the requested asset on the filesystem.
- The returned value should be a string, or ``None`` if no version
- could be determined.
-
- .. versionchanged:: 3.1
- This method was previously recommended for subclasses to override;
- `get_content_version` is now preferred as it allows the base
- class to handle caching of the result.
- """
- abs_path = cls.get_absolute_path(settings['static_path'], path)
- return cls._get_cached_version(abs_path)
-
- @classmethod
- def _get_cached_version(cls, abs_path):
- with cls._lock:
- hashes = cls._static_hashes
- if abs_path not in hashes:
- try:
- hashes[abs_path] = cls.get_content_version(abs_path)
- except Exception:
- gen_log.error("Could not open static file %r", abs_path)
- hashes[abs_path] = None
- hsh = hashes.get(abs_path)
- if hsh:
- return hsh
- return None
-
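-
-# A sketch of the subclassing pattern described above (illustrative
-# only; ``MemoryStaticHandler`` and ``_ASSETS`` are hypothetical): all
-# filesystem interaction is replaced so content is served from an
-# in-memory dict keyed by the url path.
-class MemoryStaticHandler(StaticFileHandler):
-    _ASSETS = {"app.js": b"console.log('hi');"}
-
-    @classmethod
-    def get_absolute_path(cls, root, path):
-        # Any unique string is acceptable as long as the other
-        # overridden methods understand it.
-        return path
-
-    def validate_absolute_path(self, root, absolute_path):
-        if absolute_path not in self._ASSETS:
-            raise HTTPError(404)
-        return absolute_path
-
-    @classmethod
-    def get_content(cls, abspath, start=None, end=None):
-        return cls._ASSETS[abspath][start:end]
-
-    def get_content_size(self):
-        return len(self._ASSETS[self.absolute_path])
-
-    def get_modified_time(self):
-        # Fixed timestamp; a real implementation would track updates.
-        return datetime.datetime(2022, 1, 1)
-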
-
-class FallbackHandler(RequestHandler):
- """A `RequestHandler` that wraps another HTTP server callback.
-
- The fallback is a callable object that accepts an
- `~.httputil.HTTPServerRequest`, such as an `Application` or
- `tornado.wsgi.WSGIContainer`. This is most useful to use both
- Tornado ``RequestHandlers`` and WSGI in the same server. Typical
- usage::
-
- wsgi_app = tornado.wsgi.WSGIContainer(
- django.core.handlers.wsgi.WSGIHandler())
- application = tornado.web.Application([
- (r"/foo", FooHandler),
- (r".*", FallbackHandler, dict(fallback=wsgi_app),
- ])
- """
- def initialize(self, fallback):
- self.fallback = fallback
-
- def prepare(self):
- self.fallback(self.request)
- self._finished = True
-
-
-class OutputTransform(object):
- """A transform modifies the result of an HTTP request (e.g., GZip encoding)
-
- Applications are not expected to create their own OutputTransforms
- or interact with them directly; the framework chooses which transforms
- (if any) to apply.
- """
- def __init__(self, request):
- pass
-
- def transform_first_chunk(self, status_code, headers, chunk, finishing):
- # type: (int, httputil.HTTPHeaders, bytes, bool) -> typing.Tuple[int, httputil.HTTPHeaders, bytes]
- return status_code, headers, chunk
-
- def transform_chunk(self, chunk, finishing):
- return chunk
-
-
-class GZipContentEncoding(OutputTransform):
- """Applies the gzip content encoding to the response.
-
- See http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11
-
- .. versionchanged:: 4.0
- Now compresses all mime types beginning with ``text/``, instead
-       of just a whitelist. (The whitelist is still used for certain
-       non-text mime types.)
- """
- # Whitelist of compressible mime types (in addition to any types
- # beginning with "text/").
- CONTENT_TYPES = set(["application/javascript", "application/x-javascript",
- "application/xml", "application/atom+xml",
- "application/json", "application/xhtml+xml",
- "image/svg+xml"])
- # Python's GzipFile defaults to level 9, while most other gzip
- # tools (including gzip itself) default to 6, which is probably a
- # better CPU/size tradeoff.
- GZIP_LEVEL = 6
- # Responses that are too short are unlikely to benefit from gzipping
- # after considering the "Content-Encoding: gzip" header and the header
- # inside the gzip encoding.
- # Note that responses written in multiple chunks will be compressed
- # regardless of size.
- MIN_LENGTH = 1024
-
- def __init__(self, request):
- self._gzipping = "gzip" in request.headers.get("Accept-Encoding", "")
-
- def _compressible_type(self, ctype):
- return ctype.startswith('text/') or ctype in self.CONTENT_TYPES
-
- def transform_first_chunk(self, status_code, headers, chunk, finishing):
- # type: (int, httputil.HTTPHeaders, bytes, bool) -> typing.Tuple[int, httputil.HTTPHeaders, bytes]
- # TODO: can/should this type be inherited from the superclass?
- if 'Vary' in headers:
- headers['Vary'] += ', Accept-Encoding'
- else:
- headers['Vary'] = 'Accept-Encoding'
- if self._gzipping:
- ctype = _unicode(headers.get("Content-Type", "")).split(";")[0]
- self._gzipping = self._compressible_type(ctype) and \
- (not finishing or len(chunk) >= self.MIN_LENGTH) and \
- ("Content-Encoding" not in headers)
- if self._gzipping:
- headers["Content-Encoding"] = "gzip"
- self._gzip_value = BytesIO()
- self._gzip_file = gzip.GzipFile(mode="w", fileobj=self._gzip_value,
- compresslevel=self.GZIP_LEVEL)
- chunk = self.transform_chunk(chunk, finishing)
- if "Content-Length" in headers:
- # The original content length is no longer correct.
- # If this is the last (and only) chunk, we can set the new
- # content-length; otherwise we remove it and fall back to
- # chunked encoding.
- if finishing:
- headers["Content-Length"] = str(len(chunk))
- else:
- del headers["Content-Length"]
- return status_code, headers, chunk
-
- def transform_chunk(self, chunk, finishing):
- if self._gzipping:
- self._gzip_file.write(chunk)
- if finishing:
- self._gzip_file.close()
- else:
- self._gzip_file.flush()
- chunk = self._gzip_value.getvalue()
- self._gzip_value.truncate(0)
- self._gzip_value.seek(0)
- return chunk
-
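-# Applications do not normally instantiate this transform themselves;
-# in Tornado 4.x it is enabled through the ``compress_response``
-# application setting, e.g. (sketch)::
-#
-#     application = Application(handlers, compress_response=True)
-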
-
-def authenticated(method):
- """Decorate methods with this to require that the user be logged in.
-
- If the user is not logged in, they will be redirected to the configured
- `login url <RequestHandler.get_login_url>`.
-
- If you configure a login url with a query parameter, Tornado will
- assume you know what you're doing and use it as-is. If not, it
- will add a `next` parameter so the login page knows where to send
- you once you're logged in.
- """
- @functools.wraps(method)
- def wrapper(self, *args, **kwargs):
- if not self.current_user:
- if self.request.method in ("GET", "HEAD"):
- url = self.get_login_url()
- if "?" not in url:
- if urlparse.urlsplit(url).scheme:
- # if login url is absolute, make next absolute too
- next_url = self.request.full_url()
- else:
- next_url = self.request.uri
- url += "?" + urlencode(dict(next=next_url))
- self.redirect(url)
- return
- raise HTTPError(403)
- return method(self, *args, **kwargs)
- return wrapper
-
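-
-# A typical usage sketch (illustrative only; ``ProfileHandler`` is a
-# hypothetical name). Unauthenticated GET/HEAD requests are redirected
-# to the login url; other methods receive a 403.
-class ProfileHandler(RequestHandler):
-    @authenticated
-    def get(self):
-        # Only reached when self.current_user is truthy.
-        self.write("hello, logged-in user")
-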
-
-class UIModule(object):
- """A re-usable, modular UI unit on a page.
-
-    UI modules often execute additional queries, and they can include
-    additional CSS and JavaScript, which is automatically inserted into
-    the output page when it is rendered.
-
- Subclasses of UIModule must override the `render` method.
- """
- def __init__(self, handler):
- self.handler = handler
- self.request = handler.request
- self.ui = handler.ui
- self.locale = handler.locale
-
- @property
- def current_user(self):
- return self.handler.current_user
-
- def render(self, *args, **kwargs):
- """Override in subclasses to return this module's output."""
- raise NotImplementedError()
-
- def embedded_javascript(self):
- """Override to return a JavaScript string
- to be embedded in the page."""
- return None
-
- def javascript_files(self):
- """Override to return a list of JavaScript files needed by this module.
-
- If the return values are relative paths, they will be passed to
- `RequestHandler.static_url`; otherwise they will be used as-is.
- """
- return None
-
- def embedded_css(self):
- """Override to return a CSS string
- that will be embedded in the page."""
- return None
-
- def css_files(self):
- """Override to returns a list of CSS files required by this module.
-
- If the return values are relative paths, they will be passed to
- `RequestHandler.static_url`; otherwise they will be used as-is.
- """
- return None
-
- def html_head(self):
- """Override to return an HTML string that will be put in the <head/>
- element.
- """
- return None
-
- def html_body(self):
- """Override to return an HTML string that will be put at the end of
- the <body/> element.
- """
- return None
-
- def render_string(self, path, **kwargs):
- """Renders a template and returns it as a string."""
- return self.handler.render_string(path, **kwargs)
-
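-
-# A concrete UIModule sketch (illustrative only; ``Entry`` and
-# "module-entry.html" are hypothetical). It would be registered via the
-# ``ui_modules`` application setting and invoked from a template as
-# {% module Entry(post) %}.
-class Entry(UIModule):
-    def render(self, post):
-        return self.render_string("module-entry.html", post=post)
-
-    def embedded_css(self):
-        return ".entry { margin-bottom: 1em; }"
-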
-
-class _linkify(UIModule):
- def render(self, text, **kwargs):
- return escape.linkify(text, **kwargs)
-
-
-class _xsrf_form_html(UIModule):
- def render(self):
- return self.handler.xsrf_form_html()
-
-
-class TemplateModule(UIModule):
- """UIModule that simply renders the given template.
-
- {% module Template("foo.html") %} is similar to {% include "foo.html" %},
- but the module version gets its own namespace (with kwargs passed to
- Template()) instead of inheriting the outer template's namespace.
-
- Templates rendered through this module also get access to UIModule's
- automatic javascript/css features. Simply call set_resources
- inside the template and give it keyword arguments corresponding to
- the methods on UIModule: {{ set_resources(js_files=static_url("my.js")) }}
- Note that these resources are output once per template file, not once
- per instantiation of the template, so they must not depend on
- any arguments to the template.
- """
- def __init__(self, handler):
- super(TemplateModule, self).__init__(handler)
- # keep resources in both a list and a dict to preserve order
- self._resource_list = []
- self._resource_dict = {}
-
- def render(self, path, **kwargs):
- def set_resources(**kwargs):
- if path not in self._resource_dict:
- self._resource_list.append(kwargs)
- self._resource_dict[path] = kwargs
- else:
- if self._resource_dict[path] != kwargs:
- raise ValueError("set_resources called with different "
- "resources for the same template")
- return ""
- return self.render_string(path, set_resources=set_resources,
- **kwargs)
-
- def _get_resources(self, key):
- return (r[key] for r in self._resource_list if key in r)
-
- def embedded_javascript(self):
- return "\n".join(self._get_resources("embedded_javascript"))
-
- def javascript_files(self):
- result = []
- for f in self._get_resources("javascript_files"):
- if isinstance(f, (unicode_type, bytes)):
- result.append(f)
- else:
- result.extend(f)
- return result
-
- def embedded_css(self):
- return "\n".join(self._get_resources("embedded_css"))
-
- def css_files(self):
- result = []
- for f in self._get_resources("css_files"):
- if isinstance(f, (unicode_type, bytes)):
- result.append(f)
- else:
- result.extend(f)
- return result
-
- def html_head(self):
- return "".join(self._get_resources("html_head"))
-
- def html_body(self):
- return "".join(self._get_resources("html_body"))
-
-
-class _UIModuleNamespace(object):
- """Lazy namespace which creates UIModule proxies bound to a handler."""
- def __init__(self, handler, ui_modules):
- self.handler = handler
- self.ui_modules = ui_modules
-
- def __getitem__(self, key):
- return self.handler._ui_module(key, self.ui_modules[key])
-
- def __getattr__(self, key):
- try:
- return self[key]
- except KeyError as e:
- raise AttributeError(str(e))
-
-
-if hasattr(hmac, 'compare_digest'): # python 3.3
- _time_independent_equals = hmac.compare_digest
-else:
- def _time_independent_equals(a, b):
- if len(a) != len(b):
- return False
- result = 0
- if isinstance(a[0], int): # python3 byte strings
- for x, y in zip(a, b):
- result |= x ^ y
- else: # python2
- for x, y in zip(a, b):
- result |= ord(x) ^ ord(y)
- return result == 0
-
-
-def create_signed_value(secret, name, value, version=None, clock=None,
- key_version=None):
- if version is None:
- version = DEFAULT_SIGNED_VALUE_VERSION
- if clock is None:
- clock = time.time
-
- timestamp = utf8(str(int(clock())))
- value = base64.b64encode(utf8(value))
- if version == 1:
- signature = _create_signature_v1(secret, name, value, timestamp)
- value = b"|".join([value, timestamp, signature])
- return value
- elif version == 2:
- # The v2 format consists of a version number and a series of
- # length-prefixed fields "%d:%s", the last of which is a
- # signature, all separated by pipes. All numbers are in
- # decimal format with no leading zeros. The signature is an
- # HMAC-SHA256 of the whole string up to that point, including
- # the final pipe.
- #
- # The fields are:
- # - format version (i.e. 2; no length prefix)
- # - key version (integer, default is 0)
- # - timestamp (integer seconds since epoch)
- # - name (not encoded; assumed to be ~alphanumeric)
- # - value (base64-encoded)
- # - signature (hex-encoded; no length prefix)
- def format_field(s):
- return utf8("%d:" % len(s)) + utf8(s)
- to_sign = b"|".join([
- b"2",
- format_field(str(key_version or 0)),
- format_field(timestamp),
- format_field(name),
- format_field(value),
- b''])
-
- if isinstance(secret, dict):
- assert key_version is not None, 'Key version must be set when sign key dict is used'
- assert version >= 2, 'Version must be at least 2 for key version support'
- secret = secret[key_version]
-
- signature = _create_signature_v2(secret, to_sign)
- return to_sign + signature
- else:
- raise ValueError("Unsupported version %d" % version)
-
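-# A worked sketch of the v2 wire format produced above (the timestamp
-# and signature shown are placeholders). For name "foo" and value "bar"
-# signed at t=1300000000 with the default key version 0:
-#
-#     2|1:0|10:1300000000|3:foo|4:YmFy|<hex hmac-sha256 signature>
-#
-# where "YmFy" is base64("bar") and every field except the leading
-# version and the trailing signature is length-prefixed.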
-
-# A leading version number in decimal
-# with no leading zeros, followed by a pipe.
-_signed_value_version_re = re.compile(br"^([1-9][0-9]*)\|(.*)$")
-
-
-def _get_version(value):
- # Figures out what version value is. Version 1 did not include an
- # explicit version field and started with arbitrary base64 data,
- # which makes this tricky.
- m = _signed_value_version_re.match(value)
- if m is None:
- version = 1
- else:
- try:
- version = int(m.group(1))
- if version > 999:
- # Certain payloads from the version-less v1 format may
- # be parsed as valid integers. Due to base64 padding
- # restrictions, this can only happen for numbers whose
- # length is a multiple of 4, so we can treat all
- # numbers up to 999 as versions, and for the rest we
- # fall back to v1 format.
- version = 1
- except ValueError:
- version = 1
- return version
-
-
-def decode_signed_value(secret, name, value, max_age_days=31,
- clock=None, min_version=None):
- if clock is None:
- clock = time.time
- if min_version is None:
- min_version = DEFAULT_SIGNED_VALUE_MIN_VERSION
- if min_version > 2:
- raise ValueError("Unsupported min_version %d" % min_version)
- if not value:
- return None
-
- value = utf8(value)
- version = _get_version(value)
-
- if version < min_version:
- return None
- if version == 1:
- return _decode_signed_value_v1(secret, name, value,
- max_age_days, clock)
- elif version == 2:
- return _decode_signed_value_v2(secret, name, value,
- max_age_days, clock)
- else:
- return None
-
-
-def _decode_signed_value_v1(secret, name, value, max_age_days, clock):
- parts = utf8(value).split(b"|")
- if len(parts) != 3:
- return None
- signature = _create_signature_v1(secret, name, parts[0], parts[1])
- if not _time_independent_equals(parts[2], signature):
- gen_log.warning("Invalid cookie signature %r", value)
- return None
- timestamp = int(parts[1])
- if timestamp < clock() - max_age_days * 86400:
- gen_log.warning("Expired cookie %r", value)
- return None
- if timestamp > clock() + 31 * 86400:
- # _cookie_signature does not hash a delimiter between the
- # parts of the cookie, so an attacker could transfer trailing
- # digits from the payload to the timestamp without altering the
- # signature. For backwards compatibility, sanity-check timestamp
- # here instead of modifying _cookie_signature.
- gen_log.warning("Cookie timestamp in future; possible tampering %r",
- value)
- return None
- if parts[1].startswith(b"0"):
- gen_log.warning("Tampered cookie %r", value)
- return None
- try:
- return base64.b64decode(parts[0])
- except Exception:
- return None
-
-
-def _decode_fields_v2(value):
- def _consume_field(s):
- length, _, rest = s.partition(b':')
- n = int(length)
- field_value = rest[:n]
- # In python 3, indexing bytes returns small integers; we must
- # use a slice to get a byte string as in python 2.
- if rest[n:n + 1] != b'|':
- raise ValueError("malformed v2 signed value field")
- rest = rest[n + 1:]
- return field_value, rest
-
- rest = value[2:] # remove version number
- key_version, rest = _consume_field(rest)
- timestamp, rest = _consume_field(rest)
- name_field, rest = _consume_field(rest)
- value_field, passed_sig = _consume_field(rest)
- return int(key_version), timestamp, name_field, value_field, passed_sig
-
-
-def _decode_signed_value_v2(secret, name, value, max_age_days, clock):
- try:
- key_version, timestamp, name_field, value_field, passed_sig = _decode_fields_v2(value)
- except ValueError:
- return None
- signed_string = value[:-len(passed_sig)]
-
- if isinstance(secret, dict):
- try:
- secret = secret[key_version]
- except KeyError:
- return None
-
- expected_sig = _create_signature_v2(secret, signed_string)
- if not _time_independent_equals(passed_sig, expected_sig):
- return None
- if name_field != utf8(name):
- return None
- timestamp = int(timestamp)
- if timestamp < clock() - max_age_days * 86400:
- # The signature has expired.
- return None
- try:
- return base64.b64decode(value_field)
- except Exception:
- return None
-
-
-def get_signature_key_version(value):
- value = utf8(value)
- version = _get_version(value)
- if version < 2:
- return None
- try:
- key_version, _, _, _, _ = _decode_fields_v2(value)
- except ValueError:
- return None
-
- return key_version
-
-
-def _create_signature_v1(secret, *parts):
- hash = hmac.new(utf8(secret), digestmod=hashlib.sha1)
- for part in parts:
- hash.update(utf8(part))
- return utf8(hash.hexdigest())
-
-
-def _create_signature_v2(secret, s):
- hash = hmac.new(utf8(secret), digestmod=hashlib.sha256)
- hash.update(utf8(s))
- return utf8(hash.hexdigest())
-
-
-def is_absolute(path):
- return any(path.startswith(x) for x in ["/", "http:", "https:"])
+#!/usr/bin/env python
+#
+# Copyright 2009 Facebook
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""``tornado.web`` provides a simple web framework with asynchronous
+features that allow it to scale to large numbers of open connections,
+making it ideal for `long polling
+<http://en.wikipedia.org/wiki/Push_technology#Long_polling>`_.
+
+Here is a simple "Hello, world" example app:
+
+.. testcode::
+
+ import tornado.ioloop
+ import tornado.web
+
+ class MainHandler(tornado.web.RequestHandler):
+ def get(self):
+ self.write("Hello, world")
+
+ if __name__ == "__main__":
+ application = tornado.web.Application([
+ (r"/", MainHandler),
+ ])
+ application.listen(8888)
+ tornado.ioloop.IOLoop.current().start()
+
+.. testoutput::
+ :hide:
+
+
+See the :doc:`guide` for additional information.
+
+Thread-safety notes
+-------------------
+
+In general, methods on `RequestHandler` and elsewhere in Tornado are
+not thread-safe. In particular, methods such as
+`~RequestHandler.write()`, `~RequestHandler.finish()`, and
+`~RequestHandler.flush()` must only be called from the main thread. If
+you use multiple threads it is important to use `.IOLoop.add_callback`
+to transfer control back to the main thread before finishing the
+request.
+
+"""
+
+from __future__ import absolute_import, division, print_function
+
+import base64
+import binascii
+import datetime
+import email.utils
+import functools
+import gzip
+import hashlib
+import hmac
+import mimetypes
+import numbers
+import os.path
+import re
+import stat
+import sys
+import threading
+import time
+import tornado
+import traceback
+import types
+from inspect import isclass
+from io import BytesIO
+
+from tornado.concurrent import Future
+from tornado import escape
+from tornado import gen
+from tornado import httputil
+from tornado import iostream
+from tornado import locale
+from tornado.log import access_log, app_log, gen_log
+from tornado import stack_context
+from tornado import template
+from tornado.escape import utf8, _unicode
+from tornado.routing import (AnyMatches, DefaultHostMatches, HostMatches,
+ ReversibleRouter, Rule, ReversibleRuleRouter,
+ URLSpec)
+from tornado.util import (ObjectDict, raise_exc_info,
+ unicode_type, _websocket_mask, PY3)
+
+url = URLSpec
+
+if PY3:
+ import http.cookies as Cookie
+ import urllib.parse as urlparse
+ from urllib.parse import urlencode
+else:
+ import Cookie
+ import urlparse
+ from urllib import urlencode
+
+try:
+ import typing # noqa
+
+ # The following types are accepted by RequestHandler.set_header
+ # and related methods.
+ _HeaderTypes = typing.Union[bytes, unicode_type,
+ numbers.Integral, datetime.datetime]
+except ImportError:
+ pass
+
+
+MIN_SUPPORTED_SIGNED_VALUE_VERSION = 1
+"""The oldest signed value version supported by this version of Tornado.
+
+Signed values older than this version cannot be decoded.
+
+.. versionadded:: 3.2.1
+"""
+
+MAX_SUPPORTED_SIGNED_VALUE_VERSION = 2
+"""The newest signed value version supported by this version of Tornado.
+
+Signed values newer than this version cannot be decoded.
+
+.. versionadded:: 3.2.1
+"""
+
+DEFAULT_SIGNED_VALUE_VERSION = 2
+"""The signed value version produced by `.RequestHandler.create_signed_value`.
+
+May be overridden by passing a ``version`` keyword argument.
+
+.. versionadded:: 3.2.1
+"""
+
+DEFAULT_SIGNED_VALUE_MIN_VERSION = 1
+"""The oldest signed value accepted by `.RequestHandler.get_secure_cookie`.
+
+May be overridden by passing a ``min_version`` keyword argument.
+
+.. versionadded:: 3.2.1
+"""
+
+
+class RequestHandler(object):
+ """Base class for HTTP request handlers.
+
+ Subclasses must define at least one of the methods defined in the
+ "Entry points" section below.
+ """
+ SUPPORTED_METHODS = ("GET", "HEAD", "POST", "DELETE", "PATCH", "PUT",
+ "OPTIONS")
+
+ _template_loaders = {} # type: typing.Dict[str, template.BaseLoader]
+ _template_loader_lock = threading.Lock()
+ _remove_control_chars_regex = re.compile(r"[\x00-\x08\x0e-\x1f]")
+
+ def __init__(self, application, request, **kwargs):
+ super(RequestHandler, self).__init__()
+
+ self.application = application
+ self.request = request
+ self._headers_written = False
+ self._finished = False
+ self._auto_finish = True
+ self._transforms = None # will be set in _execute
+ self._prepared_future = None
+ self._headers = None # type: httputil.HTTPHeaders
+ self.path_args = None
+ self.path_kwargs = None
+ self.ui = ObjectDict((n, self._ui_method(m)) for n, m in
+ application.ui_methods.items())
+ # UIModules are available as both `modules` and `_tt_modules` in the
+ # template namespace. Historically only `modules` was available
+ # but could be clobbered by user additions to the namespace.
+ # The template {% module %} directive looks in `_tt_modules` to avoid
+ # possible conflicts.
+ self.ui["_tt_modules"] = _UIModuleNamespace(self,
+ application.ui_modules)
+ self.ui["modules"] = self.ui["_tt_modules"]
+ self.clear()
+ self.request.connection.set_close_callback(self.on_connection_close)
+ self.initialize(**kwargs)
+
+ def initialize(self):
+ """Hook for subclass initialization. Called for each request.
+
+ A dictionary passed as the third argument of a url spec will be
+ supplied as keyword arguments to initialize().
+
+ Example::
+
+ class ProfileHandler(RequestHandler):
+ def initialize(self, database):
+ self.database = database
+
+ def get(self, username):
+ ...
+
+ app = Application([
+ (r'/user/(.*)', ProfileHandler, dict(database=database)),
+ ])
+ """
+ pass
+
+ @property
+ def settings(self):
+ """An alias for `self.application.settings <Application.settings>`."""
+ return self.application.settings
+
+ def head(self, *args, **kwargs):
+ raise HTTPError(405)
+
+ def get(self, *args, **kwargs):
+ raise HTTPError(405)
+
+ def post(self, *args, **kwargs):
+ raise HTTPError(405)
+
+ def delete(self, *args, **kwargs):
+ raise HTTPError(405)
+
+ def patch(self, *args, **kwargs):
+ raise HTTPError(405)
+
+ def put(self, *args, **kwargs):
+ raise HTTPError(405)
+
+ def options(self, *args, **kwargs):
+ raise HTTPError(405)
+
+ def prepare(self):
+ """Called at the beginning of a request before `get`/`post`/etc.
+
+ Override this method to perform common initialization regardless
+ of the request method.
+
+ Asynchronous support: Decorate this method with `.gen.coroutine`
+ or `.return_future` to make it asynchronous (the
+ `asynchronous` decorator cannot be used on `prepare`).
+ If this method returns a `.Future` execution will not proceed
+ until the `.Future` is done.
+
+ .. versionadded:: 3.1
+ Asynchronous support.
+ """
+ pass
+
+ def on_finish(self):
+ """Called after the end of a request.
+
+ Override this method to perform cleanup, logging, etc.
+ This method is a counterpart to `prepare`. ``on_finish`` may
+ not produce any output, as it is called after the response
+ has been sent to the client.
+ """
+ pass
+
+ def on_connection_close(self):
+ """Called in async handlers if the client closed the connection.
+
+ Override this to clean up resources associated with
+ long-lived connections. Note that this method is called only if
+ the connection was closed during asynchronous processing; if you
+ need to do cleanup after every request override `on_finish`
+ instead.
+
+ Proxies may keep a connection open for a time (perhaps
+ indefinitely) after the client has gone away, so this method
+ may not be called promptly after the end user closes their
+ connection.
+ """
+ if _has_stream_request_body(self.__class__):
+ if not self.request.body.done():
+ self.request.body.set_exception(iostream.StreamClosedError())
+ self.request.body.exception()
+
+ def clear(self):
+ """Resets all headers and content for this response."""
+ self._headers = httputil.HTTPHeaders({
+ "Server": "TornadoServer/%s" % tornado.version,
+ "Content-Type": "text/html; charset=UTF-8",
+ "Date": httputil.format_timestamp(time.time()),
+ })
+ self.set_default_headers()
+ self._write_buffer = []
+ self._status_code = 200
+ self._reason = httputil.responses[200]
+
+ def set_default_headers(self):
+ """Override this to set HTTP headers at the beginning of the request.
+
+ For example, this is the place to set a custom ``Server`` header.
+ Note that setting such headers in the normal flow of request
+ processing may not do what you want, since headers may be reset
+ during error handling.
+ """
+ pass
+
+ def set_status(self, status_code, reason=None):
+ """Sets the status code for our response.
+
+ :arg int status_code: Response status code. If ``reason`` is ``None``,
+ it must be present in `httplib.responses <http.client.responses>`.
+ :arg string reason: Human-readable reason phrase describing the status
+ code. If ``None``, it will be filled in from
+ `httplib.responses <http.client.responses>`.
+ """
+ self._status_code = status_code
+ if reason is not None:
+ self._reason = escape.native_str(reason)
+ else:
+ try:
+ self._reason = httputil.responses[status_code]
+ except KeyError:
+ raise ValueError("unknown status code %d" % status_code)
+
+ def get_status(self):
+ """Returns the status code for our response."""
+ return self._status_code
+
+ def set_header(self, name, value):
+ # type: (str, _HeaderTypes) -> None
+ """Sets the given response header name and value.
+
+ If a datetime is given, we automatically format it according to the
+ HTTP specification. If the value is not a string, we convert it to
+ a string. All header values are then encoded as UTF-8.
+ """
+ self._headers[name] = self._convert_header_value(value)
+
+ def add_header(self, name, value):
+ # type: (str, _HeaderTypes) -> None
+ """Adds the given response header and value.
+
+ Unlike `set_header`, `add_header` may be called multiple times
+ to return multiple values for the same header.
+ """
+ self._headers.add(name, self._convert_header_value(value))
+
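+    # Sketch of the distinction above (illustrative): ``set_header``
+    # replaces any existing value, while ``add_header`` appends another
+    # value for the same name.
+    #
+    #     self.set_header("Cache-Control", "no-store")
+    #     self.add_header("Set-Cookie", "a=1")
+    #     self.add_header("Set-Cookie", "b=2")  # both cookies are sent
+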
+ def clear_header(self, name):
+ """Clears an outgoing header, undoing a previous `set_header` call.
+
+ Note that this method does not apply to multi-valued headers
+ set by `add_header`.
+ """
+ if name in self._headers:
+ del self._headers[name]
+
+ _INVALID_HEADER_CHAR_RE = re.compile(r"[\x00-\x1f]")
+
+ def _convert_header_value(self, value):
+ # type: (_HeaderTypes) -> str
+
+ # Convert the input value to a str. This type check is a bit
+ # subtle: The bytes case only executes on python 3, and the
+ # unicode case only executes on python 2, because the other
+ # cases are covered by the first match for str.
+ if isinstance(value, str):
+ retval = value
+ elif isinstance(value, bytes): # py3
+ # Non-ascii characters in headers are not well supported,
+ # but if you pass bytes, use latin1 so they pass through as-is.
+ retval = value.decode('latin1')
+ elif isinstance(value, unicode_type): # py2
+ # TODO: This is inconsistent with the use of latin1 above,
+ # but it's been that way for a long time. Should it change?
+ retval = escape.utf8(value)
+ elif isinstance(value, numbers.Integral):
+ # return immediately since we know the converted value will be safe
+ return str(value)
+ elif isinstance(value, datetime.datetime):
+ return httputil.format_timestamp(value)
+ else:
+ raise TypeError("Unsupported header value %r" % value)
+ # If \n is allowed into the header, it is possible to inject
+ # additional headers or split the request.
+ if RequestHandler._INVALID_HEADER_CHAR_RE.search(retval):
+ raise ValueError("Unsafe header value %r", retval)
+ return retval
+
+ _ARG_DEFAULT = object()
+
+ def get_argument(self, name, default=_ARG_DEFAULT, strip=True):
+ """Returns the value of the argument with the given name.
+
+ If default is not provided, the argument is considered to be
+ required, and we raise a `MissingArgumentError` if it is missing.
+
+ If the argument appears in the url more than once, we return the
+ last value.
+
+ The returned value is always unicode.
+ """
+ return self._get_argument(name, default, self.request.arguments, strip)
+
+ def get_arguments(self, name, strip=True):
+ """Returns a list of the arguments with the given name.
+
+ If the argument is not present, returns an empty list.
+
+ The returned values are always unicode.
+ """
+
+ # Make sure `get_arguments` isn't accidentally being called with a
+ # positional argument that's assumed to be a default (like in
+        # `get_argument`).
+ assert isinstance(strip, bool)
+
+ return self._get_arguments(name, self.request.arguments, strip)
+
+ def get_body_argument(self, name, default=_ARG_DEFAULT, strip=True):
+ """Returns the value of the argument with the given name
+ from the request body.
+
+ If default is not provided, the argument is considered to be
+ required, and we raise a `MissingArgumentError` if it is missing.
+
+ If the argument appears in the url more than once, we return the
+ last value.
+
+ The returned value is always unicode.
+
+ .. versionadded:: 3.2
+ """
+ return self._get_argument(name, default, self.request.body_arguments,
+ strip)
+
+ def get_body_arguments(self, name, strip=True):
+ """Returns a list of the body arguments with the given name.
+
+ If the argument is not present, returns an empty list.
+
+ The returned values are always unicode.
+
+ .. versionadded:: 3.2
+ """
+ return self._get_arguments(name, self.request.body_arguments, strip)
+
+ def get_query_argument(self, name, default=_ARG_DEFAULT, strip=True):
+ """Returns the value of the argument with the given name
+ from the request query string.
+
+ If default is not provided, the argument is considered to be
+ required, and we raise a `MissingArgumentError` if it is missing.
+
+ If the argument appears in the url more than once, we return the
+ last value.
+
+ The returned value is always unicode.
+
+ .. versionadded:: 3.2
+ """
+ return self._get_argument(name, default,
+ self.request.query_arguments, strip)
+
+ def get_query_arguments(self, name, strip=True):
+ """Returns a list of the query arguments with the given name.
+
+ If the argument is not present, returns an empty list.
+
+ The returned values are always unicode.
+
+ .. versionadded:: 3.2
+ """
+ return self._get_arguments(name, self.request.query_arguments, strip)
+
+ def _get_argument(self, name, default, source, strip=True):
+ args = self._get_arguments(name, source, strip=strip)
+ if not args:
+ if default is self._ARG_DEFAULT:
+ raise MissingArgumentError(name)
+ return default
+ return args[-1]
+
+ def _get_arguments(self, name, source, strip=True):
+ values = []
+ for v in source.get(name, []):
+ v = self.decode_argument(v, name=name)
+ if isinstance(v, unicode_type):
+ # Get rid of any weird control chars (unless decoding gave
+ # us bytes, in which case leave it alone)
+ v = RequestHandler._remove_control_chars_regex.sub(" ", v)
+ if strip:
+ v = v.strip()
+ values.append(v)
+ return values
+
+ def decode_argument(self, value, name=None):
+ """Decodes an argument from the request.
+
+ The argument has been percent-decoded and is now a byte string.
+ By default, this method decodes the argument as utf-8 and returns
+ a unicode string, but this may be overridden in subclasses.
+
+ This method is used as a filter for both `get_argument()` and for
+ values extracted from the url and passed to `get()`/`post()`/etc.
+
+ The name of the argument is provided if known, but may be None
+ (e.g. for unnamed groups in the url regex).
+ """
+ try:
+ return _unicode(value)
+ except UnicodeDecodeError:
+ raise HTTPError(400, "Invalid unicode in %s: %r" %
+ (name or "url", value[:40]))
+
+ @property
+ def cookies(self):
+ """An alias for
+ `self.request.cookies <.httputil.HTTPServerRequest.cookies>`."""
+ return self.request.cookies
+
+ def get_cookie(self, name, default=None):
+ """Gets the value of the cookie with the given name, else default."""
+ if self.request.cookies is not None and name in self.request.cookies:
+ return self.request.cookies[name].value
+ return default
+
+ def set_cookie(self, name, value, domain=None, expires=None, path="/",
+ expires_days=None, **kwargs):
+ """Sets the given cookie name/value with the given options.
+
+ Additional keyword arguments are set on the Cookie.Morsel
+ directly.
+ See https://docs.python.org/2/library/cookie.html#Cookie.Morsel
+ for available attributes.
+ """
+ # The cookie library only accepts type str, in both python 2 and 3
+ name = escape.native_str(name)
+ value = escape.native_str(value)
+ if re.search(r"[\x00-\x20]", name + value):
+ # Don't let us accidentally inject bad stuff
+ raise ValueError("Invalid cookie %r: %r" % (name, value))
+ if not hasattr(self, "_new_cookie"):
+ self._new_cookie = Cookie.SimpleCookie()
+ if name in self._new_cookie:
+ del self._new_cookie[name]
+ self._new_cookie[name] = value
+ morsel = self._new_cookie[name]
+ if domain:
+ morsel["domain"] = domain
+ if expires_days is not None and not expires:
+ expires = datetime.datetime.utcnow() + datetime.timedelta(
+ days=expires_days)
+ if expires:
+ morsel["expires"] = httputil.format_timestamp(expires)
+ if path:
+ morsel["path"] = path
+ for k, v in kwargs.items():
+ if k == 'max_age':
+ k = 'max-age'
+
+ # skip falsy values for httponly and secure flags because
+ # SimpleCookie sets them regardless
+ if k in ['httponly', 'secure'] and not v:
+ continue
+
+ morsel[k] = v
+
+ def clear_cookie(self, name, path="/", domain=None):
+ """Deletes the cookie with the given name.
+
+ Due to limitations of the cookie protocol, you must pass the same
+ path and domain to clear a cookie as were used when that cookie
+ was set (but there is no way to find out on the server side
+ which values were used for a given cookie).
+ """
+ expires = datetime.datetime.utcnow() - datetime.timedelta(days=365)
+ self.set_cookie(name, value="", path=path, expires=expires,
+ domain=domain)
+
+ def clear_all_cookies(self, path="/", domain=None):
+ """Deletes all the cookies the user sent with this request.
+
+ See `clear_cookie` for more information on the path and domain
+ parameters.
+
+ .. versionchanged:: 3.2
+
+ Added the ``path`` and ``domain`` parameters.
+ """
+ for name in self.request.cookies:
+ self.clear_cookie(name, path=path, domain=domain)
+
+ def set_secure_cookie(self, name, value, expires_days=30, version=None,
+ **kwargs):
+ """Signs and timestamps a cookie so it cannot be forged.
+
+ You must specify the ``cookie_secret`` setting in your Application
+ to use this method. It should be a long, random sequence of bytes
+ to be used as the HMAC secret for the signature.
+
+ To read a cookie set with this method, use `get_secure_cookie()`.
+
+ Note that the ``expires_days`` parameter sets the lifetime of the
+ cookie in the browser, but is independent of the ``max_age_days``
+ parameter to `get_secure_cookie`.
+
+ Secure cookies may contain arbitrary byte values, not just unicode
+ strings (unlike regular cookies)
+
+ .. versionchanged:: 3.2.1
+
+ Added the ``version`` argument. Introduced cookie version 2
+ and made it the default.
+ """
+ self.set_cookie(name, self.create_signed_value(name, value,
+ version=version),
+ expires_days=expires_days, **kwargs)
+
+ def create_signed_value(self, name, value, version=None):
+ """Signs and timestamps a string so it cannot be forged.
+
+ Normally used via set_secure_cookie, but provided as a separate
+ method for non-cookie uses. To decode a value not stored
+ as a cookie use the optional value argument to get_secure_cookie.
+
+ .. versionchanged:: 3.2.1
+
+ Added the ``version`` argument. Introduced cookie version 2
+ and made it the default.
+ """
+ self.require_setting("cookie_secret", "secure cookies")
+ secret = self.application.settings["cookie_secret"]
+ key_version = None
+ if isinstance(secret, dict):
+ if self.application.settings.get("key_version") is None:
+ raise Exception("key_version setting must be used for secret_key dicts")
+ key_version = self.application.settings["key_version"]
+
+ return create_signed_value(secret, name, value, version=version,
+ key_version=key_version)
+
+ def get_secure_cookie(self, name, value=None, max_age_days=31,
+ min_version=None):
+ """Returns the given signed cookie if it validates, or None.
+
+ The decoded cookie value is returned as a byte string (unlike
+ `get_cookie`).
+
+ .. versionchanged:: 3.2.1
+
+ Added the ``min_version`` argument. Introduced cookie version 2;
+ both versions 1 and 2 are accepted by default.
+ """
+ self.require_setting("cookie_secret", "secure cookies")
+ if value is None:
+ value = self.get_cookie(name)
+ return decode_signed_value(self.application.settings["cookie_secret"],
+ name, value, max_age_days=max_age_days,
+ min_version=min_version)
+
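+    # Usage sketch for the secure-cookie pair above (illustrative;
+    # assumes the Application was created with a ``cookie_secret``):
+    #
+    #     def get(self):
+    #         if not self.get_secure_cookie("session"):
+    #             self.set_secure_cookie("session", "some-opaque-id")
+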
+ def get_secure_cookie_key_version(self, name, value=None):
+ """Returns the signing key version of the secure cookie.
+
+        The version is returned as an int.
+ """
+ self.require_setting("cookie_secret", "secure cookies")
+ if value is None:
+ value = self.get_cookie(name)
+ return get_signature_key_version(value)
+
+ def redirect(self, url, permanent=False, status=None):
+ """Sends a redirect to the given (optionally relative) URL.
+
+ If the ``status`` argument is specified, that value is used as the
+ HTTP status code; otherwise either 301 (permanent) or 302
+ (temporary) is chosen based on the ``permanent`` argument.
+ The default is 302 (temporary).
+ """
+ if self._headers_written:
+ raise Exception("Cannot redirect after headers have been written")
+ if status is None:
+ status = 301 if permanent else 302
+ else:
+ assert isinstance(status, int) and 300 <= status <= 399
+ self.set_status(status)
+ self.set_header("Location", utf8(url))
+ self.finish()
+
+ def write(self, chunk):
+ """Writes the given chunk to the output buffer.
+
+ To write the output to the network, use the flush() method below.
+
+ If the given chunk is a dictionary, we write it as JSON and set
+ the Content-Type of the response to be ``application/json``.
+        (If you want to send JSON as a different ``Content-Type``, call
+        ``set_header`` *after* calling ``write()``.)
+
+ Note that lists are not converted to JSON because of a potential
+ cross-site security vulnerability. All JSON output should be
+ wrapped in a dictionary. More details at
+ http://haacked.com/archive/2009/06/25/json-hijacking.aspx/ and
+ https://github.com/facebook/tornado/issues/1009
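+
+        For example (a sketch; the payload is illustrative)::
+
+            self.write({"status": "ok"})  # serialized as JSON with
+                                          # Content-Type: application/json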
+ """
+ if self._finished:
+ raise RuntimeError("Cannot write() after finish()")
+ if not isinstance(chunk, (bytes, unicode_type, dict)):
+ message = "write() only accepts bytes, unicode, and dict objects"
+ if isinstance(chunk, list):
+ message += ". Lists not accepted for security reasons; see http://www.tornadoweb.org/en/stable/web.html#tornado.web.RequestHandler.write"
+ raise TypeError(message)
+ if isinstance(chunk, dict):
+ chunk = escape.json_encode(chunk)
+ self.set_header("Content-Type", "application/json; charset=UTF-8")
+ chunk = utf8(chunk)
+ self._write_buffer.append(chunk)
+
+ def render(self, template_name, **kwargs):
+ """Renders the template with the given arguments as the response."""
+ if self._finished:
+ raise RuntimeError("Cannot render() after finish()")
+ html = self.render_string(template_name, **kwargs)
+
+ # Insert the additional JS and CSS added by the modules on the page
+ js_embed = []
+ js_files = []
+ css_embed = []
+ css_files = []
+ html_heads = []
+ html_bodies = []
+ for module in getattr(self, "_active_modules", {}).values():
+ embed_part = module.embedded_javascript()
+ if embed_part:
+ js_embed.append(utf8(embed_part))
+ file_part = module.javascript_files()
+ if file_part:
+ if isinstance(file_part, (unicode_type, bytes)):
+ js_files.append(file_part)
+ else:
+ js_files.extend(file_part)
+ embed_part = module.embedded_css()
+ if embed_part:
+ css_embed.append(utf8(embed_part))
+ file_part = module.css_files()
+ if file_part:
+ if isinstance(file_part, (unicode_type, bytes)):
+ css_files.append(file_part)
+ else:
+ css_files.extend(file_part)
+ head_part = module.html_head()
+ if head_part:
+ html_heads.append(utf8(head_part))
+ body_part = module.html_body()
+ if body_part:
+ html_bodies.append(utf8(body_part))
+
+ if js_files:
+ # Maintain order of JavaScript files given by modules
+ js = self.render_linked_js(js_files)
+ sloc = html.rindex(b'</body>')
+ html = html[:sloc] + utf8(js) + b'\n' + html[sloc:]
+ if js_embed:
+ js = self.render_embed_js(js_embed)
+ sloc = html.rindex(b'</body>')
+ html = html[:sloc] + js + b'\n' + html[sloc:]
+ if css_files:
+ css = self.render_linked_css(css_files)
+ hloc = html.index(b'</head>')
+ html = html[:hloc] + utf8(css) + b'\n' + html[hloc:]
+ if css_embed:
+ css = self.render_embed_css(css_embed)
+ hloc = html.index(b'</head>')
+ html = html[:hloc] + css + b'\n' + html[hloc:]
+ if html_heads:
+ hloc = html.index(b'</head>')
+ html = html[:hloc] + b''.join(html_heads) + b'\n' + html[hloc:]
+ if html_bodies:
+ hloc = html.index(b'</body>')
+ html = html[:hloc] + b''.join(html_bodies) + b'\n' + html[hloc:]
+ self.finish(html)
+
+ def render_linked_js(self, js_files):
+ """Default method used to render the final js links for the
+ rendered webpage.
+
+ Override this method in a sub-classed controller to change the output.
+ """
+ paths = []
+ unique_paths = set()
+
+ for path in js_files:
+ if not is_absolute(path):
+ path = self.static_url(path)
+ if path not in unique_paths:
+ paths.append(path)
+ unique_paths.add(path)
+
+ return ''.join('<script src="' + escape.xhtml_escape(p) +
+ '" type="text/javascript"></script>'
+ for p in paths)
+
+ def render_embed_js(self, js_embed):
+ """Default method used to render the final embedded js for the
+ rendered webpage.
+
+ Override this method in a sub-classed controller to change the output.
+ """
+ return b'<script type="text/javascript">\n//<![CDATA[\n' + \
+ b'\n'.join(js_embed) + b'\n//]]>\n</script>'
+
+ def render_linked_css(self, css_files):
+ """Default method used to render the final css links for the
+ rendered webpage.
+
+ Override this method in a sub-classed controller to change the output.
+ """
+ paths = []
+ unique_paths = set()
+
+ for path in css_files:
+ if not is_absolute(path):
+ path = self.static_url(path)
+ if path not in unique_paths:
+ paths.append(path)
+ unique_paths.add(path)
+
+ return ''.join('<link href="' + escape.xhtml_escape(p) + '" '
+ 'type="text/css" rel="stylesheet"/>'
+ for p in paths)
+
+ def render_embed_css(self, css_embed):
+ """Default method used to render the final embedded css for the
+ rendered webpage.
+
+ Override this method in a sub-classed controller to change the output.
+ """
+ return b'<style type="text/css">\n' + b'\n'.join(css_embed) + \
+ b'\n</style>'
+
+ def render_string(self, template_name, **kwargs):
+ """Generate the given template with the given arguments.
+
+ We return the generated byte string (in utf8). To generate and
+ write a template as a response, use render() above.
+ """
+ # If no template_path is specified, use the path of the calling file
+ template_path = self.get_template_path()
+ if not template_path:
+ frame = sys._getframe(0)
+ web_file = frame.f_code.co_filename
+ while frame.f_code.co_filename == web_file:
+ frame = frame.f_back
+ template_path = os.path.dirname(frame.f_code.co_filename)
+ with RequestHandler._template_loader_lock:
+ if template_path not in RequestHandler._template_loaders:
+ loader = self.create_template_loader(template_path)
+ RequestHandler._template_loaders[template_path] = loader
+ else:
+ loader = RequestHandler._template_loaders[template_path]
+ t = loader.load(template_name)
+ namespace = self.get_template_namespace()
+ namespace.update(kwargs)
+ return t.generate(**namespace)
+
+ def get_template_namespace(self):
+ """Returns a dictionary to be used as the default template namespace.
+
+ May be overridden by subclasses to add or modify values.
+
+ The results of this method will be combined with additional
+ defaults in the `tornado.template` module and keyword arguments
+ to `render` or `render_string`.
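+
+        A sketch of an override (``MyHandler`` and ``site_name`` are
+        illustrative, not built-ins)::
+
+            def get_template_namespace(self):
+                namespace = super(MyHandler, self).get_template_namespace()
+                namespace["site_name"] = "Example"
+                return namespace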
+ """
+ namespace = dict(
+ handler=self,
+ request=self.request,
+ current_user=self.current_user,
+ locale=self.locale,
+ _=self.locale.translate,
+ pgettext=self.locale.pgettext,
+ static_url=self.static_url,
+ xsrf_form_html=self.xsrf_form_html,
+ reverse_url=self.reverse_url
+ )
+ namespace.update(self.ui)
+ return namespace
+
+ def create_template_loader(self, template_path):
+ """Returns a new template loader for the given path.
+
+ May be overridden by subclasses. By default returns a
+ directory-based loader on the given path, using the
+ ``autoescape`` and ``template_whitespace`` application
+ settings. If a ``template_loader`` application setting is
+ supplied, uses that instead.
+ """
+ settings = self.application.settings
+ if "template_loader" in settings:
+ return settings["template_loader"]
+ kwargs = {}
+ if "autoescape" in settings:
+ # autoescape=None means "no escaping", so we have to be sure
+ # to only pass this kwarg if the user asked for it.
+ kwargs["autoescape"] = settings["autoescape"]
+ if "template_whitespace" in settings:
+ kwargs["whitespace"] = settings["template_whitespace"]
+ return template.Loader(template_path, **kwargs)
+
+ def flush(self, include_footers=False, callback=None):
+ """Flushes the current output buffer to the network.
+
+ The ``callback`` argument, if given, can be used for flow control:
+ it will be run when all flushed data has been written to the socket.
+ Note that only one flush callback can be outstanding at a time;
+ if another flush occurs before the previous flush's callback
+ has been run, the previous callback will be discarded.
+
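+        The returned `.Future` can be yielded from a coroutine for flow
+        control (a sketch; ``produce_chunks`` is illustrative)::
+
+            @gen.coroutine
+            def get(self):
+                for chunk in produce_chunks():
+                    self.write(chunk)
+                    yield self.flush()
+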
+ .. versionchanged:: 4.0
+ Now returns a `.Future` if no callback is given.
+ """
+ chunk = b"".join(self._write_buffer)
+ self._write_buffer = []
+ if not self._headers_written:
+ self._headers_written = True
+ for transform in self._transforms:
+ self._status_code, self._headers, chunk = \
+ transform.transform_first_chunk(
+ self._status_code, self._headers,
+ chunk, include_footers)
+ # Ignore the chunk and only write the headers for HEAD requests
+ if self.request.method == "HEAD":
+ chunk = None
+
+ # Finalize the cookie headers (which have been stored in a side
+ # object so an outgoing cookie could be overwritten before it
+ # is sent).
+ if hasattr(self, "_new_cookie"):
+ for cookie in self._new_cookie.values():
+ self.add_header("Set-Cookie", cookie.OutputString(None))
+
+ start_line = httputil.ResponseStartLine('',
+ self._status_code,
+ self._reason)
+ return self.request.connection.write_headers(
+ start_line, self._headers, chunk, callback=callback)
+ else:
+ for transform in self._transforms:
+ chunk = transform.transform_chunk(chunk, include_footers)
+ # Ignore the chunk and only write the headers for HEAD requests
+ if self.request.method != "HEAD":
+ return self.request.connection.write(chunk, callback=callback)
+ else:
+ future = Future()
+ future.set_result(None)
+ return future
+
+ def finish(self, chunk=None):
+ """Finishes this response, ending the HTTP request."""
+ if self._finished:
+ raise RuntimeError("finish() called twice")
+
+ if chunk is not None:
+ self.write(chunk)
+
+ # Automatically support ETags and add the Content-Length header if
+ # we have not flushed any content yet.
+ if not self._headers_written:
+ if (self._status_code == 200 and
+ self.request.method in ("GET", "HEAD") and
+ "Etag" not in self._headers):
+ self.set_etag_header()
+ if self.check_etag_header():
+ self._write_buffer = []
+ self.set_status(304)
+ if (self._status_code in (204, 304) or
+ (self._status_code >= 100 and self._status_code < 200)):
+ assert not self._write_buffer, "Cannot send body with %s" % self._status_code
+ self._clear_headers_for_304()
+ elif "Content-Length" not in self._headers:
+ content_length = sum(len(part) for part in self._write_buffer)
+ self.set_header("Content-Length", content_length)
+
+ if hasattr(self.request, "connection"):
+ # Now that the request is finished, clear the callback we
+ # set on the HTTPConnection (which would otherwise prevent the
+ # garbage collection of the RequestHandler when there
+ # are keepalive connections)
+ self.request.connection.set_close_callback(None)
+
+ self.flush(include_footers=True)
+ self.request.finish()
+ self._log()
+ self._finished = True
+ self.on_finish()
+ self._break_cycles()
+
+ def _break_cycles(self):
+ # Break up a reference cycle between this handler and the
+ # _ui_module closures to allow for faster GC on CPython.
+ self.ui = None
+
+ def send_error(self, status_code=500, **kwargs):
+ """Sends the given HTTP error code to the browser.
+
+ If `flush()` has already been called, it is not possible to send
+ an error, so this method will simply terminate the response.
+ If output has been written but not yet flushed, it will be discarded
+ and replaced with the error page.
+
+ Override `write_error()` to customize the error page that is returned.
+ Additional keyword arguments are passed through to `write_error`.
+ """
+ if self._headers_written:
+ gen_log.error("Cannot send error response after headers written")
+ if not self._finished:
+ # If we get an error between writing headers and finishing,
+ # we are unlikely to be able to finish due to a
+ # Content-Length mismatch. Try anyway to release the
+ # socket.
+ try:
+ self.finish()
+ except Exception:
+ gen_log.error("Failed to flush partial response",
+ exc_info=True)
+ return
+ self.clear()
+
+ reason = kwargs.get('reason')
+ if 'exc_info' in kwargs:
+ exception = kwargs['exc_info'][1]
+ if isinstance(exception, HTTPError) and exception.reason:
+ reason = exception.reason
+ self.set_status(status_code, reason=reason)
+ try:
+ self.write_error(status_code, **kwargs)
+ except Exception:
+ app_log.error("Uncaught exception in write_error", exc_info=True)
+ if not self._finished:
+ self.finish()
+
+ def write_error(self, status_code, **kwargs):
+ """Override to implement custom error pages.
+
+ ``write_error`` may call `write`, `render`, `set_header`, etc
+ to produce output as usual.
+
+ If this error was caused by an uncaught exception (including
+ HTTPError), an ``exc_info`` triple will be available as
+ ``kwargs["exc_info"]``. Note that this exception may not be
+ the "current" exception for purposes of methods like
+ ``sys.exc_info()`` or ``traceback.format_exc``.
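+
+        A minimal sketch of an override (``error.html`` is an illustrative
+        template name)::
+
+            def write_error(self, status_code, **kwargs):
+                self.render("error.html", status_code=status_code)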
+ """
+ if self.settings.get("serve_traceback") and "exc_info" in kwargs:
+ # in debug mode, try to send a traceback
+ self.set_header('Content-Type', 'text/plain')
+ for line in traceback.format_exception(*kwargs["exc_info"]):
+ self.write(line)
+ self.finish()
+ else:
+ self.finish("<html><title>%(code)d: %(message)s</title>"
+ "<body>%(code)d: %(message)s</body></html>" % {
+ "code": status_code,
+ "message": self._reason,
+ })
+
+ @property
+ def locale(self):
+ """The locale for the current session.
+
+ Determined by either `get_user_locale`, which you can override to
+ set the locale based on, e.g., a user preference stored in a
+ database, or `get_browser_locale`, which uses the ``Accept-Language``
+ header.
+
+        .. versionchanged:: 4.1
+ Added a property setter.
+ """
+ if not hasattr(self, "_locale"):
+ self._locale = self.get_user_locale()
+ if not self._locale:
+ self._locale = self.get_browser_locale()
+ assert self._locale
+ return self._locale
+
+ @locale.setter
+ def locale(self, value):
+ self._locale = value
+
+ def get_user_locale(self):
+ """Override to determine the locale from the authenticated user.
+
+ If None is returned, we fall back to `get_browser_locale()`.
+
+ This method should return a `tornado.locale.Locale` object,
+        most likely obtained via a call like ``tornado.locale.get("en")``.
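+
+        A sketch of an override (the per-user ``locale`` field is
+        illustrative)::
+
+            def get_user_locale(self):
+                user = self.current_user
+                if user and "locale" in user:
+                    return tornado.locale.get(user["locale"])
+                return None  # fall back to get_browser_locale()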
+ """
+ return None
+
+ def get_browser_locale(self, default="en_US"):
+ """Determines the user's locale from ``Accept-Language`` header.
+
+ See http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.4
+ """
+ if "Accept-Language" in self.request.headers:
+ languages = self.request.headers["Accept-Language"].split(",")
+ locales = []
+ for language in languages:
+ parts = language.strip().split(";")
+ if len(parts) > 1 and parts[1].startswith("q="):
+ try:
+ score = float(parts[1][2:])
+ except (ValueError, TypeError):
+ score = 0.0
+ else:
+ score = 1.0
+ locales.append((parts[0], score))
+ if locales:
+ locales.sort(key=lambda pair: pair[1], reverse=True)
+ codes = [l[0] for l in locales]
+ return locale.get(*codes)
+ return locale.get(default)
+
+ @property
+ def current_user(self):
+ """The authenticated user for this request.
+
+ This is set in one of two ways:
+
+ * A subclass may override `get_current_user()`, which will be called
+ automatically the first time ``self.current_user`` is accessed.
+ `get_current_user()` will only be called once per request,
+ and is cached for future access::
+
+ def get_current_user(self):
+ user_cookie = self.get_secure_cookie("user")
+ if user_cookie:
+ return json.loads(user_cookie)
+ return None
+
+ * It may be set as a normal variable, typically from an overridden
+ `prepare()`::
+
+ @gen.coroutine
+ def prepare(self):
+ user_id_cookie = self.get_secure_cookie("user_id")
+ if user_id_cookie:
+ self.current_user = yield load_user(user_id_cookie)
+
+ Note that `prepare()` may be a coroutine while `get_current_user()`
+ may not, so the latter form is necessary if loading the user requires
+ asynchronous operations.
+
+ The user object may be any type of the application's choosing.
+ """
+ if not hasattr(self, "_current_user"):
+ self._current_user = self.get_current_user()
+ return self._current_user
+
+ @current_user.setter
+ def current_user(self, value):
+ self._current_user = value
+
+ def get_current_user(self):
+ """Override to determine the current user from, e.g., a cookie.
+
+ This method may not be a coroutine.
+ """
+ return None
+
+ def get_login_url(self):
+ """Override to customize the login URL based on the request.
+
+ By default, we use the ``login_url`` application setting.
+ """
+ self.require_setting("login_url", "@tornado.web.authenticated")
+ return self.application.settings["login_url"]
+
+ def get_template_path(self):
+ """Override to customize template path for each handler.
+
+ By default, we use the ``template_path`` application setting.
+ Return None to load templates relative to the calling file.
+ """
+ return self.application.settings.get("template_path")
+
+ @property
+ def xsrf_token(self):
+ """The XSRF-prevention token for the current user/session.
+
+ To prevent cross-site request forgery, we set an '_xsrf' cookie
+ and include the same '_xsrf' value as an argument with all POST
+ requests. If the two do not match, we reject the form submission
+ as a potential forgery.
+
+ See http://en.wikipedia.org/wiki/Cross-site_request_forgery
+
+ .. versionchanged:: 3.2.2
+           The xsrf token will now have a random mask applied in every
+ request, which makes it safe to include the token in pages
+ that are compressed. See http://breachattack.com for more
+ information on the issue fixed by this change. Old (version 1)
+ cookies will be converted to version 2 when this method is called
+ unless the ``xsrf_cookie_version`` `Application` setting is
+ set to 1.
+
+ .. versionchanged:: 4.3
+ The ``xsrf_cookie_kwargs`` `Application` setting may be
+ used to supply additional cookie options (which will be
+ passed directly to `set_cookie`). For example,
+ ``xsrf_cookie_kwargs=dict(httponly=True, secure=True)``
+ will set the ``secure`` and ``httponly`` flags on the
+ ``_xsrf`` cookie.
+ """
+ if not hasattr(self, "_xsrf_token"):
+ version, token, timestamp = self._get_raw_xsrf_token()
+ output_version = self.settings.get("xsrf_cookie_version", 2)
+ cookie_kwargs = self.settings.get("xsrf_cookie_kwargs", {})
+ if output_version == 1:
+ self._xsrf_token = binascii.b2a_hex(token)
+ elif output_version == 2:
+ mask = os.urandom(4)
+ self._xsrf_token = b"|".join([
+ b"2",
+ binascii.b2a_hex(mask),
+ binascii.b2a_hex(_websocket_mask(mask, token)),
+ utf8(str(int(timestamp)))])
+ else:
+ raise ValueError("unknown xsrf cookie version %d",
+ output_version)
+ if version is None:
+ expires_days = 30 if self.current_user else None
+ self.set_cookie("_xsrf", self._xsrf_token,
+ expires_days=expires_days,
+ **cookie_kwargs)
+ return self._xsrf_token
+
+ def _get_raw_xsrf_token(self):
+ """Read or generate the xsrf token in its raw form.
+
+ The raw_xsrf_token is a tuple containing:
+
+ * version: the version of the cookie from which this token was read,
+ or None if we generated a new token in this request.
+ * token: the raw token data; random (non-ascii) bytes.
+ * timestamp: the time this token was generated (will not be accurate
+ for version 1 cookies)
+ """
+ if not hasattr(self, '_raw_xsrf_token'):
+ cookie = self.get_cookie("_xsrf")
+ if cookie:
+ version, token, timestamp = self._decode_xsrf_token(cookie)
+ else:
+ version, token, timestamp = None, None, None
+ if token is None:
+ version = None
+ token = os.urandom(16)
+ timestamp = time.time()
+ self._raw_xsrf_token = (version, token, timestamp)
+ return self._raw_xsrf_token
+
+ def _decode_xsrf_token(self, cookie):
+ """Convert a cookie string into a the tuple form returned by
+ _get_raw_xsrf_token.
+ """
+
+ try:
+ m = _signed_value_version_re.match(utf8(cookie))
+
+ if m:
+ version = int(m.group(1))
+ if version == 2:
+ _, mask, masked_token, timestamp = cookie.split("|")
+
+ mask = binascii.a2b_hex(utf8(mask))
+ token = _websocket_mask(
+ mask, binascii.a2b_hex(utf8(masked_token)))
+ timestamp = int(timestamp)
+ return version, token, timestamp
+ else:
+ # Treat unknown versions as not present instead of failing.
+ raise Exception("Unknown xsrf cookie version")
+ else:
+ version = 1
+ try:
+ token = binascii.a2b_hex(utf8(cookie))
+ except (binascii.Error, TypeError):
+ token = utf8(cookie)
+ # We don't have a usable timestamp in older versions.
+ timestamp = int(time.time())
+ return (version, token, timestamp)
+ except Exception:
+ # Catch exceptions and return nothing instead of failing.
+ gen_log.debug("Uncaught exception in _decode_xsrf_token",
+ exc_info=True)
+ return None, None, None
+
+ def check_xsrf_cookie(self):
+ """Verifies that the ``_xsrf`` cookie matches the ``_xsrf`` argument.
+
+ To prevent cross-site request forgery, we set an ``_xsrf``
+ cookie and include the same value as a non-cookie
+ field with all ``POST`` requests. If the two do not match, we
+ reject the form submission as a potential forgery.
+
+ The ``_xsrf`` value may be set as either a form field named ``_xsrf``
+ or in a custom HTTP header named ``X-XSRFToken`` or ``X-CSRFToken``
+ (the latter is accepted for compatibility with Django).
+
+ See http://en.wikipedia.org/wiki/Cross-site_request_forgery
+
+ Prior to release 1.1.1, this check was ignored if the HTTP header
+ ``X-Requested-With: XMLHTTPRequest`` was present. This exception
+ has been shown to be insecure and has been removed. For more
+ information please see
+ http://www.djangoproject.com/weblog/2011/feb/08/security/
+ http://weblog.rubyonrails.org/2011/2/8/csrf-protection-bypass-in-ruby-on-rails
+
+ .. versionchanged:: 3.2.2
+ Added support for cookie version 2. Both versions 1 and 2 are
+ supported.
+ """
+ token = (self.get_argument("_xsrf", None) or
+ self.request.headers.get("X-Xsrftoken") or
+ self.request.headers.get("X-Csrftoken"))
+ if not token:
+ raise HTTPError(403, "'_xsrf' argument missing from POST")
+ _, token, _ = self._decode_xsrf_token(token)
+ _, expected_token, _ = self._get_raw_xsrf_token()
+ if not token:
+ raise HTTPError(403, "'_xsrf' argument has invalid format")
+ if not _time_independent_equals(utf8(token), utf8(expected_token)):
+ raise HTTPError(403, "XSRF cookie does not match POST argument")
+
+ def xsrf_form_html(self):
+ """An HTML ``<input/>`` element to be included with all POST forms.
+
+ It defines the ``_xsrf`` input value, which we check on all POST
+ requests to prevent cross-site request forgery. If you have set
+ the ``xsrf_cookies`` application setting, you must include this
+ HTML within all of your HTML forms.
+
+ In a template, this method should be called with ``{% module
+ xsrf_form_html() %}``
+
+ See `check_xsrf_cookie()` above for more information.
+ """
+ return '<input type="hidden" name="_xsrf" value="' + \
+ escape.xhtml_escape(self.xsrf_token) + '"/>'
+
+ def static_url(self, path, include_host=None, **kwargs):
+ """Returns a static URL for the given relative static file path.
+
+ This method requires you set the ``static_path`` setting in your
+ application (which specifies the root directory of your static
+ files).
+
+ This method returns a versioned url (by default appending
+ ``?v=<signature>``), which allows the static files to be
+ cached indefinitely. This can be disabled by passing
+ ``include_version=False`` (in the default implementation;
+ other static file implementations are not required to support
+ this, but they may support other options).
+
+ By default this method returns URLs relative to the current
+ host, but if ``include_host`` is true the URL returned will be
+ absolute. If this handler has an ``include_host`` attribute,
+ that value will be used as the default for all `static_url`
+ calls that do not pass ``include_host`` as a keyword argument.
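+
+        For example, in a handler or template (the file path is
+        illustrative)::
+
+            url = self.static_url("css/site.css")
+            # e.g. "/static/css/site.css?v=<signature>"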
+
+ """
+ self.require_setting("static_path", "static_url")
+ get_url = self.settings.get("static_handler_class",
+ StaticFileHandler).make_static_url
+
+ if include_host is None:
+ include_host = getattr(self, "include_host", False)
+
+ if include_host:
+ base = self.request.protocol + "://" + self.request.host
+ else:
+ base = ""
+
+ return base + get_url(self.settings, path, **kwargs)
+
+ def require_setting(self, name, feature="this feature"):
+ """Raises an exception if the given app setting is not defined."""
+ if not self.application.settings.get(name):
+ raise Exception("You must define the '%s' setting in your "
+ "application to use %s" % (name, feature))
+
+ def reverse_url(self, name, *args):
+ """Alias for `Application.reverse_url`."""
+ return self.application.reverse_url(name, *args)
+
+ def compute_etag(self):
+ """Computes the etag header to be used for this request.
+
+ By default uses a hash of the content written so far.
+
+ May be overridden to provide custom etag implementations,
+ or may return None to disable tornado's default etag support.
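+
+        For example, a subclass could turn off etags entirely (a sketch)::
+
+            def compute_etag(self):
+                return None  # disables Etag/If-None-Match handling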
+ """
+ hasher = hashlib.sha1()
+ for part in self._write_buffer:
+ hasher.update(part)
+ return '"%s"' % hasher.hexdigest()
+
+ def set_etag_header(self):
+ """Sets the response's Etag header using ``self.compute_etag()``.
+
+ Note: no header will be set if ``compute_etag()`` returns ``None``.
+
+ This method is called automatically when the request is finished.
+ """
+ etag = self.compute_etag()
+ if etag is not None:
+ self.set_header("Etag", etag)
+
+ def check_etag_header(self):
+ """Checks the ``Etag`` header against requests's ``If-None-Match``.
+
+ Returns ``True`` if the request's Etag matches and a 304 should be
+ returned. For example::
+
+ self.set_etag_header()
+ if self.check_etag_header():
+ self.set_status(304)
+ return
+
+ This method is called automatically when the request is finished,
+ but may be called earlier for applications that override
+ `compute_etag` and want to do an early check for ``If-None-Match``
+ before completing the request. The ``Etag`` header should be set
+ (perhaps with `set_etag_header`) before calling this method.
+ """
+ computed_etag = utf8(self._headers.get("Etag", ""))
+ # Find all weak and strong etag values from If-None-Match header
+ # because RFC 7232 allows multiple etag values in a single header.
+ etags = re.findall(
+ br'\*|(?:W/)?"[^"]*"',
+ utf8(self.request.headers.get("If-None-Match", ""))
+ )
+ if not computed_etag or not etags:
+ return False
+
+ match = False
+ if etags[0] == b'*':
+ match = True
+ else:
+ # Use a weak comparison when comparing entity-tags.
+ def val(x):
+ return x[2:] if x.startswith(b'W/') else x
+
+ for etag in etags:
+ if val(etag) == val(computed_etag):
+ match = True
+ break
+ return match
+
+ def _stack_context_handle_exception(self, type, value, traceback):
+ try:
+ # For historical reasons _handle_request_exception only takes
+ # the exception value instead of the full triple,
+ # so re-raise the exception to ensure that it's in
+ # sys.exc_info()
+ raise_exc_info((type, value, traceback))
+ except Exception:
+ self._handle_request_exception(value)
+ return True
+
+ @gen.coroutine
+ def _execute(self, transforms, *args, **kwargs):
+ """Executes this request with the given output transforms."""
+ self._transforms = transforms
+ try:
+ if self.request.method not in self.SUPPORTED_METHODS:
+ raise HTTPError(405)
+ self.path_args = [self.decode_argument(arg) for arg in args]
+ self.path_kwargs = dict((k, self.decode_argument(v, name=k))
+ for (k, v) in kwargs.items())
+ # If XSRF cookies are turned on, reject form submissions without
+ # the proper cookie
+ if self.request.method not in ("GET", "HEAD", "OPTIONS") and \
+ self.application.settings.get("xsrf_cookies"):
+ self.check_xsrf_cookie()
+
+ result = self.prepare()
+ if result is not None:
+ result = yield result
+ if self._prepared_future is not None:
+ # Tell the Application we've finished with prepare()
+ # and are ready for the body to arrive.
+ self._prepared_future.set_result(None)
+ if self._finished:
+ return
+
+ if _has_stream_request_body(self.__class__):
+ # In streaming mode request.body is a Future that signals
+ # the body has been completely received. The Future has no
+ # result; the data has been passed to self.data_received
+ # instead.
+ try:
+ yield self.request.body
+ except iostream.StreamClosedError:
+ return
+
+ method = getattr(self, self.request.method.lower())
+ result = method(*self.path_args, **self.path_kwargs)
+ if result is not None:
+ result = yield result
+ if self._auto_finish and not self._finished:
+ self.finish()
+ except Exception as e:
+ try:
+ self._handle_request_exception(e)
+ except Exception:
+ app_log.error("Exception in exception handler", exc_info=True)
+ if (self._prepared_future is not None and
+ not self._prepared_future.done()):
+ # In case we failed before setting _prepared_future, do it
+ # now (to unblock the HTTP server). Note that this is not
+ # in a finally block to avoid GC issues prior to Python 3.4.
+ self._prepared_future.set_result(None)
+
+ def data_received(self, chunk):
+ """Implement this method to handle streamed request data.
+
+ Requires the `.stream_request_body` decorator.
+ """
+ raise NotImplementedError()
+
+ def _log(self):
+ """Logs the current request.
+
+ Sort of deprecated since this functionality was moved to the
+ Application, but left in place for the benefit of existing apps
+ that have overridden this method.
+ """
+ self.application.log_request(self)
+
+ def _request_summary(self):
+ return "%s %s (%s)" % (self.request.method, self.request.uri,
+ self.request.remote_ip)
+
+ def _handle_request_exception(self, e):
+ if isinstance(e, Finish):
+ # Not an error; just finish the request without logging.
+ if not self._finished:
+ self.finish(*e.args)
+ return
+ try:
+ self.log_exception(*sys.exc_info())
+ except Exception:
+ # An error here should still get a best-effort send_error()
+ # to avoid leaking the connection.
+ app_log.error("Error in exception logger", exc_info=True)
+ if self._finished:
+ # Extra errors after the request has been finished should
+ # be logged, but there is no reason to continue to try and
+ # send a response.
+ return
+ if isinstance(e, HTTPError):
+ if e.status_code not in httputil.responses and not e.reason:
+ gen_log.error("Bad HTTP status code: %d", e.status_code)
+ self.send_error(500, exc_info=sys.exc_info())
+ else:
+ self.send_error(e.status_code, exc_info=sys.exc_info())
+ else:
+ self.send_error(500, exc_info=sys.exc_info())
+
+ def log_exception(self, typ, value, tb):
+ """Override to customize logging of uncaught exceptions.
+
+ By default logs instances of `HTTPError` as warnings without
+ stack traces (on the ``tornado.general`` logger), and all
+ other exceptions as errors with stack traces (on the
+ ``tornado.application`` logger).
+
+ .. versionadded:: 3.1
+ """
+ if isinstance(value, HTTPError):
+ if value.log_message:
+ format = "%d %s: " + value.log_message
+ args = ([value.status_code, self._request_summary()] +
+ list(value.args))
+ gen_log.warning(format, *args)
+ else:
+ app_log.error("Uncaught exception %s\n%r", self._request_summary(),
+ self.request, exc_info=(typ, value, tb))
+
+ def _ui_module(self, name, module):
+ def render(*args, **kwargs):
+ if not hasattr(self, "_active_modules"):
+ self._active_modules = {}
+ if name not in self._active_modules:
+ self._active_modules[name] = module(self)
+ rendered = self._active_modules[name].render(*args, **kwargs)
+ return rendered
+ return render
+
+ def _ui_method(self, method):
+ return lambda *args, **kwargs: method(self, *args, **kwargs)
+
+ def _clear_headers_for_304(self):
+ # 304 responses should not contain entity headers (defined in
+ # http://www.w3.org/Protocols/rfc2616/rfc2616-sec7.html#sec7.1)
+ # not explicitly allowed by
+ # http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3.5
+ headers = ["Allow", "Content-Encoding", "Content-Language",
+ "Content-Length", "Content-MD5", "Content-Range",
+ "Content-Type", "Last-Modified"]
+ for h in headers:
+ self.clear_header(h)
+
+
+def asynchronous(method):
+ """Wrap request handler methods with this if they are asynchronous.
+
+ This decorator is for callback-style asynchronous methods; for
+ coroutines, use the ``@gen.coroutine`` decorator without
+ ``@asynchronous``. (It is legal for legacy reasons to use the two
+ decorators together provided ``@asynchronous`` is first, but
+    ``@asynchronous`` will be ignored in this case.)
+
+ This decorator should only be applied to the :ref:`HTTP verb
+ methods <verbs>`; its behavior is undefined for any other method.
+ This decorator does not *make* a method asynchronous; it tells
+ the framework that the method *is* asynchronous. For this decorator
+ to be useful the method must (at least sometimes) do something
+ asynchronous.
+
+ If this decorator is given, the response is not finished when the
+ method returns. It is up to the request handler to call
+ `self.finish() <RequestHandler.finish>` to finish the HTTP
+ request. Without this decorator, the request is automatically
+ finished when the ``get()`` or ``post()`` method returns. Example:
+
+ .. testcode::
+
+ class MyRequestHandler(RequestHandler):
+ @asynchronous
+ def get(self):
+ http = httpclient.AsyncHTTPClient()
+ http.fetch("http://friendfeed.com/", self._on_download)
+
+ def _on_download(self, response):
+ self.write("Downloaded!")
+ self.finish()
+
+ .. testoutput::
+ :hide:
+
+ .. versionchanged:: 3.1
+ The ability to use ``@gen.coroutine`` without ``@asynchronous``.
+
+    .. versionchanged:: 4.3
+       Returning anything but ``None`` or a yieldable object from a
+       method decorated with ``@asynchronous`` is an error. Such return
+       values were previously ignored silently.
+ """
+ # Delay the IOLoop import because it's not available on app engine.
+ from tornado.ioloop import IOLoop
+
+ @functools.wraps(method)
+ def wrapper(self, *args, **kwargs):
+ self._auto_finish = False
+ with stack_context.ExceptionStackContext(
+ self._stack_context_handle_exception):
+ result = method(self, *args, **kwargs)
+ if result is not None:
+ result = gen.convert_yielded(result)
+
+ # If @asynchronous is used with @gen.coroutine, (but
+ # not @gen.engine), we can automatically finish the
+ # request when the future resolves. Additionally,
+ # the Future will swallow any exceptions so we need
+ # to throw them back out to the stack context to finish
+ # the request.
+ def future_complete(f):
+ f.result()
+ if not self._finished:
+ self.finish()
+ IOLoop.current().add_future(result, future_complete)
+ # Once we have done this, hide the Future from our
+ # caller (i.e. RequestHandler._when_complete), which
+ # would otherwise set up its own callback and
+ # exception handler (resulting in exceptions being
+ # logged twice).
+ return None
+ return result
+ return wrapper
+
+
+def stream_request_body(cls):
+ """Apply to `RequestHandler` subclasses to enable streaming body support.
+
+ This decorator implies the following changes:
+
+ * `.HTTPServerRequest.body` is undefined, and body arguments will not
+ be included in `RequestHandler.get_argument`.
+ * `RequestHandler.prepare` is called when the request headers have been
+ read instead of after the entire body has been read.
+ * The subclass must define a method ``data_received(self, data):``, which
+ will be called zero or more times as data is available. Note that
+ if the request has an empty body, ``data_received`` may not be called.
+ * ``prepare`` and ``data_received`` may return Futures (such as via
+      ``@gen.coroutine``), in which case the next method will not be called
+ until those futures have completed.
+ * The regular HTTP method (``post``, ``put``, etc) will be called after
+ the entire body has been read.
+
+ See the `file receiver demo <https://github.com/tornadoweb/tornado/tree/master/demos/file_upload/>`_
+ for example usage.
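+
+    A minimal sketch of a streaming handler (the names are illustrative)::
+
+        @stream_request_body
+        class UploadHandler(RequestHandler):
+            def prepare(self):
+                self.bytes_read = 0
+
+            def data_received(self, chunk):
+                self.bytes_read += len(chunk)
+
+            def put(self):
+                self.write("received %d bytes" % self.bytes_read)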
+ """
+ if not issubclass(cls, RequestHandler):
+ raise TypeError("expected subclass of RequestHandler, got %r", cls)
+ cls._stream_request_body = True
+ return cls
+
+
+def _has_stream_request_body(cls):
+ if not issubclass(cls, RequestHandler):
+ raise TypeError("expected subclass of RequestHandler, got %r", cls)
+ return getattr(cls, '_stream_request_body', False)
+
+
+def removeslash(method):
+ """Use this decorator to remove trailing slashes from the request path.
+
+ For example, a request to ``/foo/`` would redirect to ``/foo`` with this
+ decorator. Your request handler mapping should use a regular expression
+ like ``r'/foo/*'`` in conjunction with using the decorator.
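+
+    A sketch of typical usage (``FooHandler`` is illustrative)::
+
+        class FooHandler(RequestHandler):
+            @removeslash
+            def get(self):
+                self.write("foo")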
+ """
+ @functools.wraps(method)
+ def wrapper(self, *args, **kwargs):
+ if self.request.path.endswith("/"):
+ if self.request.method in ("GET", "HEAD"):
+ uri = self.request.path.rstrip("/")
+ if uri: # don't try to redirect '/' to ''
+ if self.request.query:
+ uri += "?" + self.request.query
+ self.redirect(uri, permanent=True)
+ return
+ else:
+ raise HTTPError(404)
+ return method(self, *args, **kwargs)
+ return wrapper
+
+
+def addslash(method):
+ """Use this decorator to add a missing trailing slash to the request path.
+
+ For example, a request to ``/foo`` would redirect to ``/foo/`` with this
+ decorator. Your request handler mapping should use a regular expression
+ like ``r'/foo/?'`` in conjunction with using the decorator.
+ """
+ @functools.wraps(method)
+ def wrapper(self, *args, **kwargs):
+ if not self.request.path.endswith("/"):
+ if self.request.method in ("GET", "HEAD"):
+ uri = self.request.path + "/"
+ if self.request.query:
+ uri += "?" + self.request.query
+ self.redirect(uri, permanent=True)
+ return
+ raise HTTPError(404)
+ return method(self, *args, **kwargs)
+ return wrapper
+
+
+class _ApplicationRouter(ReversibleRuleRouter):
+ """Routing implementation used internally by `Application`.
+
+ Provides a binding between `Application` and `RequestHandler`.
+    This implementation extends `~.routing.ReversibleRuleRouter` in a couple of ways:
+
+    * it allows `RequestHandler` subclasses to be used as `~.routing.Rule` targets and
+    * it allows a list/tuple of rules to be used as a `~.routing.Rule` target; the
+      ``process_rule`` implementation will substitute such a list with an
+      appropriate `_ApplicationRouter` instance.
+ """
+
+ def __init__(self, application, rules=None):
+ assert isinstance(application, Application)
+ self.application = application
+ super(_ApplicationRouter, self).__init__(rules)
+
+ def process_rule(self, rule):
+ rule = super(_ApplicationRouter, self).process_rule(rule)
+
+ if isinstance(rule.target, (list, tuple)):
+ rule.target = _ApplicationRouter(self.application, rule.target)
+
+ return rule
+
+ def get_target_delegate(self, target, request, **target_params):
+ if isclass(target) and issubclass(target, RequestHandler):
+ return self.application.get_handler_delegate(request, target, **target_params)
+
+ return super(_ApplicationRouter, self).get_target_delegate(target, request, **target_params)
+
+
+class Application(ReversibleRouter):
+ """A collection of request handlers that make up a web application.
+
+ Instances of this class are callable and can be passed directly to
+ HTTPServer to serve the application::
+
+ application = web.Application([
+ (r"/", MainPageHandler),
+ ])
+ http_server = httpserver.HTTPServer(application)
+ http_server.listen(8080)
+ ioloop.IOLoop.current().start()
+
+ The constructor for this class takes in a list of `~.routing.Rule`
+ objects or tuples of values corresponding to the arguments of
+ `~.routing.Rule` constructor: ``(matcher, target, [target_kwargs], [name])``,
+ the values in square brackets being optional. The default matcher is
+ `~.routing.PathMatches`, so ``(regexp, target)`` tuples can also be used
+ instead of ``(PathMatches(regexp), target)``.
+
+ A common routing target is a `RequestHandler` subclass, but you can also
+ use lists of rules as a target, which create a nested routing configuration::
+
+ application = web.Application([
+ (HostMatches("example.com"), [
+ (r"/", MainPageHandler),
+ (r"/feed", FeedHandler),
+ ]),
+ ])
+
+ In addition to this you can use nested `~.routing.Router` instances,
+ `~.httputil.HTTPMessageDelegate` subclasses and callables as routing targets
+ (see `~.routing` module docs for more information).
+
+ When we receive requests, we iterate over the list in order and
+ instantiate an instance of the first request class whose regexp
+ matches the request path. The request class can be specified as
+ either a class object or a (fully-qualified) name.
+
+ A dictionary may be passed as the third element (``target_kwargs``)
+ of the tuple, which will be used as keyword arguments to the handler's
+ constructor and `~RequestHandler.initialize` method. This pattern
+ is used for the `StaticFileHandler` in this example (note that a
+ `StaticFileHandler` can be installed automatically with the
+ static_path setting described below)::
+
+ application = web.Application([
+ (r"/static/(.*)", web.StaticFileHandler, {"path": "/var/www"}),
+ ])
+
+ We support virtual hosts with the `add_handlers` method, which takes in
+ a host regular expression as the first argument::
+
+ application.add_handlers(r"www\.myhost\.com", [
+ (r"/article/([0-9]+)", ArticleHandler),
+ ])
+
+    If there's no match for the current request's host, the ``default_host``
+    parameter value is matched against the host regular expressions.
+
+ You can serve static files by sending the ``static_path`` setting
+ as a keyword argument. We will serve those files from the
+ ``/static/`` URI (this is configurable with the
+ ``static_url_prefix`` setting), and we will serve ``/favicon.ico``
+ and ``/robots.txt`` from the same directory. A custom subclass of
+ `StaticFileHandler` can be specified with the
+ ``static_handler_class`` setting.
+
+ .. versionchanged:: 4.5
+ Integration with the new `tornado.routing` module.
+ """
+ def __init__(self, handlers=None, default_host=None, transforms=None,
+ **settings):
+ if transforms is None:
+ self.transforms = []
+ if settings.get("compress_response") or settings.get("gzip"):
+ self.transforms.append(GZipContentEncoding)
+ else:
+ self.transforms = transforms
+ self.default_host = default_host
+ self.settings = settings
+ self.ui_modules = {'linkify': _linkify,
+ 'xsrf_form_html': _xsrf_form_html,
+ 'Template': TemplateModule,
+ }
+ self.ui_methods = {}
+ self._load_ui_modules(settings.get("ui_modules", {}))
+ self._load_ui_methods(settings.get("ui_methods", {}))
+ if self.settings.get("static_path"):
+ path = self.settings["static_path"]
+ handlers = list(handlers or [])
+ static_url_prefix = settings.get("static_url_prefix",
+ "/static/")
+ static_handler_class = settings.get("static_handler_class",
+ StaticFileHandler)
+ static_handler_args = settings.get("static_handler_args", {})
+ static_handler_args['path'] = path
+ for pattern in [re.escape(static_url_prefix) + r"(.*)",
+ r"/(favicon\.ico)", r"/(robots\.txt)"]:
+ handlers.insert(0, (pattern, static_handler_class,
+ static_handler_args))
+
+ if self.settings.get('debug'):
+ self.settings.setdefault('autoreload', True)
+ self.settings.setdefault('compiled_template_cache', False)
+ self.settings.setdefault('static_hash_cache', False)
+ self.settings.setdefault('serve_traceback', True)
+
+ self.wildcard_router = _ApplicationRouter(self, handlers)
+ self.default_router = _ApplicationRouter(self, [
+ Rule(AnyMatches(), self.wildcard_router)
+ ])
+
+ # Automatically reload modified modules
+ if self.settings.get('autoreload'):
+ from tornado import autoreload
+ autoreload.start()
+
+ def listen(self, port, address="", **kwargs):
+ """Starts an HTTP server for this application on the given port.
+
+ This is a convenience alias for creating an `.HTTPServer`
+ object and calling its listen method. Keyword arguments not
+ supported by `HTTPServer.listen <.TCPServer.listen>` are passed to the
+ `.HTTPServer` constructor. For advanced uses
+ (e.g. multi-process mode), do not use this method; create an
+ `.HTTPServer` and call its
+ `.TCPServer.bind`/`.TCPServer.start` methods directly.
+
+ Note that after calling this method you still need to call
+ ``IOLoop.current().start()`` to start the server.
+
+ Returns the `.HTTPServer` object.
+
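+        For example (a sketch)::
+
+            app = Application([(r"/", MainPageHandler)])
+            app.listen(8888)
+            tornado.ioloop.IOLoop.current().start()
+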
+ .. versionchanged:: 4.3
+ Now returns the `.HTTPServer` object.
+ """
+ # import is here rather than top level because HTTPServer
+ # is not importable on appengine
+ from tornado.httpserver import HTTPServer
+ server = HTTPServer(self, **kwargs)
+ server.listen(port, address)
+ return server
+
+ def add_handlers(self, host_pattern, host_handlers):
+ """Appends the given handlers to our handler list.
+
+ Host patterns are processed sequentially in the order they were
+ added. All matching patterns will be considered.
+ """
+ host_matcher = HostMatches(host_pattern)
+ rule = Rule(host_matcher, _ApplicationRouter(self, host_handlers))
+
+ self.default_router.rules.insert(-1, rule)
+
+ if self.default_host is not None:
+ self.wildcard_router.add_rules([(
+ DefaultHostMatches(self, host_matcher.host_pattern),
+ host_handlers
+ )])
+
+ def add_transform(self, transform_class):
+ self.transforms.append(transform_class)
+
+ def _load_ui_methods(self, methods):
+ if isinstance(methods, types.ModuleType):
+ self._load_ui_methods(dict((n, getattr(methods, n))
+ for n in dir(methods)))
+ elif isinstance(methods, list):
+ for m in methods:
+ self._load_ui_methods(m)
+ else:
+ for name, fn in methods.items():
+ if not name.startswith("_") and hasattr(fn, "__call__") \
+ and name[0].lower() == name[0]:
+ self.ui_methods[name] = fn
+
+ def _load_ui_modules(self, modules):
+ if isinstance(modules, types.ModuleType):
+ self._load_ui_modules(dict((n, getattr(modules, n))
+ for n in dir(modules)))
+ elif isinstance(modules, list):
+ for m in modules:
+ self._load_ui_modules(m)
+ else:
+ assert isinstance(modules, dict)
+ for name, cls in modules.items():
+ try:
+ if issubclass(cls, UIModule):
+ self.ui_modules[name] = cls
+ except TypeError:
+ pass
+
+ def __call__(self, request):
+ # Legacy HTTPServer interface
+ dispatcher = self.find_handler(request)
+ return dispatcher.execute()
+
+ def find_handler(self, request, **kwargs):
+ route = self.default_router.find_handler(request)
+ if route is not None:
+ return route
+
+ if self.settings.get('default_handler_class'):
+ return self.get_handler_delegate(
+ request,
+ self.settings['default_handler_class'],
+ self.settings.get('default_handler_args', {}))
+
+ return self.get_handler_delegate(
+ request, ErrorHandler, {'status_code': 404})
+
+ def get_handler_delegate(self, request, target_class, target_kwargs=None,
+ path_args=None, path_kwargs=None):
+ """Returns `~.httputil.HTTPMessageDelegate` that can serve a request
+ for application and `RequestHandler` subclass.
+
+ :arg httputil.HTTPServerRequest request: current HTTP request.
+ :arg RequestHandler target_class: a `RequestHandler` class.
+ :arg dict target_kwargs: keyword arguments for ``target_class`` constructor.
+ :arg list path_args: positional arguments for ``target_class`` HTTP method that
+ will be executed while handling a request (``get``, ``post`` or any other).
+ :arg dict path_kwargs: keyword arguments for ``target_class`` HTTP method.
+ """
+ return _HandlerDelegate(
+ self, request, target_class, target_kwargs, path_args, path_kwargs)
+
+ def reverse_url(self, name, *args):
+ """Returns a URL path for handler named ``name``
+
+ The handler must be added to the application as a named `URLSpec`.
+
+ Args will be substituted for capturing groups in the `URLSpec` regex.
+ They will be converted to strings if necessary, encoded as utf8,
+ and url-escaped.
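+
+        For example (a sketch; the route and handler names are illustrative)::
+
+            app = Application([
+                URLSpec(r"/article/([0-9]+)", ArticleHandler, name="article"),
+            ])
+            app.reverse_url("article", 42)  # -> "/article/42"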
+ """
+ reversed_url = self.default_router.reverse_url(name, *args)
+ if reversed_url is not None:
+ return reversed_url
+
+ raise KeyError("%s not found in named urls" % name)
+
+ def log_request(self, handler):
+ """Writes a completed HTTP request to the logs.
+
+ By default writes to the python root logger. To change
+ this behavior either subclass Application and override this method,
+ or pass a function in the application settings dictionary as
+ ``log_function``.
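+
+        A sketch of a custom ``log_function`` (assumes the standard
+        ``logging`` module is imported; the logger name is illustrative)::
+
+            def log_function(handler):
+                logging.getLogger("app.access").info(
+                    "%d %s", handler.get_status(), handler.request.uri)
+
+            app = Application(handlers, log_function=log_function)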
+ """
+ if "log_function" in self.settings:
+ self.settings["log_function"](handler)
+ return
+ if handler.get_status() < 400:
+ log_method = access_log.info
+ elif handler.get_status() < 500:
+ log_method = access_log.warning
+ else:
+ log_method = access_log.error
+ request_time = 1000.0 * handler.request.request_time()
+ log_method("%d %s %.2fms", handler.get_status(),
+ handler._request_summary(), request_time)
+
+
+class _HandlerDelegate(httputil.HTTPMessageDelegate):
+ def __init__(self, application, request, handler_class, handler_kwargs,
+ path_args, path_kwargs):
+ self.application = application
+ self.connection = request.connection
+ self.request = request
+ self.handler_class = handler_class
+ self.handler_kwargs = handler_kwargs or {}
+ self.path_args = path_args or []
+ self.path_kwargs = path_kwargs or {}
+ self.chunks = []
+ self.stream_request_body = _has_stream_request_body(self.handler_class)
+
+ def headers_received(self, start_line, headers):
+ if self.stream_request_body:
+ self.request.body = Future()
+ return self.execute()
+
+ def data_received(self, data):
+ if self.stream_request_body:
+ return self.handler.data_received(data)
+ else:
+ self.chunks.append(data)
+
+ def finish(self):
+ if self.stream_request_body:
+ self.request.body.set_result(None)
+ else:
+ self.request.body = b''.join(self.chunks)
+ self.request._parse_body()
+ self.execute()
+
+ def on_connection_close(self):
+ if self.stream_request_body:
+ self.handler.on_connection_close()
+ else:
+ self.chunks = None
+
+ def execute(self):
+ # If template cache is disabled (usually in the debug mode),
+ # re-compile templates and reload static files on every
+ # request so you don't need to restart to see changes
+ if not self.application.settings.get("compiled_template_cache", True):
+ with RequestHandler._template_loader_lock:
+ for loader in RequestHandler._template_loaders.values():
+ loader.reset()
+ if not self.application.settings.get('static_hash_cache', True):
+ StaticFileHandler.reset()
+
+ self.handler = self.handler_class(self.application, self.request,
+ **self.handler_kwargs)
+ transforms = [t(self.request) for t in self.application.transforms]
+
+ if self.stream_request_body:
+ self.handler._prepared_future = Future()
+ # Note that if an exception escapes handler._execute it will be
+ # trapped in the Future it returns (which we are ignoring here,
+ # leaving it to be logged when the Future is GC'd).
+ # However, that shouldn't happen because _execute has a blanket
+ # except handler, and we cannot easily access the IOLoop here to
+ # call add_future (because of the requirement to remain compatible
+ # with WSGI)
+ self.handler._execute(transforms, *self.path_args,
+ **self.path_kwargs)
+ # If we are streaming the request body, then execute() is finished
+ # when the handler has prepared to receive the body. If not,
+ # it doesn't matter when execute() finishes (so we return None)
+ return self.handler._prepared_future
+
+
+class HTTPError(Exception):
+ """An exception that will turn into an HTTP error response.
+
+ Raising an `HTTPError` is a convenient alternative to calling
+ `RequestHandler.send_error` since it automatically ends the
+ current function.
+
+ To customize the response sent with an `HTTPError`, override
+ `RequestHandler.write_error`.
+
+ :arg int status_code: HTTP status code. Must be listed in
+ `httplib.responses <http.client.responses>` unless the ``reason``
+ keyword argument is given.
+ :arg string log_message: Message to be written to the log for this error
+ (will not be shown to the user unless the `Application` is in debug
+ mode). May contain ``%s``-style placeholders, which will be filled
+ in with remaining positional parameters.
+ :arg string reason: Keyword-only argument. The HTTP "reason" phrase
+ to pass in the status line along with ``status_code``. Normally
+ determined automatically from ``status_code``, but can be used
+ to use a non-standard numeric code.
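+
+    For example (a sketch; the message and ``entity_id`` are illustrative)::
+
+        raise HTTPError(404, "entity %d not found", entity_id)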
+ """
+ def __init__(self, status_code=500, log_message=None, *args, **kwargs):
+ self.status_code = status_code
+ self.log_message = log_message
+ self.args = args
+ self.reason = kwargs.get('reason', None)
+ if log_message and not args:
+ self.log_message = log_message.replace('%', '%%')
+
+ def __str__(self):
+ message = "HTTP %d: %s" % (
+ self.status_code,
+ self.reason or httputil.responses.get(self.status_code, 'Unknown'))
+ if self.log_message:
+ return message + " (" + (self.log_message % self.args) + ")"
+ else:
+ return message
+
+
+class Finish(Exception):
+ """An exception that ends the request without producing an error response.
+
+ When `Finish` is raised in a `RequestHandler`, the request will
+ end (calling `RequestHandler.finish` if it hasn't already been
+ called), but the error-handling methods (including
+ `RequestHandler.write_error`) will not be called.
+
+ If `Finish()` was created with no arguments, the pending response
+ will be sent as-is. If `Finish()` was given an argument, that
+ argument will be passed to `RequestHandler.finish()`.
+
+ This can be a more convenient way to implement custom error pages
+ than overriding ``write_error`` (especially in library code)::
+
+ if self.current_user is None:
+ self.set_status(401)
+ self.set_header('WWW-Authenticate', 'Basic realm="something"')
+ raise Finish()
+
+ .. versionchanged:: 4.3
+ Arguments passed to ``Finish()`` will be passed on to
+ `RequestHandler.finish`.
+ """
+ pass
+
+
+class MissingArgumentError(HTTPError):
+ """Exception raised by `RequestHandler.get_argument`.
+
+ This is a subclass of `HTTPError`, so if it is uncaught a 400 response
+ code will be used instead of 500 (and a stack trace will not be logged).
+
+ .. versionadded:: 3.1
+ """
+ def __init__(self, arg_name):
+ super(MissingArgumentError, self).__init__(
+ 400, 'Missing argument %s' % arg_name)
+ self.arg_name = arg_name
+
+
+class ErrorHandler(RequestHandler):
+ """Generates an error response with ``status_code`` for all requests."""
+ def initialize(self, status_code):
+ self.set_status(status_code)
+
+ def prepare(self):
+ raise HTTPError(self._status_code)
+
+ def check_xsrf_cookie(self):
+ # POSTs to an ErrorHandler don't actually have side effects,
+ # so we don't need to check the xsrf token. This allows POSTs
+ # to the wrong url to return a 404 instead of 403.
+ pass
+
+
+class RedirectHandler(RequestHandler):
+ """Redirects the client to the given URL for all GET requests.
+
+ You should provide the keyword argument ``url`` to the handler, e.g.::
+
+ application = web.Application([
+ (r"/oldpath", web.RedirectHandler, {"url": "/newpath"}),
+ ])
+
+ `RedirectHandler` supports regular expression substitutions. E.g., to
+ swap the first and second parts of a path while preserving the remainder::
+
+ application = web.Application([
+ (r"/(.*?)/(.*?)/(.*)", web.RedirectHandler, {"url": "/{1}/{0}/{2}"}),
+ ])
+
+ The final URL is formatted with `str.format` and the substrings that match
+ the capturing groups. In the above example, a request to "/a/b/c" would be
+ formatted like::
+
+ str.format("/{1}/{0}/{2}", "a", "b", "c") # -> "/b/a/c"
+
+ Use Python's :ref:`format string syntax <formatstrings>` to customize how
+ values are substituted.
+
+ .. versionchanged:: 4.5
+ Added support for substitutions into the destination URL.
+ """
+ def initialize(self, url, permanent=True):
+ self._url = url
+ self._permanent = permanent
+
+ def get(self, *args):
+ self.redirect(self._url.format(*args), permanent=self._permanent)
+
+
+class StaticFileHandler(RequestHandler):
+ """A simple handler that can serve static content from a directory.
+
+ A `StaticFileHandler` is configured automatically if you pass the
+ ``static_path`` keyword argument to `Application`. This handler
+ can be customized with the ``static_url_prefix``, ``static_handler_class``,
+ and ``static_handler_args`` settings.
+
+ To map an additional path to this handler for a static data directory
+ you would add a line to your application like::
+
+ application = web.Application([
+ (r"/content/(.*)", web.StaticFileHandler, {"path": "/var/www"}),
+ ])
+
+ The handler constructor requires a ``path`` argument, which specifies the
+ local root directory of the content to be served.
+
+ Note that a capture group in the regex is required to parse the value for
+    the ``path`` argument to the get() method (different from the constructor
+ argument above); see `URLSpec` for details.
+
+ To serve a file like ``index.html`` automatically when a directory is
+ requested, set ``static_handler_args=dict(default_filename="index.html")``
+ in your application settings, or add ``default_filename`` as an initializer
+ argument for your ``StaticFileHandler``.
+
+ To maximize the effectiveness of browser caching, this class supports
+ versioned urls (by default using the argument ``?v=``). If a version
+ is given, we instruct the browser to cache this file indefinitely.
+ `make_static_url` (also available as `RequestHandler.static_url`) can
+ be used to construct a versioned url.
+
+ This handler is intended primarily for use in development and light-duty
+ file serving; for heavy traffic it will be more efficient to use
+ a dedicated static file server (such as nginx or Apache). We support
+ the HTTP ``Accept-Ranges`` mechanism to return partial content (because
+ some browsers require this functionality to be present to seek in
+ HTML5 audio or video).
+
+ **Subclassing notes**
+
+ This class is designed to be extensible by subclassing, but because
+ of the way static urls are generated with class methods rather than
+ instance methods, the inheritance patterns are somewhat unusual.
+ Be sure to use the ``@classmethod`` decorator when overriding a
+ class method. Instance methods may use the attributes ``self.path``,
+ ``self.absolute_path``, and ``self.modified``.
+
+ Subclasses should only override methods discussed in this section;
+ overriding other methods is error-prone. Overriding
+ ``StaticFileHandler.get`` is particularly problematic due to the
+ tight coupling with ``compute_etag`` and other methods.
+
+ To change the way static urls are generated (e.g. to match the behavior
+ of another server or CDN), override `make_static_url`, `parse_url_path`,
+ `get_cache_time`, and/or `get_version`.
+
+ To replace all interaction with the filesystem (e.g. to serve
+ static content from a database), override `get_content`,
+ `get_content_size`, `get_modified_time`, `get_absolute_path`, and
+ `validate_absolute_path`.
+
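+ As an illustration, a minimal subclass serving from an in-memory dict
+ might look like this (a sketch only; ``CONTENT`` is a hypothetical
+ mapping of paths to byte strings, and ``get_content_size`` and
+ ``get_modified_time`` are overridden because the defaults use
+ ``os.stat``)::
+
+ class DictStaticHandler(StaticFileHandler):
+ @classmethod
+ def get_absolute_path(cls, root, path):
+ return path # dict keys stand in for filesystem paths
+
+ def validate_absolute_path(self, root, absolute_path):
+ if absolute_path not in CONTENT:
+ raise HTTPError(404)
+ return absolute_path
+
+ @classmethod
+ def get_content(cls, abspath, start=None, end=None):
+ return CONTENT[abspath][start:end]
+
+ def get_content_size(self):
+ return len(CONTENT[self.absolute_path])
+
+ def get_modified_time(self):
+ return None
+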
+ .. versionchanged:: 3.1
+ Many of the methods for subclasses were added in Tornado 3.1.
+ """
+ CACHE_MAX_AGE = 86400 * 365 * 10 # 10 years
+
+ _static_hashes = {} # type: typing.Dict
+ _lock = threading.Lock() # protects _static_hashes
+
+ def initialize(self, path, default_filename=None):
+ self.root = path
+ self.default_filename = default_filename
+
+ @classmethod
+ def reset(cls):
+ with cls._lock:
+ cls._static_hashes = {}
+
+ def head(self, path):
+ return self.get(path, include_body=False)
+
+ @gen.coroutine
+ def get(self, path, include_body=True):
+ # Set up our path instance variables.
+ self.path = self.parse_url_path(path)
+ del path # make sure we don't refer to path instead of self.path again
+ absolute_path = self.get_absolute_path(self.root, self.path)
+ self.absolute_path = self.validate_absolute_path(
+ self.root, absolute_path)
+ if self.absolute_path is None:
+ return
+
+ self.modified = self.get_modified_time()
+ self.set_headers()
+
+ if self.should_return_304():
+ self.set_status(304)
+ return
+
+ request_range = None
+ range_header = self.request.headers.get("Range")
+ if range_header:
+ # As per RFC 2616 14.16, if an invalid Range header is specified,
+ # the request will be treated as if the header didn't exist.
+ request_range = httputil._parse_request_range(range_header)
+
+ size = self.get_content_size()
+ if request_range:
+ start, end = request_range
+ if (start is not None and start >= size) or end == 0:
+ # As per RFC 2616 14.35.1, a range is not satisfiable only if
+ # the first requested byte is equal to or greater than the
+ # content length, or when a suffix with length 0 is specified.
+ self.set_status(416) # Range Not Satisfiable
+ self.set_header("Content-Type", "text/plain")
+ self.set_header("Content-Range", "bytes */%s" % (size, ))
+ return
+ if start is not None and start < 0:
+ start += size
+ if end is not None and end > size:
+ # Clients sometimes blindly use a large range to limit their
+ # download size; cap the endpoint at the actual file size.
+ end = size
+ # Note: only return HTTP 206 if less than the entire range has been
+ # requested. Not only is this semantically correct, but Chrome
+ # refuses to play audio if it gets an HTTP 206 in response to
+ # ``Range: bytes=0-``.
+ if size != (end or size) - (start or 0):
+ self.set_status(206) # Partial Content
+ self.set_header("Content-Range",
+ httputil._get_content_range(start, end, size))
+ else:
+ start = end = None
+
+ if start is not None and end is not None:
+ content_length = end - start
+ elif end is not None:
+ content_length = end
+ elif start is not None:
+ content_length = size - start
+ else:
+ content_length = size
+ self.set_header("Content-Length", content_length)
+
+ if include_body:
+ content = self.get_content(self.absolute_path, start, end)
+ if isinstance(content, bytes):
+ content = [content]
+ for chunk in content:
+ try:
+ self.write(chunk)
+ yield self.flush()
+ except iostream.StreamClosedError:
+ return
+ else:
+ assert self.request.method == "HEAD"
+
+ def compute_etag(self):
+ """Sets the ``Etag`` header based on static url version.
+
+ This allows efficient ``If-None-Match`` checks against cached
+ versions, and sends the correct ``Etag`` for a partial response
+ (i.e. the same ``Etag`` as the full file).
+
+ .. versionadded:: 3.1
+ """
+ version_hash = self._get_cached_version(self.absolute_path)
+ if not version_hash:
+ return None
+ return '"%s"' % (version_hash, )
+
+ def set_headers(self):
+ """Sets the content and caching headers on the response.
+
+ .. versionadded:: 3.1
+ """
+ self.set_header("Accept-Ranges", "bytes")
+ self.set_etag_header()
+
+ if self.modified is not None:
+ self.set_header("Last-Modified", self.modified)
+
+ content_type = self.get_content_type()
+ if content_type:
+ self.set_header("Content-Type", content_type)
+
+ cache_time = self.get_cache_time(self.path, self.modified,
+ content_type)
+ if cache_time > 0:
+ self.set_header("Expires", datetime.datetime.utcnow() +
+ datetime.timedelta(seconds=cache_time))
+ self.set_header("Cache-Control", "max-age=" + str(cache_time))
+
+ self.set_extra_headers(self.path)
+
+ def should_return_304(self):
+ """Returns True if the headers indicate that we should return 304.
+
+ .. versionadded:: 3.1
+ """
+ if self.check_etag_header():
+ return True
+
+ # Check the If-Modified-Since header, and don't send the result
+ # if the content has not been modified.
+ ims_value = self.request.headers.get("If-Modified-Since")
+ if ims_value is not None:
+ date_tuple = email.utils.parsedate(ims_value)
+ if date_tuple is not None:
+ if_since = datetime.datetime(*date_tuple[:6])
+ if if_since >= self.modified:
+ return True
+
+ return False
+
+ @classmethod
+ def get_absolute_path(cls, root, path):
+ """Returns the absolute location of ``path`` relative to ``root``.
+
+ ``root`` is the path configured for this `StaticFileHandler`
+ (in most cases the ``static_path`` `Application` setting).
+
+ This class method may be overridden in subclasses. By default
+ it returns a filesystem path, but other strings may be used
+ as long as they are unique and understood by the subclass's
+ overridden `get_content`.
+
+ .. versionadded:: 3.1
+ """
+ abspath = os.path.abspath(os.path.join(root, path))
+ return abspath
+
+ def validate_absolute_path(self, root, absolute_path):
+ """Validate and return the absolute path.
+
+ ``root`` is the configured path for the `StaticFileHandler`,
+ and ``absolute_path`` is the result of `get_absolute_path`.
+
+ This is an instance method called during request processing,
+ so it may raise `HTTPError` or use methods like
+ `RequestHandler.redirect` (return None after redirecting to
+ halt further processing). This is where 404 errors for missing files
+ are generated.
+
+ This method may modify the path before returning it, but note that
+ any such modifications will not be understood by `make_static_url`.
+
+ In instance methods, this method's result is available as
+ ``self.absolute_path``.
+
+ .. versionadded:: 3.1
+ """
+ # os.path.abspath strips a trailing /.
+ # We must add it back to `root` so that we only match files
+ # in a directory named `root` instead of files starting with
+ # that prefix.
+ root = os.path.abspath(root)
+ if not root.endswith(os.path.sep):
+ # abspath always removes a trailing slash, except when
+ # root is '/'. This is an unusual case, but several projects
+ # have independently discovered this technique to disable
+ # Tornado's path validation and (hopefully) do their own,
+ # so we need to support it.
+ root += os.path.sep
+ # The trailing slash also needs to be temporarily added back to
+ # the requested path so a request to root/ will match.
+ if not (absolute_path + os.path.sep).startswith(root):
+ raise HTTPError(403, "%s is not in root static directory",
+ self.path)
+ if (os.path.isdir(absolute_path) and
+ self.default_filename is not None):
+ # need to look at the request.path here for when path is empty
+ # but there is some prefix to the path that was already
+ # trimmed by the routing
+ if not self.request.path.endswith("/"):
+ self.redirect(self.request.path + "/", permanent=True)
+ return
+ absolute_path = os.path.join(absolute_path, self.default_filename)
+ if not os.path.exists(absolute_path):
+ raise HTTPError(404)
+ if not os.path.isfile(absolute_path):
+ raise HTTPError(403, "%s is not a file", self.path)
+ return absolute_path
+
+ @classmethod
+ def get_content(cls, abspath, start=None, end=None):
+ """Retrieve the content of the requested resource which is located
+ at the given absolute path.
+
+ This class method may be overridden by subclasses. Note that its
+ signature is different from other overridable class methods
+ (no ``settings`` argument); this is deliberate to ensure that
+ ``abspath`` is able to stand on its own as a cache key.
+
+ This method should either return a byte string or an iterator
+ of byte strings. The latter is preferred for large files
+ as it helps reduce memory fragmentation.
+
+ .. versionadded:: 3.1
+ """
+ with open(abspath, "rb") as file:
+ if start is not None:
+ file.seek(start)
+ if end is not None:
+ remaining = end - (start or 0)
+ else:
+ remaining = None
+ while True:
+ chunk_size = 64 * 1024
+ if remaining is not None and remaining < chunk_size:
+ chunk_size = remaining
+ chunk = file.read(chunk_size)
+ if chunk:
+ if remaining is not None:
+ remaining -= len(chunk)
+ yield chunk
+ else:
+ if remaining is not None:
+ assert remaining == 0
+ return
+
+ @classmethod
+ def get_content_version(cls, abspath):
+ """Returns a version string for the resource at the given path.
+
+ This class method may be overridden by subclasses. The
+ default implementation is a hash of the file's contents.
+
+ .. versionadded:: 3.1
+ """
+ data = cls.get_content(abspath)
+ hasher = hashlib.md5()
+ if isinstance(data, bytes):
+ hasher.update(data)
+ else:
+ for chunk in data:
+ hasher.update(chunk)
+ return hasher.hexdigest()
+
+ def _stat(self):
+ if not hasattr(self, '_stat_result'):
+ self._stat_result = os.stat(self.absolute_path)
+ return self._stat_result
+
+ def get_content_size(self):
+ """Retrieve the total size of the resource at the given path.
+
+ This method may be overridden by subclasses.
+
+ .. versionadded:: 3.1
+
+ .. versionchanged:: 4.0
+ This method is now always called, instead of only when
+ partial results are requested.
+ """
+ stat_result = self._stat()
+ return stat_result[stat.ST_SIZE]
+
+ def get_modified_time(self):
+ """Returns the time that ``self.absolute_path`` was last modified.
+
+ May be overridden in subclasses. Should return a `~datetime.datetime`
+ object or None.
+
+ .. versionadded:: 3.1
+ """
+ stat_result = self._stat()
+ modified = datetime.datetime.utcfromtimestamp(
+ stat_result[stat.ST_MTIME])
+ return modified
+
+ def get_content_type(self):
+ """Returns the ``Content-Type`` header to be used for this request.
+
+ .. versionadded:: 3.1
+ """
+ mime_type, encoding = mimetypes.guess_type(self.absolute_path)
+ # per RFC 6713, use the appropriate type for a gzip compressed file
+ if encoding == "gzip":
+ return "application/gzip"
+ # As of 2015-07-21 there is no bzip2 encoding defined at
+ # http://www.iana.org/assignments/media-types/media-types.xhtml
+ # So for that (and any other encoding), use octet-stream.
+ elif encoding is not None:
+ return "application/octet-stream"
+ elif mime_type is not None:
+ return mime_type
+ # if mime_type not detected, use application/octet-stream
+ else:
+ return "application/octet-stream"
+
+ def set_extra_headers(self, path):
+ """For subclass to add extra headers to the response"""
+ pass
+
+ def get_cache_time(self, path, modified, mime_type):
+ """Override to customize cache control behavior.
+
+ Return a positive number of seconds to make the result
+ cacheable for that amount of time or 0 to mark resource as
+ cacheable for an unspecified amount of time (subject to
+ browser heuristics).
+
+ By default returns cache expiry of 10 years for resources requested
+ with ``v`` argument.
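+
+ For example, to cache every response for one hour regardless of
+ versioning, an override might look like (a sketch)::
+
+ def get_cache_time(self, path, modified, mime_type):
+ return 3600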
+ """
+ return self.CACHE_MAX_AGE if "v" in self.request.arguments else 0
+
+ @classmethod
+ def make_static_url(cls, settings, path, include_version=True):
+ """Constructs a versioned url for the given path.
+
+ This method may be overridden in subclasses (but note that it
+ is a class method rather than an instance method). Subclasses
+ are only required to implement the signature
+ ``make_static_url(cls, settings, path)``; other keyword
+ arguments may be passed through `~RequestHandler.static_url`
+ but are not standard.
+
+ ``settings`` is the `Application.settings` dictionary. ``path``
+ is the static path being requested. The url returned should be
+ relative to the current host.
+
+ ``include_version`` determines whether the generated URL should
+ include the query string containing the version hash of the
+ file corresponding to the given ``path``.
+
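+ For example, a subclass that serves assets from a CDN might look
+ like this (a sketch; the host name is illustrative)::
+
+ @classmethod
+ def make_static_url(cls, settings, path, include_version=True):
+ return "https://cdn.example.com/" + path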
+ """
+ url = settings.get('static_url_prefix', '/static/') + path
+ if not include_version:
+ return url
+
+ version_hash = cls.get_version(settings, path)
+ if not version_hash:
+ return url
+
+ return '%s?v=%s' % (url, version_hash)
+
+ def parse_url_path(self, url_path):
+ """Converts a static URL path into a filesystem path.
+
+ ``url_path`` is the path component of the URL with
+ ``static_url_prefix`` removed. The return value should be
+ filesystem path relative to ``static_path``.
+
+ This is the inverse of `make_static_url`.
+ """
+ if os.path.sep != "/":
+ url_path = url_path.replace("/", os.path.sep)
+ return url_path
+
+ @classmethod
+ def get_version(cls, settings, path):
+ """Generate the version string to be used in static URLs.
+
+ ``settings`` is the `Application.settings` dictionary and ``path``
+ is the relative location of the requested asset on the filesystem.
+ The returned value should be a string, or ``None`` if no version
+ could be determined.
+
+ .. versionchanged:: 3.1
+ This method was previously recommended for subclasses to override;
+ `get_content_version` is now preferred as it allows the base
+ class to handle caching of the result.
+ """
+ abs_path = cls.get_absolute_path(settings['static_path'], path)
+ return cls._get_cached_version(abs_path)
+
+ @classmethod
+ def _get_cached_version(cls, abs_path):
+ with cls._lock:
+ hashes = cls._static_hashes
+ if abs_path not in hashes:
+ try:
+ hashes[abs_path] = cls.get_content_version(abs_path)
+ except Exception:
+ gen_log.error("Could not open static file %r", abs_path)
+ hashes[abs_path] = None
+ hsh = hashes.get(abs_path)
+ if hsh:
+ return hsh
+ return None
+
+
+class FallbackHandler(RequestHandler):
+ """A `RequestHandler` that wraps another HTTP server callback.
+
+ The fallback is a callable object that accepts an
+ `~.httputil.HTTPServerRequest`, such as an `Application` or
+ `tornado.wsgi.WSGIContainer`. This is most useful to use both
+ Tornado ``RequestHandlers`` and WSGI in the same server. Typical
+ usage::
+
+ wsgi_app = tornado.wsgi.WSGIContainer(
+ django.core.handlers.wsgi.WSGIHandler())
+ application = tornado.web.Application([
+ (r"/foo", FooHandler),
+ (r".*", FallbackHandler, dict(fallback=wsgi_app),
+ ])
+ """
+ def initialize(self, fallback):
+ self.fallback = fallback
+
+ def prepare(self):
+ self.fallback(self.request)
+ self._finished = True
+
+
+class OutputTransform(object):
+ """A transform modifies the result of an HTTP request (e.g., GZip encoding)
+
+ Applications are not expected to create their own OutputTransforms
+ or interact with them directly; the framework chooses which transforms
+ (if any) to apply.
+ """
+ def __init__(self, request):
+ pass
+
+ def transform_first_chunk(self, status_code, headers, chunk, finishing):
+ # type: (int, httputil.HTTPHeaders, bytes, bool) -> typing.Tuple[int, httputil.HTTPHeaders, bytes]
+ return status_code, headers, chunk
+
+ def transform_chunk(self, chunk, finishing):
+ return chunk
+
+
+class GZipContentEncoding(OutputTransform):
+ """Applies the gzip content encoding to the response.
+
+ See http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11
+
+ .. versionchanged:: 4.0
+ Now compresses all mime types beginning with ``text/``, instead
+ of just a whitelist. (The whitelist is still used for certain
+ non-text mime types.)
+ """
+ # Whitelist of compressible mime types (in addition to any types
+ # beginning with "text/").
+ CONTENT_TYPES = set(["application/javascript", "application/x-javascript",
+ "application/xml", "application/atom+xml",
+ "application/json", "application/xhtml+xml",
+ "image/svg+xml"])
+ # Python's GzipFile defaults to level 9, while most other gzip
+ # tools (including gzip itself) default to 6, which is probably a
+ # better CPU/size tradeoff.
+ GZIP_LEVEL = 6
+ # Responses that are too short are unlikely to benefit from gzipping
+ # after considering the "Content-Encoding: gzip" header and the header
+ # inside the gzip encoding.
+ # Note that responses written in multiple chunks will be compressed
+ # regardless of size.
+ MIN_LENGTH = 1024
+
+ def __init__(self, request):
+ self._gzipping = "gzip" in request.headers.get("Accept-Encoding", "")
+
+ def _compressible_type(self, ctype):
+ return ctype.startswith('text/') or ctype in self.CONTENT_TYPES
+
+ def transform_first_chunk(self, status_code, headers, chunk, finishing):
+ # type: (int, httputil.HTTPHeaders, bytes, bool) -> typing.Tuple[int, httputil.HTTPHeaders, bytes]
+ # TODO: can/should this type be inherited from the superclass?
+ if 'Vary' in headers:
+ headers['Vary'] += ', Accept-Encoding'
+ else:
+ headers['Vary'] = 'Accept-Encoding'
+ if self._gzipping:
+ ctype = _unicode(headers.get("Content-Type", "")).split(";")[0]
+ self._gzipping = self._compressible_type(ctype) and \
+ (not finishing or len(chunk) >= self.MIN_LENGTH) and \
+ ("Content-Encoding" not in headers)
+ if self._gzipping:
+ headers["Content-Encoding"] = "gzip"
+ self._gzip_value = BytesIO()
+ self._gzip_file = gzip.GzipFile(mode="w", fileobj=self._gzip_value,
+ compresslevel=self.GZIP_LEVEL)
+ chunk = self.transform_chunk(chunk, finishing)
+ if "Content-Length" in headers:
+ # The original content length is no longer correct.
+ # If this is the last (and only) chunk, we can set the new
+ # content-length; otherwise we remove it and fall back to
+ # chunked encoding.
+ if finishing:
+ headers["Content-Length"] = str(len(chunk))
+ else:
+ del headers["Content-Length"]
+ return status_code, headers, chunk
+
+ def transform_chunk(self, chunk, finishing):
+ if self._gzipping:
+ self._gzip_file.write(chunk)
+ if finishing:
+ self._gzip_file.close()
+ else:
+ self._gzip_file.flush()
+ chunk = self._gzip_value.getvalue()
+ self._gzip_value.truncate(0)
+ self._gzip_value.seek(0)
+ return chunk
+
+
+def authenticated(method):
+ """Decorate methods with this to require that the user be logged in.
+
+ If the user is not logged in, they will be redirected to the configured
+ `login url <RequestHandler.get_login_url>`.
+
+ If you configure a login url with a query parameter, Tornado will
+ assume you know what you're doing and use it as-is. If not, it
+ will add a `next` parameter so the login page knows where to send
+ you once you're logged in.
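+
+ A typical use looks like this (a sketch; ``ProfileHandler`` is
+ hypothetical)::
+
+ class ProfileHandler(RequestHandler):
+ @authenticated
+ def get(self):
+ self.write("Hello " + str(self.current_user))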
+ """
+ @functools.wraps(method)
+ def wrapper(self, *args, **kwargs):
+ if not self.current_user:
+ if self.request.method in ("GET", "HEAD"):
+ url = self.get_login_url()
+ if "?" not in url:
+ if urlparse.urlsplit(url).scheme:
+ # if login url is absolute, make next absolute too
+ next_url = self.request.full_url()
+ else:
+ next_url = self.request.uri
+ url += "?" + urlencode(dict(next=next_url))
+ self.redirect(url)
+ return
+ raise HTTPError(403)
+ return method(self, *args, **kwargs)
+ return wrapper
+
+
+class UIModule(object):
+ """A re-usable, modular UI unit on a page.
+
+ UI modules often execute additional queries, and they can include
+ additional CSS and JavaScript that will be automatically inserted
+ into the output page on render.
+
+ Subclasses of UIModule must override the `render` method.
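+
+ A minimal module might look like this (a sketch; the template name
+ is hypothetical)::
+
+ class Entry(UIModule):
+ def render(self, entry):
+ return self.render_string("module-entry.html", entry=entry)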
+ """
+ def __init__(self, handler):
+ self.handler = handler
+ self.request = handler.request
+ self.ui = handler.ui
+ self.locale = handler.locale
+
+ @property
+ def current_user(self):
+ return self.handler.current_user
+
+ def render(self, *args, **kwargs):
+ """Override in subclasses to return this module's output."""
+ raise NotImplementedError()
+
+ def embedded_javascript(self):
+ """Override to return a JavaScript string
+ to be embedded in the page."""
+ return None
+
+ def javascript_files(self):
+ """Override to return a list of JavaScript files needed by this module.
+
+ If the return values are relative paths, they will be passed to
+ `RequestHandler.static_url`; otherwise they will be used as-is.
+ """
+ return None
+
+ def embedded_css(self):
+ """Override to return a CSS string
+ that will be embedded in the page."""
+ return None
+
+ def css_files(self):
+ """Override to returns a list of CSS files required by this module.
+
+ If the return values are relative paths, they will be passed to
+ `RequestHandler.static_url`; otherwise they will be used as-is.
+ """
+ return None
+
+ def html_head(self):
+ """Override to return an HTML string that will be put in the <head/>
+ element.
+ """
+ return None
+
+ def html_body(self):
+ """Override to return an HTML string that will be put at the end of
+ the <body/> element.
+ """
+ return None
+
+ def render_string(self, path, **kwargs):
+ """Renders a template and returns it as a string."""
+ return self.handler.render_string(path, **kwargs)
+
+
+class _linkify(UIModule):
+ def render(self, text, **kwargs):
+ return escape.linkify(text, **kwargs)
+
+
+class _xsrf_form_html(UIModule):
+ def render(self):
+ return self.handler.xsrf_form_html()
+
+
+class TemplateModule(UIModule):
+ """UIModule that simply renders the given template.
+
+ {% module Template("foo.html") %} is similar to {% include "foo.html" %},
+ but the module version gets its own namespace (with kwargs passed to
+ Template()) instead of inheriting the outer template's namespace.
+
+ Templates rendered through this module also get access to UIModule's
+ automatic javascript/css features. Simply call set_resources
+ inside the template and give it keyword arguments corresponding to
+ the methods on UIModule: {{ set_resources(js_files=static_url("my.js")) }}
+ Note that these resources are output once per template file, not once
+ per instantiation of the template, so they must not depend on
+ any arguments to the template.
+ """
+ def __init__(self, handler):
+ super(TemplateModule, self).__init__(handler)
+ # keep resources in both a list and a dict to preserve order
+ self._resource_list = []
+ self._resource_dict = {}
+
+ def render(self, path, **kwargs):
+ def set_resources(**kwargs):
+ if path not in self._resource_dict:
+ self._resource_list.append(kwargs)
+ self._resource_dict[path] = kwargs
+ else:
+ if self._resource_dict[path] != kwargs:
+ raise ValueError("set_resources called with different "
+ "resources for the same template")
+ return ""
+ return self.render_string(path, set_resources=set_resources,
+ **kwargs)
+
+ def _get_resources(self, key):
+ return (r[key] for r in self._resource_list if key in r)
+
+ def embedded_javascript(self):
+ return "\n".join(self._get_resources("embedded_javascript"))
+
+ def javascript_files(self):
+ result = []
+ for f in self._get_resources("javascript_files"):
+ if isinstance(f, (unicode_type, bytes)):
+ result.append(f)
+ else:
+ result.extend(f)
+ return result
+
+ def embedded_css(self):
+ return "\n".join(self._get_resources("embedded_css"))
+
+ def css_files(self):
+ result = []
+ for f in self._get_resources("css_files"):
+ if isinstance(f, (unicode_type, bytes)):
+ result.append(f)
+ else:
+ result.extend(f)
+ return result
+
+ def html_head(self):
+ return "".join(self._get_resources("html_head"))
+
+ def html_body(self):
+ return "".join(self._get_resources("html_body"))
+
+
+class _UIModuleNamespace(object):
+ """Lazy namespace which creates UIModule proxies bound to a handler."""
+ def __init__(self, handler, ui_modules):
+ self.handler = handler
+ self.ui_modules = ui_modules
+
+ def __getitem__(self, key):
+ return self.handler._ui_module(key, self.ui_modules[key])
+
+ def __getattr__(self, key):
+ try:
+ return self[key]
+ except KeyError as e:
+ raise AttributeError(str(e))
+
+
+if hasattr(hmac, 'compare_digest'): # python 3.3
+ _time_independent_equals = hmac.compare_digest
+else:
+ def _time_independent_equals(a, b):
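+ # Compare by OR-ing together the XOR of every byte pair so the
+ # running time does not depend on where the first mismatch occurs.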
+ if len(a) != len(b):
+ return False
+ result = 0
+ if isinstance(a[0], int): # python3 byte strings
+ for x, y in zip(a, b):
+ result |= x ^ y
+ else: # python2
+ for x, y in zip(a, b):
+ result |= ord(x) ^ ord(y)
+ return result == 0
+
+
+def create_signed_value(secret, name, value, version=None, clock=None,
+ key_version=None):
+ if version is None:
+ version = DEFAULT_SIGNED_VALUE_VERSION
+ if clock is None:
+ clock = time.time
+
+ timestamp = utf8(str(int(clock())))
+ value = base64.b64encode(utf8(value))
+ if version == 1:
+ signature = _create_signature_v1(secret, name, value, timestamp)
+ value = b"|".join([value, timestamp, signature])
+ return value
+ elif version == 2:
+ # The v2 format consists of a version number and a series of
+ # length-prefixed fields "%d:%s", the last of which is a
+ # signature, all separated by pipes. All numbers are in
+ # decimal format with no leading zeros. The signature is an
+ # HMAC-SHA256 of the whole string up to that point, including
+ # the final pipe.
+ #
+ # The fields are:
+ # - format version (i.e. 2; no length prefix)
+ # - key version (integer, default is 0)
+ # - timestamp (integer seconds since epoch)
+ # - name (not encoded; assumed to be ~alphanumeric)
+ # - value (base64-encoded)
+ # - signature (hex-encoded; no length prefix)
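+ #
+ # For example (made-up values), signing name "foo" and value "bar"
+ # at timestamp 1300000000 with key version 0 produces a payload of
+ # the form:
+ # 2|1:0|10:1300000000|3:foo|4:YmFy|<hex signature>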
+ def format_field(s):
+ return utf8("%d:" % len(s)) + utf8(s)
+ to_sign = b"|".join([
+ b"2",
+ format_field(str(key_version or 0)),
+ format_field(timestamp),
+ format_field(name),
+ format_field(value),
+ b''])
+
+ if isinstance(secret, dict):
+ assert key_version is not None, 'Key version must be set when sign key dict is used'
+ assert version >= 2, 'Version must be at least 2 for key version support'
+ secret = secret[key_version]
+
+ signature = _create_signature_v2(secret, to_sign)
+ return to_sign + signature
+ else:
+ raise ValueError("Unsupported version %d" % version)
+
+
+# A leading version number in decimal
+# with no leading zeros, followed by a pipe.
+_signed_value_version_re = re.compile(br"^([1-9][0-9]*)\|(.*)$")
+
+
+def _get_version(value):
+ # Figures out what version value is. Version 1 did not include an
+ # explicit version field and started with arbitrary base64 data,
+ # which makes this tricky.
+ m = _signed_value_version_re.match(value)
+ if m is None:
+ version = 1
+ else:
+ try:
+ version = int(m.group(1))
+ if version > 999:
+ # Certain payloads from the version-less v1 format may
+ # be parsed as valid integers. Due to base64 padding
+ # restrictions, this can only happen for numbers whose
+ # length is a multiple of 4, so we can treat all
+ # numbers up to 999 as versions, and for the rest we
+ # fall back to v1 format.
+ version = 1
+ except ValueError:
+ version = 1
+ return version
+
+
+def decode_signed_value(secret, name, value, max_age_days=31,
+ clock=None, min_version=None):
+ if clock is None:
+ clock = time.time
+ if min_version is None:
+ min_version = DEFAULT_SIGNED_VALUE_MIN_VERSION
+ if min_version > 2:
+ raise ValueError("Unsupported min_version %d" % min_version)
+ if not value:
+ return None
+
+ value = utf8(value)
+ version = _get_version(value)
+
+ if version < min_version:
+ return None
+ if version == 1:
+ return _decode_signed_value_v1(secret, name, value,
+ max_age_days, clock)
+ elif version == 2:
+ return _decode_signed_value_v2(secret, name, value,
+ max_age_days, clock)
+ else:
+ return None
+
+
+def _decode_signed_value_v1(secret, name, value, max_age_days, clock):
+ parts = utf8(value).split(b"|")
+ if len(parts) != 3:
+ return None
+ signature = _create_signature_v1(secret, name, parts[0], parts[1])
+ if not _time_independent_equals(parts[2], signature):
+ gen_log.warning("Invalid cookie signature %r", value)
+ return None
+ timestamp = int(parts[1])
+ if timestamp < clock() - max_age_days * 86400:
+ gen_log.warning("Expired cookie %r", value)
+ return None
+ if timestamp > clock() + 31 * 86400:
+ # _cookie_signature does not hash a delimiter between the
+ # parts of the cookie, so an attacker could transfer trailing
+ # digits from the payload to the timestamp without altering the
+ # signature. For backwards compatibility, sanity-check timestamp
+ # here instead of modifying _cookie_signature.
+ gen_log.warning("Cookie timestamp in future; possible tampering %r",
+ value)
+ return None
+ if parts[1].startswith(b"0"):
+ gen_log.warning("Tampered cookie %r", value)
+ return None
+ try:
+ return base64.b64decode(parts[0])
+ except Exception:
+ return None
+
+
+def _decode_fields_v2(value):
+ def _consume_field(s):
+ length, _, rest = s.partition(b':')
+ n = int(length)
+ field_value = rest[:n]
+ # In python 3, indexing bytes returns small integers; we must
+ # use a slice to get a byte string as in python 2.
+ if rest[n:n + 1] != b'|':
+ raise ValueError("malformed v2 signed value field")
+ rest = rest[n + 1:]
+ return field_value, rest
+
+ rest = value[2:] # remove version number
+ key_version, rest = _consume_field(rest)
+ timestamp, rest = _consume_field(rest)
+ name_field, rest = _consume_field(rest)
+ value_field, passed_sig = _consume_field(rest)
+ return int(key_version), timestamp, name_field, value_field, passed_sig
+
+
+def _decode_signed_value_v2(secret, name, value, max_age_days, clock):
+ try:
+ key_version, timestamp, name_field, value_field, passed_sig = _decode_fields_v2(value)
+ except ValueError:
+ return None
+ signed_string = value[:-len(passed_sig)]
+
+ if isinstance(secret, dict):
+ try:
+ secret = secret[key_version]
+ except KeyError:
+ return None
+
+ expected_sig = _create_signature_v2(secret, signed_string)
+ if not _time_independent_equals(passed_sig, expected_sig):
+ return None
+ if name_field != utf8(name):
+ return None
+ timestamp = int(timestamp)
+ if timestamp < clock() - max_age_days * 86400:
+ # The signature has expired.
+ return None
+ try:
+ return base64.b64decode(value_field)
+ except Exception:
+ return None
+
+
+def get_signature_key_version(value):
+ value = utf8(value)
+ version = _get_version(value)
+ if version < 2:
+ return None
+ try:
+ key_version, _, _, _, _ = _decode_fields_v2(value)
+ except ValueError:
+ return None
+
+ return key_version
+
+
+def _create_signature_v1(secret, *parts):
+ hash = hmac.new(utf8(secret), digestmod=hashlib.sha1)
+ for part in parts:
+ hash.update(utf8(part))
+ return utf8(hash.hexdigest())
+
+
+def _create_signature_v2(secret, s):
+ hash = hmac.new(utf8(secret), digestmod=hashlib.sha256)
+ hash.update(utf8(s))
+ return utf8(hash.hexdigest())
+
+
+def is_absolute(path):
+ return any(path.startswith(x) for x in ["/", "http:", "https:"])
diff --git a/contrib/python/tornado/tornado-4/tornado/websocket.py b/contrib/python/tornado/tornado-4/tornado/websocket.py
index 0e9d339f59..12086e116c 100644
--- a/contrib/python/tornado/tornado-4/tornado/websocket.py
+++ b/contrib/python/tornado/tornado-4/tornado/websocket.py
@@ -1,1244 +1,1244 @@
-"""Implementation of the WebSocket protocol.
-
-`WebSockets <http://dev.w3.org/html5/websockets/>`_ allow for bidirectional
-communication between the browser and server.
-
-WebSockets are supported in the current versions of all major browsers,
-although older versions that do not support WebSockets are still in use
-(refer to http://caniuse.com/websockets for details).
-
-This module implements the final version of the WebSocket protocol as
-defined in `RFC 6455 <http://tools.ietf.org/html/rfc6455>`_. Certain
-browser versions (notably Safari 5.x) implemented an earlier draft of
-the protocol (known as "draft 76") and are not compatible with this module.
-
-.. versionchanged:: 4.0
- Removed support for the draft 76 protocol version.
-"""
-
-from __future__ import absolute_import, division, print_function
-# Author: Jacob Kristhammar, 2010
-
-import base64
-import collections
-import hashlib
-import os
-import struct
-import tornado.escape
-import tornado.web
-import zlib
-
-from tornado.concurrent import TracebackFuture
-from tornado.escape import utf8, native_str, to_unicode
-from tornado import gen, httpclient, httputil
-from tornado.ioloop import IOLoop, PeriodicCallback
-from tornado.iostream import StreamClosedError
-from tornado.log import gen_log, app_log
-from tornado import simple_httpclient
-from tornado.tcpclient import TCPClient
-from tornado.util import _websocket_mask, PY3
-
-if PY3:
- from urllib.parse import urlparse # py3
- xrange = range
-else:
- from urlparse import urlparse # py2
-
-
-class WebSocketError(Exception):
- pass
-
-
-class WebSocketClosedError(WebSocketError):
- """Raised by operations on a closed connection.
-
- .. versionadded:: 3.2
- """
- pass
-
-
-class WebSocketHandler(tornado.web.RequestHandler):
- """Subclass this class to create a basic WebSocket handler.
-
- Override `on_message` to handle incoming messages, and use
- `write_message` to send messages to the client. You can also
- override `open` and `on_close` to handle opened and closed
- connections.
-
- Custom upgrade response headers can be sent by overriding
- `~tornado.web.RequestHandler.set_default_headers` or
- `~tornado.web.RequestHandler.prepare`.
-
- See http://dev.w3.org/html5/websockets/ for details on the
- JavaScript interface. The protocol is specified at
- http://tools.ietf.org/html/rfc6455.
-
- Here is an example WebSocket handler that echoes all received
- messages back to the client:
-
- .. testcode::
-
- class EchoWebSocket(tornado.websocket.WebSocketHandler):
- def open(self):
- print("WebSocket opened")
-
- def on_message(self, message):
- self.write_message(u"You said: " + message)
-
- def on_close(self):
- print("WebSocket closed")
-
- .. testoutput::
- :hide:
-
- WebSockets are not standard HTTP connections. The "handshake" is
- HTTP, but after the handshake, the protocol is
- message-based. Consequently, most of the Tornado HTTP facilities
- are not available in handlers of this type. The only communication
- methods available to you are `write_message()`, `ping()`, and
- `close()`. Likewise, your request handler class should implement the
- `open()` method rather than ``get()`` or ``post()``.
-
- If you map the handler above to ``/websocket`` in your application, you can
- invoke it in JavaScript with::
-
- var ws = new WebSocket("ws://localhost:8888/websocket");
- ws.onopen = function() {
- ws.send("Hello, world");
- };
- ws.onmessage = function (evt) {
- alert(evt.data);
- };
-
- This script pops up an alert box that says "You said: Hello, world".
-
- Web browsers allow any site to open a websocket connection to any other,
- instead of using the same-origin policy that governs other network
- access from javascript. This can be surprising and is a potential
- security hole, so since Tornado 4.0 `WebSocketHandler` requires
- applications that wish to receive cross-origin websockets to opt in
- by overriding the `~WebSocketHandler.check_origin` method (see that
- method's docs for details). Failure to do so is the most likely
- cause of 403 errors when making a websocket connection.
-
- When using a secure websocket connection (``wss://``) with a self-signed
- certificate, the connection from a browser may fail because it wants
- to show the "accept this certificate" dialog but has nowhere to show it.
- You must first visit a regular HTML page using the same certificate
- to accept it before the websocket connection will succeed.
-
- If the application setting ``websocket_ping_interval`` has a non-zero
- value, a ping will be sent periodically, and the connection will be
- closed if a response is not received before the ``websocket_ping_timeout``.
-
- Messages larger than the ``websocket_max_message_size`` application setting
- (default 10MiB) will not be accepted.
-
- .. versionchanged:: 4.5
- Added ``websocket_ping_interval``, ``websocket_ping_timeout``, and
- ``websocket_max_message_size``.
- """
- def __init__(self, application, request, **kwargs):
- super(WebSocketHandler, self).__init__(application, request, **kwargs)
- self.ws_connection = None
- self.close_code = None
- self.close_reason = None
- self.stream = None
- self._on_close_called = False
-
- @tornado.web.asynchronous
- def get(self, *args, **kwargs):
- self.open_args = args
- self.open_kwargs = kwargs
-
- # Upgrade header should be present and should be equal to WebSocket
- if self.request.headers.get("Upgrade", "").lower() != 'websocket':
- self.set_status(400)
- log_msg = "Can \"Upgrade\" only to \"WebSocket\"."
- self.finish(log_msg)
- gen_log.debug(log_msg)
- return
-
- # Connection header should be upgrade.
- # Some proxy servers/load balancers
- # might mess with it.
- headers = self.request.headers
- connection = map(lambda s: s.strip().lower(),
- headers.get("Connection", "").split(","))
- if 'upgrade' not in connection:
- self.set_status(400)
- log_msg = "\"Connection\" must be \"Upgrade\"."
- self.finish(log_msg)
- gen_log.debug(log_msg)
- return
-
- # Handle WebSocket Origin naming convention differences
- # The difference between version 8 and 13 is that in 8 the
- # client sends a "Sec-Websocket-Origin" header and in 13 it's
- # simply "Origin".
- if "Origin" in self.request.headers:
- origin = self.request.headers.get("Origin")
- else:
- origin = self.request.headers.get("Sec-Websocket-Origin", None)
-
- # If there was an origin header, check to make sure it matches
- # according to check_origin. When the origin is None, we assume it
- # did not come from a browser and that it can be passed on.
- if origin is not None and not self.check_origin(origin):
- self.set_status(403)
- log_msg = "Cross origin websockets not allowed"
- self.finish(log_msg)
- gen_log.debug(log_msg)
- return
-
- self.ws_connection = self.get_websocket_protocol()
- if self.ws_connection:
- self.ws_connection.accept_connection()
- else:
- self.set_status(426, "Upgrade Required")
- self.set_header("Sec-WebSocket-Version", "7, 8, 13")
- self.finish()
-
- stream = None
-
- @property
- def ping_interval(self):
- """The interval for websocket keep-alive pings.
-
- Set websocket_ping_interval = 0 to disable pings.
- """
- return self.settings.get('websocket_ping_interval', None)
-
- @property
- def ping_timeout(self):
- """If no ping is received in this many seconds,
- close the websocket connection (VPNs, etc. can fail to cleanly close ws connections).
- Default is max of 3 pings or 30 seconds.
- """
- return self.settings.get('websocket_ping_timeout', None)
-
- @property
- def max_message_size(self):
- """Maximum allowed message size.
-
- If the remote peer sends a message larger than this, the connection
- will be closed.
-
- Default is 10MiB.
- """
- return self.settings.get('websocket_max_message_size', None)
-
- def write_message(self, message, binary=False):
- """Sends the given message to the client of this Web Socket.
-
- The message may be either a string or a dict (which will be
- encoded as json). If the ``binary`` argument is false, the
- message will be sent as utf8; in binary mode any byte string
- is allowed.
-
- If the connection is already closed, raises `WebSocketClosedError`.
-
- .. versionchanged:: 3.2
- `WebSocketClosedError` was added (previously a closed connection
- would raise an `AttributeError`)
-
- .. versionchanged:: 4.3
- Returns a `.Future` which can be used for flow control.
- """
- if self.ws_connection is None:
- raise WebSocketClosedError()
- if isinstance(message, dict):
- message = tornado.escape.json_encode(message)
- return self.ws_connection.write_message(message, binary=binary)
-
- def select_subprotocol(self, subprotocols):
- """Invoked when a new WebSocket requests specific subprotocols.
-
- ``subprotocols`` is a list of strings identifying the
- subprotocols proposed by the client. This method may be
- overridden to return one of those strings to select it, or
- ``None`` to not select a subprotocol. Failure to select a
- subprotocol does not automatically abort the connection,
- although clients may close the connection if none of their
- proposed subprotocols was selected.
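-
- For example, to accept a hypothetical ``chat`` subprotocol whenever
- the client offers it (a sketch)::
-
- def select_subprotocol(self, subprotocols):
- return "chat" if "chat" in subprotocols else None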
- """
- return None
-
- def get_compression_options(self):
- """Override to return compression options for the connection.
-
- If this method returns None (the default), compression will
- be disabled. If it returns a dict (even an empty one), it
- will be enabled. The contents of the dict may be used to
- control the following compression options:
-
- ``compression_level`` specifies the compression level.
-
- ``mem_level`` specifies the amount of memory used for the internal compression state.
-
- These parameters are documented in detail here:
- https://docs.python.org/3.6/library/zlib.html#zlib.compressobj
-
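- For example, to trade some compression ratio for a smaller memory
- footprint, an override might return (a sketch; the values are
- illustrative only)::
-
- def get_compression_options(self):
- return {'compression_level': 6, 'mem_level': 5}
-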
- .. versionadded:: 4.1
-
- .. versionchanged:: 4.5
-
- Added ``compression_level`` and ``mem_level``.
- """
- # TODO: Add wbits option.
- return None
-
- def open(self, *args, **kwargs):
- """Invoked when a new WebSocket is opened.
-
- The arguments to `open` are extracted from the `tornado.web.URLSpec`
- regular expression, just like the arguments to
- `tornado.web.RequestHandler.get`.
- """
- pass
-
- def on_message(self, message):
- """Handle incoming messages on the WebSocket
-
- This method must be overridden.
-
- .. versionchanged:: 4.5
-
- ``on_message`` can be a coroutine.
- """
- raise NotImplementedError
-
- def ping(self, data):
- """Send ping frame to the remote end."""
- if self.ws_connection is None:
- raise WebSocketClosedError()
- self.ws_connection.write_ping(data)
-
- def on_pong(self, data):
- """Invoked when the response to a ping frame is received."""
- pass
-
- def on_ping(self, data):
- """Invoked when the a ping frame is received."""
- pass
-
- def on_close(self):
- """Invoked when the WebSocket is closed.
-
- If the connection was closed cleanly and a status code or reason
- phrase was supplied, these values will be available as the attributes
- ``self.close_code`` and ``self.close_reason``.
-
- .. versionchanged:: 4.0
-
- Added ``close_code`` and ``close_reason`` attributes.
- """
- pass
-
- def close(self, code=None, reason=None):
- """Closes this Web Socket.
-
- Once the close handshake is successful the socket will be closed.
-
- ``code`` may be a numeric status code, taken from the values
- defined in `RFC 6455 section 7.4.1
- <https://tools.ietf.org/html/rfc6455#section-7.4.1>`_.
- ``reason`` may be a textual message about why the connection is
- closing. These values are made available to the client, but are
- not otherwise interpreted by the websocket protocol.
-
- .. versionchanged:: 4.0
-
- Added the ``code`` and ``reason`` arguments.
- """
- if self.ws_connection:
- self.ws_connection.close(code, reason)
- self.ws_connection = None
-
- def check_origin(self, origin):
- """Override to enable support for allowing alternate origins.
-
- The ``origin`` argument is the value of the ``Origin`` HTTP
- header, the url responsible for initiating this request. This
- method is not called for clients that do not send this header;
- such requests are always allowed (because all browsers that
- implement WebSockets support this header, and non-browser
- clients do not have the same cross-site security concerns).
-
- Should return True to accept the request or False to reject it.
- By default, rejects all requests with an origin on a host other
- than this one.
-
- This is a security protection against cross site scripting attacks on
- browsers, since WebSockets are allowed to bypass the usual same-origin
- policies and don't use CORS headers.
-
- .. warning::
-
- This is an important security measure; don't disable it
- without understanding the security implications. In
- particular, if your authentication is cookie-based, you
- must either restrict the origins allowed by
- ``check_origin()`` or implement your own XSRF-like
- protection for websocket connections. See `these
- <https://www.christian-schneider.net/CrossSiteWebSocketHijacking.html>`_
- `articles
- <https://devcenter.heroku.com/articles/websocket-security>`_
- for more.
-
- To accept all cross-origin traffic (which was the default prior to
- Tornado 4.0), simply override this method to always return true::
-
- def check_origin(self, origin):
- return True
-
- To allow connections from any subdomain of your site, you might
- do something like::
-
- def check_origin(self, origin):
- parsed_origin = urllib.parse.urlparse(origin)
- return parsed_origin.netloc.endswith(".mydomain.com")
-
- .. versionadded:: 4.0
-
- """
- parsed_origin = urlparse(origin)
- origin = parsed_origin.netloc
- origin = origin.lower()
-
- host = self.request.headers.get("Host")
-
- # Check to see that origin matches host directly, including ports
- return origin == host
-
- def set_nodelay(self, value):
- """Set the no-delay flag for this stream.
-
- By default, small messages may be delayed and/or combined to minimize
- the number of packets sent. This can sometimes cause 200-500ms delays
- due to the interaction between Nagle's algorithm and TCP delayed
- ACKs. To reduce this delay (at the expense of possibly increasing
- bandwidth usage), call ``self.set_nodelay(True)`` once the websocket
- connection is established.
-
- See `.BaseIOStream.set_nodelay` for additional details.
-
- .. versionadded:: 3.1
- """
- self.stream.set_nodelay(value)
-
- def on_connection_close(self):
- if self.ws_connection:
- self.ws_connection.on_connection_close()
- self.ws_connection = None
- if not self._on_close_called:
- self._on_close_called = True
- self.on_close()
- self._break_cycles()
-
- def _break_cycles(self):
- # WebSocketHandlers call finish() early, but we don't want to
- # break up reference cycles (which makes it impossible to call
- # self.render_string) until after we've really closed the
- # connection (if it was established in the first place,
- # indicated by status code 101).
- if self.get_status() != 101 or self._on_close_called:
- super(WebSocketHandler, self)._break_cycles()
-
- def send_error(self, *args, **kwargs):
- if self.stream is None:
- super(WebSocketHandler, self).send_error(*args, **kwargs)
- else:
- # If we get an uncaught exception during the handshake,
- # we have no choice but to abruptly close the connection.
- # TODO: for uncaught exceptions after the handshake,
- # we can close the connection more gracefully.
- self.stream.close()
-
- def get_websocket_protocol(self):
- websocket_version = self.request.headers.get("Sec-WebSocket-Version")
- if websocket_version in ("7", "8", "13"):
- return WebSocketProtocol13(
- self, compression_options=self.get_compression_options())
-
- def _attach_stream(self):
- self.stream = self.request.connection.detach()
- self.stream.set_close_callback(self.on_connection_close)
- # disable non-WS methods
- for method in ["write", "redirect", "set_header", "set_cookie",
- "set_status", "flush", "finish"]:
- setattr(self, method, _raise_not_supported_for_websockets)
-
-
-def _raise_not_supported_for_websockets(*args, **kwargs):
- raise RuntimeError("Method not supported for Web Sockets")
-
-
-class WebSocketProtocol(object):
- """Base class for WebSocket protocol versions.
- """
- def __init__(self, handler):
- self.handler = handler
- self.request = handler.request
- self.stream = handler.stream
- self.client_terminated = False
- self.server_terminated = False
-
- def _run_callback(self, callback, *args, **kwargs):
- """Runs the given callback with exception handling.
-
- If the callback is a coroutine, returns its Future. On error, aborts the
- websocket connection and returns None.
- """
- try:
- result = callback(*args, **kwargs)
- except Exception:
- app_log.error("Uncaught exception in %s",
- getattr(self.request, 'path', None), exc_info=True)
- self._abort()
- else:
- if result is not None:
- result = gen.convert_yielded(result)
- self.stream.io_loop.add_future(result, lambda f: f.result())
- return result
-
- def on_connection_close(self):
- self._abort()
-
- def _abort(self):
- """Instantly aborts the WebSocket connection by closing the socket"""
- self.client_terminated = True
- self.server_terminated = True
- self.stream.close() # forcibly tear down the connection
- self.close() # let the subclass cleanup
-
-
-class _PerMessageDeflateCompressor(object):
- def __init__(self, persistent, max_wbits, compression_options=None):
- if max_wbits is None:
- max_wbits = zlib.MAX_WBITS
- # There is no symbolic constant for the minimum wbits value.
- if not (8 <= max_wbits <= zlib.MAX_WBITS):
- raise ValueError("Invalid max_wbits value %r; allowed range 8-%d"
- % (max_wbits, zlib.MAX_WBITS))
- self._max_wbits = max_wbits
-
- if compression_options is None or 'compression_level' not in compression_options:
- self._compression_level = tornado.web.GZipContentEncoding.GZIP_LEVEL
- else:
- self._compression_level = compression_options['compression_level']
-
- if compression_options is None or 'mem_level' not in compression_options:
- self._mem_level = 8
- else:
- self._mem_level = compression_options['mem_level']
-
- if persistent:
- self._compressor = self._create_compressor()
- else:
- self._compressor = None
-
- def _create_compressor(self):
- return zlib.compressobj(self._compression_level, zlib.DEFLATED, -self._max_wbits, self._mem_level)
-
- def compress(self, data):
- compressor = self._compressor or self._create_compressor()
- data = (compressor.compress(data) +
- compressor.flush(zlib.Z_SYNC_FLUSH))
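- # Per RFC 7692, a message flushed with Z_SYNC_FLUSH always ends in
- # an empty deflate block (00 00 ff ff); it is stripped from the
- # wire format and restored by the decompressor before inflating.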
- assert data.endswith(b'\x00\x00\xff\xff')
- return data[:-4]
-
-
-class _PerMessageDeflateDecompressor(object):
- def __init__(self, persistent, max_wbits, compression_options=None):
- if max_wbits is None:
- max_wbits = zlib.MAX_WBITS
- if not (8 <= max_wbits <= zlib.MAX_WBITS):
- raise ValueError("Invalid max_wbits value %r; allowed range 8-%d"
- % (max_wbits, zlib.MAX_WBITS))
- self._max_wbits = max_wbits
- if persistent:
- self._decompressor = self._create_decompressor()
- else:
- self._decompressor = None
-
- def _create_decompressor(self):
- return zlib.decompressobj(-self._max_wbits)
-
- def decompress(self, data):
- decompressor = self._decompressor or self._create_decompressor()
- return decompressor.decompress(data + b'\x00\x00\xff\xff')
-
-
-class WebSocketProtocol13(WebSocketProtocol):
- """Implementation of the WebSocket protocol from RFC 6455.
-
- This class supports versions 7 and 8 of the protocol in addition to the
- final version 13.
- """
- # Bit masks for the first byte of a frame.
- FIN = 0x80
- RSV1 = 0x40
- RSV2 = 0x20
- RSV3 = 0x10
- RSV_MASK = RSV1 | RSV2 | RSV3
- OPCODE_MASK = 0x0f
-
- def __init__(self, handler, mask_outgoing=False,
- compression_options=None):
- WebSocketProtocol.__init__(self, handler)
- self.mask_outgoing = mask_outgoing
- self._final_frame = False
- self._frame_opcode = None
- self._masked_frame = None
- self._frame_mask = None
- self._frame_length = None
- self._fragmented_message_buffer = None
- self._fragmented_message_opcode = None
- self._waiting = None
- self._compression_options = compression_options
- self._decompressor = None
- self._compressor = None
- self._frame_compressed = None
- # The total uncompressed size of all messages received or sent.
- # Unicode messages are encoded to utf8.
- # Only for testing; subject to change.
- self._message_bytes_in = 0
- self._message_bytes_out = 0
- # The total size of all packets received or sent. Includes
- # the effect of compression, frame overhead, and control frames.
- self._wire_bytes_in = 0
- self._wire_bytes_out = 0
- self.ping_callback = None
- self.last_ping = 0
- self.last_pong = 0
-
- def accept_connection(self):
- try:
- self._handle_websocket_headers()
- except ValueError:
- self.handler.set_status(400)
- log_msg = "Missing/Invalid WebSocket headers"
- self.handler.finish(log_msg)
- gen_log.debug(log_msg)
- return
-
- try:
- self._accept_connection()
- except ValueError:
- gen_log.debug("Malformed WebSocket request received",
- exc_info=True)
- self._abort()
- return
-
- def _handle_websocket_headers(self):
- """Verifies all invariant- and required headers
-
- If a header is missing or have an incorrect value ValueError will be
- raised
- """
- fields = ("Host", "Sec-Websocket-Key", "Sec-Websocket-Version")
- if not all(map(lambda f: self.request.headers.get(f), fields)):
- raise ValueError("Missing/Invalid WebSocket headers")
-
- @staticmethod
- def compute_accept_value(key):
- """Computes the value for the Sec-WebSocket-Accept header,
- given the value for Sec-WebSocket-Key.
- """
- sha1 = hashlib.sha1()
- sha1.update(utf8(key))
- sha1.update(b"258EAFA5-E914-47DA-95CA-C5AB0DC85B11") # Magic value
- return native_str(base64.b64encode(sha1.digest()))
-
- def _challenge_response(self):
- return WebSocketProtocol13.compute_accept_value(
- self.request.headers.get("Sec-Websocket-Key"))
-
- def _accept_connection(self):
- subprotocols = self.request.headers.get("Sec-WebSocket-Protocol", '')
- subprotocols = [s.strip() for s in subprotocols.split(',')]
- if subprotocols:
- selected = self.handler.select_subprotocol(subprotocols)
- if selected:
- assert selected in subprotocols
- self.handler.set_header("Sec-WebSocket-Protocol", selected)
-
- extensions = self._parse_extensions_header(self.request.headers)
- for ext in extensions:
- if (ext[0] == 'permessage-deflate' and
- self._compression_options is not None):
- # TODO: negotiate parameters if compression_options
- # specifies limits.
- self._create_compressors('server', ext[1], self._compression_options)
- if ('client_max_window_bits' in ext[1] and
- ext[1]['client_max_window_bits'] is None):
- # Don't echo an offered client_max_window_bits
- # parameter with no value.
- del ext[1]['client_max_window_bits']
- self.handler.set_header("Sec-WebSocket-Extensions",
- httputil._encode_header(
- 'permessage-deflate', ext[1]))
- break
-
- self.handler.clear_header("Content-Type")
- self.handler.set_status(101)
- self.handler.set_header("Upgrade", "websocket")
- self.handler.set_header("Connection", "Upgrade")
- self.handler.set_header("Sec-WebSocket-Accept", self._challenge_response())
- self.handler.finish()
-
- self.handler._attach_stream()
- self.stream = self.handler.stream
-
- self.start_pinging()
- self._run_callback(self.handler.open, *self.handler.open_args,
- **self.handler.open_kwargs)
- self._receive_frame()
-
- def _parse_extensions_header(self, headers):
- extensions = headers.get("Sec-WebSocket-Extensions", '')
- if extensions:
- return [httputil._parse_header(e.strip())
- for e in extensions.split(',')]
- return []
-
- def _process_server_headers(self, key, headers):
- """Process the headers sent by the server to this client connection.
-
- 'key' is the websocket handshake challenge/response key.
- """
- assert headers['Upgrade'].lower() == 'websocket'
- assert headers['Connection'].lower() == 'upgrade'
- accept = self.compute_accept_value(key)
- assert headers['Sec-Websocket-Accept'] == accept
-
- extensions = self._parse_extensions_header(headers)
- for ext in extensions:
- if (ext[0] == 'permessage-deflate' and
- self._compression_options is not None):
- self._create_compressors('client', ext[1])
- else:
- raise ValueError("unsupported extension %r", ext)
-
- def _get_compressor_options(self, side, agreed_parameters, compression_options=None):
- """Converts a websocket agreed_parameters set to keyword arguments
- for our compressor objects.
- """
- options = dict(
- persistent=(side + '_no_context_takeover') not in agreed_parameters)
- wbits_header = agreed_parameters.get(side + '_max_window_bits', None)
- if wbits_header is None:
- options['max_wbits'] = zlib.MAX_WBITS
- else:
- options['max_wbits'] = int(wbits_header)
- options['compression_options'] = compression_options
- return options
-
- def _create_compressors(self, side, agreed_parameters, compression_options=None):
- # TODO: handle invalid parameters gracefully
- allowed_keys = set(['server_no_context_takeover',
- 'client_no_context_takeover',
- 'server_max_window_bits',
- 'client_max_window_bits'])
- for key in agreed_parameters:
- if key not in allowed_keys:
- raise ValueError("unsupported compression parameter %r" % key)
- other_side = 'client' if (side == 'server') else 'server'
- self._compressor = _PerMessageDeflateCompressor(
- **self._get_compressor_options(side, agreed_parameters, compression_options))
- self._decompressor = _PerMessageDeflateDecompressor(
- **self._get_compressor_options(other_side, agreed_parameters, compression_options))
-
- def _write_frame(self, fin, opcode, data, flags=0):
- if fin:
- finbit = self.FIN
- else:
- finbit = 0
- frame = struct.pack("B", finbit | opcode | flags)
- l = len(data)
- if self.mask_outgoing:
- mask_bit = 0x80
- else:
- mask_bit = 0
- if l < 126:
- frame += struct.pack("B", l | mask_bit)
- elif l <= 0xFFFF:
- frame += struct.pack("!BH", 126 | mask_bit, l)
- else:
- frame += struct.pack("!BQ", 127 | mask_bit, l)
- if self.mask_outgoing:
- mask = os.urandom(4)
- data = mask + _websocket_mask(mask, data)
- frame += data
- self._wire_bytes_out += len(frame)
- try:
- return self.stream.write(frame)
- except StreamClosedError:
- self._abort()
-
- def write_message(self, message, binary=False):
- """Sends the given message to the client of this Web Socket."""
- if binary:
- opcode = 0x2
- else:
- opcode = 0x1
- message = tornado.escape.utf8(message)
- assert isinstance(message, bytes)
- self._message_bytes_out += len(message)
- flags = 0
- if self._compressor:
- message = self._compressor.compress(message)
- flags |= self.RSV1
- return self._write_frame(True, opcode, message, flags=flags)
-
- def write_ping(self, data):
- """Send ping frame."""
- assert isinstance(data, bytes)
- self._write_frame(True, 0x9, data)
-
- def _receive_frame(self):
- try:
- self.stream.read_bytes(2, self._on_frame_start)
- except StreamClosedError:
- self._abort()
-
- def _on_frame_start(self, data):
- self._wire_bytes_in += len(data)
- header, payloadlen = struct.unpack("BB", data)
- self._final_frame = header & self.FIN
- reserved_bits = header & self.RSV_MASK
- self._frame_opcode = header & self.OPCODE_MASK
- self._frame_opcode_is_control = self._frame_opcode & 0x8
- if self._decompressor is not None and self._frame_opcode != 0:
- self._frame_compressed = bool(reserved_bits & self.RSV1)
- reserved_bits &= ~self.RSV1
- if reserved_bits:
- # client is using as-yet-undefined extensions; abort
- self._abort()
- return
- self._masked_frame = bool(payloadlen & 0x80)
- payloadlen = payloadlen & 0x7f
- if self._frame_opcode_is_control and payloadlen >= 126:
- # control frames must have payload < 126
- self._abort()
- return
- try:
- if payloadlen < 126:
- self._frame_length = payloadlen
- if self._masked_frame:
- self.stream.read_bytes(4, self._on_masking_key)
- else:
- self._read_frame_data(False)
- elif payloadlen == 126:
- self.stream.read_bytes(2, self._on_frame_length_16)
- elif payloadlen == 127:
- self.stream.read_bytes(8, self._on_frame_length_64)
- except StreamClosedError:
- self._abort()
-
- def _read_frame_data(self, masked):
- new_len = self._frame_length
- if self._fragmented_message_buffer is not None:
- new_len += len(self._fragmented_message_buffer)
- if new_len > (self.handler.max_message_size or 10 * 1024 * 1024):
- self.close(1009, "message too big")
- return
- self.stream.read_bytes(
- self._frame_length,
- self._on_masked_frame_data if masked else self._on_frame_data)
-
- def _on_frame_length_16(self, data):
- self._wire_bytes_in += len(data)
- self._frame_length = struct.unpack("!H", data)[0]
- try:
- if self._masked_frame:
- self.stream.read_bytes(4, self._on_masking_key)
- else:
- self._read_frame_data(False)
- except StreamClosedError:
- self._abort()
-
- def _on_frame_length_64(self, data):
- self._wire_bytes_in += len(data)
- self._frame_length = struct.unpack("!Q", data)[0]
- try:
- if self._masked_frame:
- self.stream.read_bytes(4, self._on_masking_key)
- else:
- self._read_frame_data(False)
- except StreamClosedError:
- self._abort()
-
- def _on_masking_key(self, data):
- self._wire_bytes_in += len(data)
- self._frame_mask = data
- try:
- self._read_frame_data(True)
- except StreamClosedError:
- self._abort()
-
- def _on_masked_frame_data(self, data):
- # Don't touch _wire_bytes_in; we'll do it in _on_frame_data.
- self._on_frame_data(_websocket_mask(self._frame_mask, data))
-
- def _on_frame_data(self, data):
- handled_future = None
-
- self._wire_bytes_in += len(data)
- if self._frame_opcode_is_control:
- # control frames may be interleaved with a series of fragmented
- # data frames, so control frames must not interact with
- # self._fragmented_*
- if not self._final_frame:
- # control frames must not be fragmented
- self._abort()
- return
- opcode = self._frame_opcode
- elif self._frame_opcode == 0: # continuation frame
- if self._fragmented_message_buffer is None:
- # nothing to continue
- self._abort()
- return
- self._fragmented_message_buffer += data
- if self._final_frame:
- opcode = self._fragmented_message_opcode
- data = self._fragmented_message_buffer
- self._fragmented_message_buffer = None
- else: # start of new data message
- if self._fragmented_message_buffer is not None:
- # can't start new message until the old one is finished
- self._abort()
- return
- if self._final_frame:
- opcode = self._frame_opcode
- else:
- self._fragmented_message_opcode = self._frame_opcode
- self._fragmented_message_buffer = data
-
- if self._final_frame:
- handled_future = self._handle_message(opcode, data)
-
- if not self.client_terminated:
- if handled_future:
- # on_message is a coroutine, process more frames once it's done.
- handled_future.add_done_callback(
- lambda future: self._receive_frame())
- else:
- self._receive_frame()
-
- def _handle_message(self, opcode, data):
- """Execute on_message, returning its Future if it is a coroutine."""
- if self.client_terminated:
- return
-
- if self._frame_compressed:
- data = self._decompressor.decompress(data)
-
- if opcode == 0x1:
- # UTF-8 data
- self._message_bytes_in += len(data)
- try:
- decoded = data.decode("utf-8")
- except UnicodeDecodeError:
- self._abort()
- return
- return self._run_callback(self.handler.on_message, decoded)
- elif opcode == 0x2:
- # Binary data
- self._message_bytes_in += len(data)
- return self._run_callback(self.handler.on_message, data)
- elif opcode == 0x8:
- # Close
- self.client_terminated = True
- if len(data) >= 2:
- self.handler.close_code = struct.unpack('>H', data[:2])[0]
- if len(data) > 2:
- self.handler.close_reason = to_unicode(data[2:])
- # Echo the received close code, if any (RFC 6455 section 5.5.1).
- self.close(self.handler.close_code)
- elif opcode == 0x9:
- # Ping
- self._write_frame(True, 0xA, data)
- self._run_callback(self.handler.on_ping, data)
- elif opcode == 0xA:
- # Pong
- self.last_pong = IOLoop.current().time()
- return self._run_callback(self.handler.on_pong, data)
- else:
- self._abort()
-
- def close(self, code=None, reason=None):
- """Closes the WebSocket connection."""
- if not self.server_terminated:
- if not self.stream.closed():
- if code is None and reason is not None:
- code = 1000 # "normal closure" status code
- if code is None:
- close_data = b''
- else:
- close_data = struct.pack('>H', code)
- if reason is not None:
- close_data += utf8(reason)
- self._write_frame(True, 0x8, close_data)
- self.server_terminated = True
- if self.client_terminated:
- if self._waiting is not None:
- self.stream.io_loop.remove_timeout(self._waiting)
- self._waiting = None
- self.stream.close()
- elif self._waiting is None:
- # Give the client a few seconds to complete a clean shutdown,
- # otherwise just close the connection.
- self._waiting = self.stream.io_loop.add_timeout(
- self.stream.io_loop.time() + 5, self._abort)
-
- @property
- def ping_interval(self):
- interval = self.handler.ping_interval
- if interval is not None:
- return interval
- return 0
-
- @property
- def ping_timeout(self):
- timeout = self.handler.ping_timeout
- if timeout is not None:
- return timeout
- return max(3 * self.ping_interval, 30)
-
- def start_pinging(self):
- """Start sending periodic pings to keep the connection alive"""
- if self.ping_interval > 0:
- self.last_ping = self.last_pong = IOLoop.current().time()
- self.ping_callback = PeriodicCallback(
- self.periodic_ping, self.ping_interval * 1000)
- self.ping_callback.start()
-
- def periodic_ping(self):
- """Send a ping to keep the websocket alive
-
- Called periodically if the websocket_ping_interval is set and non-zero.
- """
- if self.stream.closed() and self.ping_callback is not None:
- self.ping_callback.stop()
- return
-
- # Check for timeout on pong. Make sure that we really have
- # sent a recent ping in case the machine with both server and
- # client has been suspended since the last ping.
- now = IOLoop.current().time()
- since_last_pong = now - self.last_pong
- since_last_ping = now - self.last_ping
- if (since_last_ping < 2 * self.ping_interval and
- since_last_pong > self.ping_timeout):
- self.close()
- return
-
- self.write_ping(b'')
- self.last_ping = now
-
-
-class WebSocketClientConnection(simple_httpclient._HTTPConnection):
- """WebSocket client connection.
-
- This class should not be instantiated directly; use the
- `websocket_connect` function instead.
- """
- def __init__(self, io_loop, request, on_message_callback=None,
- compression_options=None, ping_interval=None, ping_timeout=None,
- max_message_size=None):
- self.compression_options = compression_options
- self.connect_future = TracebackFuture()
- self.protocol = None
- self.read_future = None
- self.read_queue = collections.deque()
- self.key = base64.b64encode(os.urandom(16))
- self._on_message_callback = on_message_callback
- self.close_code = self.close_reason = None
- self.ping_interval = ping_interval
- self.ping_timeout = ping_timeout
- self.max_message_size = max_message_size
-
- scheme, sep, rest = request.url.partition(':')
- scheme = {'ws': 'http', 'wss': 'https'}[scheme]
- request.url = scheme + sep + rest
- request.headers.update({
- 'Upgrade': 'websocket',
- 'Connection': 'Upgrade',
- 'Sec-WebSocket-Key': self.key,
- 'Sec-WebSocket-Version': '13',
- })
- if self.compression_options is not None:
- # Always offer to let the server set our max_wbits (and even though
- # we don't offer it, we will accept a client_no_context_takeover
- # from the server).
- # TODO: set server parameters for deflate extension
- # if requested in self.compression_options.
- request.headers['Sec-WebSocket-Extensions'] = (
- 'permessage-deflate; client_max_window_bits')
-
- self.tcp_client = TCPClient(io_loop=io_loop)
- super(WebSocketClientConnection, self).__init__(
- io_loop, None, request, lambda: None, self._on_http_response,
- 104857600, self.tcp_client, 65536, 104857600)
-
- def close(self, code=None, reason=None):
- """Closes the websocket connection.
-
- ``code`` and ``reason`` are documented under
- `WebSocketHandler.close`.
-
- .. versionadded:: 3.2
-
- .. versionchanged:: 4.0
-
- Added the ``code`` and ``reason`` arguments.
- """
- if self.protocol is not None:
- self.protocol.close(code, reason)
- self.protocol = None
-
- def on_connection_close(self):
- if not self.connect_future.done():
- self.connect_future.set_exception(StreamClosedError())
- self.on_message(None)
- self.tcp_client.close()
- super(WebSocketClientConnection, self).on_connection_close()
-
- def _on_http_response(self, response):
- if not self.connect_future.done():
- if response.error:
- self.connect_future.set_exception(response.error)
- else:
- self.connect_future.set_exception(WebSocketError(
- "Non-websocket response"))
-
- def headers_received(self, start_line, headers):
- if start_line.code != 101:
- return super(WebSocketClientConnection, self).headers_received(
- start_line, headers)
-
- self.headers = headers
- self.protocol = self.get_websocket_protocol()
- self.protocol._process_server_headers(self.key, self.headers)
- self.protocol.start_pinging()
- self.protocol._receive_frame()
-
- if self._timeout is not None:
- self.io_loop.remove_timeout(self._timeout)
- self._timeout = None
-
- self.stream = self.connection.detach()
- self.stream.set_close_callback(self.on_connection_close)
- # Once we've taken over the connection, clear the final callback
- # we set on the http request. This deactivates the error handling
- # in simple_httpclient that would otherwise interfere with our
- # ability to see exceptions.
- self.final_callback = None
-
- self.connect_future.set_result(self)
-
- def write_message(self, message, binary=False):
- """Sends a message to the WebSocket server."""
- return self.protocol.write_message(message, binary)
-
- def read_message(self, callback=None):
- """Reads a message from the WebSocket server.
-
- If on_message_callback was specified at WebSocket
- initialization, this function will never return messages.
-
- Returns a future whose result is the message, or None
- if the connection is closed. If a callback argument
- is given it will be called with the future when it is
- ready.
- """
- assert self.read_future is None
- future = TracebackFuture()
- if self.read_queue:
- future.set_result(self.read_queue.popleft())
- else:
- self.read_future = future
- if callback is not None:
- self.io_loop.add_future(future, callback)
- return future
-
- def on_message(self, message):
- if self._on_message_callback:
- self._on_message_callback(message)
- elif self.read_future is not None:
- self.read_future.set_result(message)
- self.read_future = None
- else:
- self.read_queue.append(message)
-
- def on_pong(self, data):
- pass
-
- def on_ping(self, data):
- pass
-
- def get_websocket_protocol(self):
- return WebSocketProtocol13(self, mask_outgoing=True,
- compression_options=self.compression_options)
-
-
-def websocket_connect(url, io_loop=None, callback=None, connect_timeout=None,
- on_message_callback=None, compression_options=None,
- ping_interval=None, ping_timeout=None,
- max_message_size=None):
- """Client-side websocket support.
-
- Takes a url and returns a Future whose result is a
- `WebSocketClientConnection`.
-
- ``compression_options`` is interpreted in the same way as the
- return value of `.WebSocketHandler.get_compression_options`.
-
- The connection supports two styles of operation. In the coroutine
- style, the application typically calls
- `~.WebSocketClientConnection.read_message` in a loop::
-
- conn = yield websocket_connect(url)
- while True:
- msg = yield conn.read_message()
- if msg is None: break
- # Do something with msg
-
- In the callback style, pass an ``on_message_callback`` to
- ``websocket_connect``. In both styles, a message of ``None``
- indicates that the connection has been closed.
-
- .. versionchanged:: 3.2
- Also accepts ``HTTPRequest`` objects in place of urls.
-
- .. versionchanged:: 4.1
- Added ``compression_options`` and ``on_message_callback``.
- The ``io_loop`` argument is deprecated.
-
- .. versionchanged:: 4.5
- Added the ``ping_interval``, ``ping_timeout``, and ``max_message_size``
- arguments, which have the same meaning as in `WebSocketHandler`.
- """
- if io_loop is None:
- io_loop = IOLoop.current()
- if isinstance(url, httpclient.HTTPRequest):
- assert connect_timeout is None
- request = url
- # Copy and convert the headers dict/object (see comments in
- # AsyncHTTPClient.fetch)
- request.headers = httputil.HTTPHeaders(request.headers)
- else:
- request = httpclient.HTTPRequest(url, connect_timeout=connect_timeout)
- request = httpclient._RequestProxy(
- request, httpclient.HTTPRequest._DEFAULTS)
- conn = WebSocketClientConnection(io_loop, request,
- on_message_callback=on_message_callback,
- compression_options=compression_options,
- ping_interval=ping_interval,
- ping_timeout=ping_timeout,
- max_message_size=max_message_size)
- if callback is not None:
- io_loop.add_future(conn.connect_future, callback)
- return conn.connect_future
+"""Implementation of the WebSocket protocol.
+
+`WebSockets <http://dev.w3.org/html5/websockets/>`_ allow for bidirectional
+communication between the browser and server.
+
+WebSockets are supported in the current versions of all major browsers,
+although older versions that do not support WebSockets are still in use
+(refer to http://caniuse.com/websockets for details).
+
+This module implements the final version of the WebSocket protocol as
+defined in `RFC 6455 <http://tools.ietf.org/html/rfc6455>`_. Certain
+browser versions (notably Safari 5.x) implemented an earlier draft of
+the protocol (known as "draft 76") and are not compatible with this module.
+
+.. versionchanged:: 4.0
+ Removed support for the draft 76 protocol version.
+"""
+
+from __future__ import absolute_import, division, print_function
+# Author: Jacob Kristhammar, 2010
+
+import base64
+import collections
+import hashlib
+import os
+import struct
+import tornado.escape
+import tornado.web
+import zlib
+
+from tornado.concurrent import TracebackFuture
+from tornado.escape import utf8, native_str, to_unicode
+from tornado import gen, httpclient, httputil
+from tornado.ioloop import IOLoop, PeriodicCallback
+from tornado.iostream import StreamClosedError
+from tornado.log import gen_log, app_log
+from tornado import simple_httpclient
+from tornado.tcpclient import TCPClient
+from tornado.util import _websocket_mask, PY3
+
+if PY3:
+ from urllib.parse import urlparse # py3
+ xrange = range
+else:
+ from urlparse import urlparse # py2
+
+
+class WebSocketError(Exception):
+ pass
+
+
+class WebSocketClosedError(WebSocketError):
+ """Raised by operations on a closed connection.
+
+ .. versionadded:: 3.2
+ """
+ pass
+
+
+class WebSocketHandler(tornado.web.RequestHandler):
+ """Subclass this class to create a basic WebSocket handler.
+
+ Override `on_message` to handle incoming messages, and use
+ `write_message` to send messages to the client. You can also
+ override `open` and `on_close` to handle opened and closed
+ connections.
+
+ Custom upgrade response headers can be sent by overriding
+ `~tornado.web.RequestHandler.set_default_headers` or
+ `~tornado.web.RequestHandler.prepare`.
+
+ See http://dev.w3.org/html5/websockets/ for details on the
+ JavaScript interface. The protocol is specified at
+ http://tools.ietf.org/html/rfc6455.
+
+ Here is an example WebSocket handler that echoes all received messages
+ back to the client:
+
+ .. testcode::
+
+ class EchoWebSocket(tornado.websocket.WebSocketHandler):
+ def open(self):
+ print("WebSocket opened")
+
+ def on_message(self, message):
+ self.write_message(u"You said: " + message)
+
+ def on_close(self):
+ print("WebSocket closed")
+
+ .. testoutput::
+ :hide:
+
+ WebSockets are not standard HTTP connections. The "handshake" is
+ HTTP, but after the handshake, the protocol is
+ message-based. Consequently, most of the Tornado HTTP facilities
+ are not available in handlers of this type. The only communication
+ methods available to you are `write_message()`, `ping()`, and
+ `close()`. Likewise, your request handler class should implement
+ the `open()` method rather than ``get()`` or ``post()``.
+
+ If you map the handler above to ``/websocket`` in your application, you can
+ invoke it in JavaScript with::
+
+ var ws = new WebSocket("ws://localhost:8888/websocket");
+ ws.onopen = function() {
+ ws.send("Hello, world");
+ };
+ ws.onmessage = function (evt) {
+ alert(evt.data);
+ };
+
+ This script pops up an alert box that says "You said: Hello, world".
+
+ Web browsers allow any site to open a websocket connection to any other,
+ instead of using the same-origin policy that governs other network
+ access from javascript. This can be surprising and is a potential
+ security hole, so since Tornado 4.0 `WebSocketHandler` requires
+ applications that wish to receive cross-origin websockets to opt in
+ by overriding the `~WebSocketHandler.check_origin` method (see that
+ method's docs for details). Failure to do so is the most likely
+ cause of 403 errors when making a websocket connection.
+
+ When using a secure websocket connection (``wss://``) with a self-signed
+ certificate, the connection from a browser may fail because it wants
+ to show the "accept this certificate" dialog but has nowhere to show it.
+ You must first visit a regular HTML page using the same certificate
+ to accept it before the websocket connection will succeed.
+
+ If the application setting ``websocket_ping_interval`` has a non-zero
+ value, a ping will be sent periodically, and the connection will be
+ closed if a response is not received before the ``websocket_ping_timeout``.
+
+ Messages larger than the ``websocket_max_message_size`` application setting
+ (default 10MiB) will not be accepted.
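+
+ As a sketch, these settings are passed to the application
+ constructor (the handler mapping and values below are illustrative,
+ not defaults)::
+
+     app = tornado.web.Application(
+         [(r"/websocket", EchoWebSocket)],
+         websocket_ping_interval=10,
+         websocket_ping_timeout=30,
+         websocket_max_message_size=10 * 1024 * 1024,
+     )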
+
+ .. versionchanged:: 4.5
+ Added ``websocket_ping_interval``, ``websocket_ping_timeout``, and
+ ``websocket_max_message_size``.
+ """
+ def __init__(self, application, request, **kwargs):
+ super(WebSocketHandler, self).__init__(application, request, **kwargs)
+ self.ws_connection = None
+ self.close_code = None
+ self.close_reason = None
+ self.stream = None
+ self._on_close_called = False
+
+ @tornado.web.asynchronous
+ def get(self, *args, **kwargs):
+ self.open_args = args
+ self.open_kwargs = kwargs
+
+ # Upgrade header should be present and should be equal to WebSocket
+ if self.request.headers.get("Upgrade", "").lower() != 'websocket':
+ self.set_status(400)
+ log_msg = "Can \"Upgrade\" only to \"WebSocket\"."
+ self.finish(log_msg)
+ gen_log.debug(log_msg)
+ return
+
+ # Connection header should be upgrade.
+ # Some proxy servers/load balancers
+ # might mess with it.
+ headers = self.request.headers
+ connection = map(lambda s: s.strip().lower(),
+ headers.get("Connection", "").split(","))
+ if 'upgrade' not in connection:
+ self.set_status(400)
+ log_msg = "\"Connection\" must be \"Upgrade\"."
+ self.finish(log_msg)
+ gen_log.debug(log_msg)
+ return
+
+ # Handle WebSocket Origin naming convention differences
+ # The difference between version 8 and 13 is that in 8 the
+ # client sends a "Sec-Websocket-Origin" header and in 13 it's
+ # simply "Origin".
+ if "Origin" in self.request.headers:
+ origin = self.request.headers.get("Origin")
+ else:
+ origin = self.request.headers.get("Sec-Websocket-Origin", None)
+
+ # If there was an origin header, check to make sure it matches
+ # according to check_origin. When the origin is None, we assume it
+ # did not come from a browser and that it can be passed on.
+ if origin is not None and not self.check_origin(origin):
+ self.set_status(403)
+ log_msg = "Cross origin websockets not allowed"
+ self.finish(log_msg)
+ gen_log.debug(log_msg)
+ return
+
+ self.ws_connection = self.get_websocket_protocol()
+ if self.ws_connection:
+ self.ws_connection.accept_connection()
+ else:
+ self.set_status(426, "Upgrade Required")
+ self.set_header("Sec-WebSocket-Version", "7, 8, 13")
+ self.finish()
+
+ stream = None
+
+ @property
+ def ping_interval(self):
+ """The interval for websocket keep-alive pings.
+
+ Set websocket_ping_interval = 0 to disable pings.
+ """
+ return self.settings.get('websocket_ping_interval', None)
+
+ @property
+ def ping_timeout(self):
+ """If no ping is received in this many seconds,
+ close the websocket connection (VPNs, etc. can fail to cleanly close ws connections).
+ Default is max of 3 pings or 30 seconds.
+ """
+ return self.settings.get('websocket_ping_timeout', None)
+
+ @property
+ def max_message_size(self):
+ """Maximum allowed message size.
+
+ If the remote peer sends a message larger than this, the connection
+ will be closed.
+
+ Default is 10MiB.
+ """
+ return self.settings.get('websocket_max_message_size', None)
+
+ def write_message(self, message, binary=False):
+ """Sends the given message to the client of this Web Socket.
+
+ The message may be either a string or a dict (which will be
+ encoded as json). If the ``binary`` argument is false, the
+ message will be sent as utf8; in binary mode any byte string
+ is allowed.
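+
+ A minimal usage sketch (the payloads are illustrative)::
+
+     self.write_message(u"hello")                  # text frame
+     self.write_message({"status": "ok"})          # dict is JSON-encoded
+     self.write_message(b"\x01\x02", binary=True)  # binary frame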
+
+ If the connection is already closed, raises `WebSocketClosedError`.
+
+ .. versionchanged:: 3.2
+ `WebSocketClosedError` was added (previously a closed connection
+ would raise an `AttributeError`)
+
+ .. versionchanged:: 4.3
+ Returns a `.Future` which can be used for flow control.
+ """
+ if self.ws_connection is None:
+ raise WebSocketClosedError()
+ if isinstance(message, dict):
+ message = tornado.escape.json_encode(message)
+ return self.ws_connection.write_message(message, binary=binary)
+
+ def select_subprotocol(self, subprotocols):
+ """Invoked when a new WebSocket requests specific subprotocols.
+
+ ``subprotocols`` is a list of strings identifying the
+ subprotocols proposed by the client. This method may be
+ overridden to return one of those strings to select it, or
+ ``None`` to not select a subprotocol. Failure to select a
+ subprotocol does not automatically abort the connection,
+ although clients may close the connection if none of their
+ proposed subprotocols was selected.
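+
+ For example, a sketch that selects the first client proposal from a
+ hypothetical set of subprotocols the server understands::
+
+     def select_subprotocol(self, subprotocols):
+         supported = {"chat.v1", "chat.v2"}  # hypothetical names
+         for proto in subprotocols:
+             if proto in supported:
+                 return proto
+         return None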
+ """
+ return None
+
+ def get_compression_options(self):
+ """Override to return compression options for the connection.
+
+ If this method returns None (the default), compression will
+ be disabled. If it returns a dict (even an empty one), it
+ will be enabled. The contents of the dict may be used to
+ control the following compression options:
+
+ ``compression_level`` specifies the compression level.
+
+ ``mem_level`` specifies the amount of memory used for the internal compression state.
+
+ These parameters are documented in detail here:
+ https://docs.python.org/3.6/library/zlib.html#zlib.compressobj
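+
+ For example, a sketch enabling compression with explicit (illustrative)
+ settings::
+
+     def get_compression_options(self):
+         # An empty dict would also enable compression, with defaults.
+         return {'compression_level': 6, 'mem_level': 8}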
+
+ .. versionadded:: 4.1
+
+ .. versionchanged:: 4.5
+
+ Added ``compression_level`` and ``mem_level``.
+ """
+ # TODO: Add wbits option.
+ return None
+
+ def open(self, *args, **kwargs):
+ """Invoked when a new WebSocket is opened.
+
+ The arguments to `open` are extracted from the `tornado.web.URLSpec`
+ regular expression, just like the arguments to
+ `tornado.web.RequestHandler.get`.
+ """
+ pass
+
+ def on_message(self, message):
+ """Handle incoming messages on the WebSocket
+
+ This method must be overridden.
+
+ .. versionchanged:: 4.5
+
+ ``on_message`` can be a coroutine.
+ """
+ raise NotImplementedError
+
+ def ping(self, data):
+ """Send ping frame to the remote end."""
+ if self.ws_connection is None:
+ raise WebSocketClosedError()
+ self.ws_connection.write_ping(data)
+
+ def on_pong(self, data):
+ """Invoked when the response to a ping frame is received."""
+ pass
+
+ def on_ping(self, data):
+ """Invoked when the a ping frame is received."""
+ pass
+
+ def on_close(self):
+ """Invoked when the WebSocket is closed.
+
+ If the connection was closed cleanly and a status code or reason
+ phrase was supplied, these values will be available as the attributes
+ ``self.close_code`` and ``self.close_reason``.
+
+ .. versionchanged:: 4.0
+
+ Added ``close_code`` and ``close_reason`` attributes.
+ """
+ pass
+
+ def close(self, code=None, reason=None):
+ """Closes this Web Socket.
+
+ Once the close handshake is successful the socket will be closed.
+
+ ``code`` may be a numeric status code, taken from the values
+ defined in `RFC 6455 section 7.4.1
+ <https://tools.ietf.org/html/rfc6455#section-7.4.1>`_.
+ ``reason`` may be a textual message about why the connection is
+ closing. These values are made available to the client, but are
+ not otherwise interpreted by the websocket protocol.
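+
+ For example (status code 1001, "going away" per RFC 6455, and the
+ reason text are illustrative)::
+
+     self.close(1001, "server going away")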
+
+ .. versionchanged:: 4.0
+
+ Added the ``code`` and ``reason`` arguments.
+ """
+ if self.ws_connection:
+ self.ws_connection.close(code, reason)
+ self.ws_connection = None
+
+ def check_origin(self, origin):
+ """Override to enable support for allowing alternate origins.
+
+ The ``origin`` argument is the value of the ``Origin`` HTTP
+ header, the url responsible for initiating this request. This
+ method is not called for clients that do not send this header;
+ such requests are always allowed (because all browsers that
+ implement WebSockets support this header, and non-browser
+ clients do not have the same cross-site security concerns).
+
+ Should return True to accept the request or False to reject it.
+ By default, rejects all requests with an origin on a host other
+ than this one.
+
+ This is a security protection against cross site scripting attacks on
+ browsers, since WebSockets are allowed to bypass the usual same-origin
+ policies and don't use CORS headers.
+
+ .. warning::
+
+ This is an important security measure; don't disable it
+ without understanding the security implications. In
+ particular, if your authentication is cookie-based, you
+ must either restrict the origins allowed by
+ ``check_origin()`` or implement your own XSRF-like
+ protection for websocket connections. See `these
+ <https://www.christian-schneider.net/CrossSiteWebSocketHijacking.html>`_
+ `articles
+ <https://devcenter.heroku.com/articles/websocket-security>`_
+ for more.
+
+ To accept all cross-origin traffic (which was the default prior to
+ Tornado 4.0), simply override this method to always return true::
+
+ def check_origin(self, origin):
+ return True
+
+ To allow connections from any subdomain of your site, you might
+ do something like::
+
+ def check_origin(self, origin):
+ parsed_origin = urllib.parse.urlparse(origin)
+ return parsed_origin.netloc.endswith(".mydomain.com")
+
+ .. versionadded:: 4.0
+
+ """
+ parsed_origin = urlparse(origin)
+ origin = parsed_origin.netloc
+ origin = origin.lower()
+
+ host = self.request.headers.get("Host")
+
+ # Check to see that origin matches host directly, including ports
+ return origin == host
+
+ def set_nodelay(self, value):
+ """Set the no-delay flag for this stream.
+
+ By default, small messages may be delayed and/or combined to minimize
+ the number of packets sent. This can sometimes cause 200-500ms delays
+ due to the interaction between Nagle's algorithm and TCP delayed
+ ACKs. To reduce this delay (at the expense of possibly increasing
+ bandwidth usage), call ``self.set_nodelay(True)`` once the websocket
+ connection is established.
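+
+ A minimal sketch of doing this as soon as the connection opens::
+
+     def open(self):
+         self.set_nodelay(True)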
+
+ See `.BaseIOStream.set_nodelay` for additional details.
+
+ .. versionadded:: 3.1
+ """
+ self.stream.set_nodelay(value)
+
+ def on_connection_close(self):
+ if self.ws_connection:
+ self.ws_connection.on_connection_close()
+ self.ws_connection = None
+ if not self._on_close_called:
+ self._on_close_called = True
+ self.on_close()
+ self._break_cycles()
+
+ def _break_cycles(self):
+ # WebSocketHandlers call finish() early, but we don't want to
+ # break up reference cycles (which makes it impossible to call
+ # self.render_string) until after we've really closed the
+ # connection (if it was established in the first place,
+ # indicated by status code 101).
+ if self.get_status() != 101 or self._on_close_called:
+ super(WebSocketHandler, self)._break_cycles()
+
+ def send_error(self, *args, **kwargs):
+ if self.stream is None:
+ super(WebSocketHandler, self).send_error(*args, **kwargs)
+ else:
+ # If we get an uncaught exception during the handshake,
+ # we have no choice but to abruptly close the connection.
+ # TODO: for uncaught exceptions after the handshake,
+ # we can close the connection more gracefully.
+ self.stream.close()
+
+ def get_websocket_protocol(self):
+ websocket_version = self.request.headers.get("Sec-WebSocket-Version")
+ if websocket_version in ("7", "8", "13"):
+ return WebSocketProtocol13(
+ self, compression_options=self.get_compression_options())
+
+ def _attach_stream(self):
+ self.stream = self.request.connection.detach()
+ self.stream.set_close_callback(self.on_connection_close)
+ # disable non-WS methods
+ for method in ["write", "redirect", "set_header", "set_cookie",
+ "set_status", "flush", "finish"]:
+ setattr(self, method, _raise_not_supported_for_websockets)
+
+
+def _raise_not_supported_for_websockets(*args, **kwargs):
+ raise RuntimeError("Method not supported for Web Sockets")
+
+
+class WebSocketProtocol(object):
+ """Base class for WebSocket protocol versions.
+ """
+ def __init__(self, handler):
+ self.handler = handler
+ self.request = handler.request
+ self.stream = handler.stream
+ self.client_terminated = False
+ self.server_terminated = False
+
+ def _run_callback(self, callback, *args, **kwargs):
+ """Runs the given callback with exception handling.
+
+ If the callback is a coroutine, returns its Future. On error, aborts the
+ websocket connection and returns None.
+ """
+ try:
+ result = callback(*args, **kwargs)
+ except Exception:
+ app_log.error("Uncaught exception in %s",
+ getattr(self.request, 'path', None), exc_info=True)
+ self._abort()
+ else:
+ if result is not None:
+ result = gen.convert_yielded(result)
+ self.stream.io_loop.add_future(result, lambda f: f.result())
+ return result
+
+ def on_connection_close(self):
+ self._abort()
+
+ def _abort(self):
+ """Instantly aborts the WebSocket connection by closing the socket"""
+ self.client_terminated = True
+ self.server_terminated = True
+ self.stream.close() # forcibly tear down the connection
+ self.close() # let the subclass cleanup
+
+
+class _PerMessageDeflateCompressor(object):
+ def __init__(self, persistent, max_wbits, compression_options=None):
+ if max_wbits is None:
+ max_wbits = zlib.MAX_WBITS
+ # There is no symbolic constant for the minimum wbits value.
+ if not (8 <= max_wbits <= zlib.MAX_WBITS):
+ raise ValueError("Invalid max_wbits value %r; allowed range 8-%d",
+ max_wbits, zlib.MAX_WBITS)
+ self._max_wbits = max_wbits
+
+ if compression_options is None or 'compression_level' not in compression_options:
+ self._compression_level = tornado.web.GZipContentEncoding.GZIP_LEVEL
+ else:
+ self._compression_level = compression_options['compression_level']
+
+ if compression_options is None or 'mem_level' not in compression_options:
+ self._mem_level = 8
+ else:
+ self._mem_level = compression_options['mem_level']
+
+ if persistent:
+ self._compressor = self._create_compressor()
+ else:
+ self._compressor = None
+
+ def _create_compressor(self):
+ return zlib.compressobj(self._compression_level, zlib.DEFLATED, -self._max_wbits, self._mem_level)
+
+ def compress(self, data):
+ compressor = self._compressor or self._create_compressor()
+ data = (compressor.compress(data) +
+ compressor.flush(zlib.Z_SYNC_FLUSH))
+ assert data.endswith(b'\x00\x00\xff\xff')
+ return data[:-4]
+
+
+class _PerMessageDeflateDecompressor(object):
+ def __init__(self, persistent, max_wbits, compression_options=None):
+ if max_wbits is None:
+ max_wbits = zlib.MAX_WBITS
+ if not (8 <= max_wbits <= zlib.MAX_WBITS):
+ raise ValueError("Invalid max_wbits value %r; allowed range 8-%d",
+ max_wbits, zlib.MAX_WBITS)
+ self._max_wbits = max_wbits
+ if persistent:
+ self._decompressor = self._create_decompressor()
+ else:
+ self._decompressor = None
+
+ def _create_decompressor(self):
+ return zlib.decompressobj(-self._max_wbits)
+
+ def decompress(self, data):
+ decompressor = self._decompressor or self._create_decompressor()
+ return decompressor.decompress(data + b'\x00\x00\xff\xff')
+
+
+class WebSocketProtocol13(WebSocketProtocol):
+ """Implementation of the WebSocket protocol from RFC 6455.
+
+ This class supports versions 7 and 8 of the protocol in addition to the
+ final version 13.
+ """
+ # Bit masks for the first byte of a frame.
+ FIN = 0x80
+ RSV1 = 0x40
+ RSV2 = 0x20
+ RSV3 = 0x10
+ RSV_MASK = RSV1 | RSV2 | RSV3
+ OPCODE_MASK = 0x0f
+
+ def __init__(self, handler, mask_outgoing=False,
+ compression_options=None):
+ WebSocketProtocol.__init__(self, handler)
+ self.mask_outgoing = mask_outgoing
+ self._final_frame = False
+ self._frame_opcode = None
+ self._masked_frame = None
+ self._frame_mask = None
+ self._frame_length = None
+ self._fragmented_message_buffer = None
+ self._fragmented_message_opcode = None
+ self._waiting = None
+ self._compression_options = compression_options
+ self._decompressor = None
+ self._compressor = None
+ self._frame_compressed = None
+ # The total uncompressed size of all messages received or sent.
+ # Unicode messages are encoded to utf8.
+ # Only for testing; subject to change.
+ self._message_bytes_in = 0
+ self._message_bytes_out = 0
+ # The total size of all packets received or sent. Includes
+ # the effect of compression, frame overhead, and control frames.
+ self._wire_bytes_in = 0
+ self._wire_bytes_out = 0
+ self.ping_callback = None
+ self.last_ping = 0
+ self.last_pong = 0
+
+ def accept_connection(self):
+ try:
+ self._handle_websocket_headers()
+ except ValueError:
+ self.handler.set_status(400)
+ log_msg = "Missing/Invalid WebSocket headers"
+ self.handler.finish(log_msg)
+ gen_log.debug(log_msg)
+ return
+
+ try:
+ self._accept_connection()
+ except ValueError:
+ gen_log.debug("Malformed WebSocket request received",
+ exc_info=True)
+ self._abort()
+ return
+
+ def _handle_websocket_headers(self):
+ """Verifies all invariant- and required headers
+
+ If a header is missing or have an incorrect value ValueError will be
+ raised
+ """
+ fields = ("Host", "Sec-Websocket-Key", "Sec-Websocket-Version")
+ if not all(map(lambda f: self.request.headers.get(f), fields)):
+ raise ValueError("Missing/Invalid WebSocket headers")
+
+ @staticmethod
+ def compute_accept_value(key):
+ """Computes the value for the Sec-WebSocket-Accept header,
+ given the value for Sec-WebSocket-Key.
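+
+ For example, with the sample key from RFC 6455 section 1.3::
+
+     >>> WebSocketProtocol13.compute_accept_value(
+     ...     "dGhlIHNhbXBsZSBub25jZQ==")
+     's3pPLMBiTxaQ9kYGzzhZRbK+xOo='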
+ """
+ sha1 = hashlib.sha1()
+ sha1.update(utf8(key))
+ sha1.update(b"258EAFA5-E914-47DA-95CA-C5AB0DC85B11") # Magic value
+ return native_str(base64.b64encode(sha1.digest()))
+
+ def _challenge_response(self):
+ return WebSocketProtocol13.compute_accept_value(
+ self.request.headers.get("Sec-Websocket-Key"))
+
+ def _accept_connection(self):
+ subprotocols = self.request.headers.get("Sec-WebSocket-Protocol", '')
+ subprotocols = [s.strip() for s in subprotocols.split(',')]
+ if subprotocols:
+ selected = self.handler.select_subprotocol(subprotocols)
+ if selected:
+ assert selected in subprotocols
+ self.handler.set_header("Sec-WebSocket-Protocol", selected)
+
+ extensions = self._parse_extensions_header(self.request.headers)
+ for ext in extensions:
+ if (ext[0] == 'permessage-deflate' and
+ self._compression_options is not None):
+ # TODO: negotiate parameters if compression_options
+ # specifies limits.
+ self._create_compressors('server', ext[1], self._compression_options)
+ if ('client_max_window_bits' in ext[1] and
+ ext[1]['client_max_window_bits'] is None):
+ # Don't echo an offered client_max_window_bits
+ # parameter with no value.
+ del ext[1]['client_max_window_bits']
+ self.handler.set_header("Sec-WebSocket-Extensions",
+ httputil._encode_header(
+ 'permessage-deflate', ext[1]))
+ break
+
+ self.handler.clear_header("Content-Type")
+ self.handler.set_status(101)
+ self.handler.set_header("Upgrade", "websocket")
+ self.handler.set_header("Connection", "Upgrade")
+ self.handler.set_header("Sec-WebSocket-Accept", self._challenge_response())
+ self.handler.finish()
+
+ self.handler._attach_stream()
+ self.stream = self.handler.stream
+
+ self.start_pinging()
+ self._run_callback(self.handler.open, *self.handler.open_args,
+ **self.handler.open_kwargs)
+ self._receive_frame()
+
+ def _parse_extensions_header(self, headers):
+ extensions = headers.get("Sec-WebSocket-Extensions", '')
+ if extensions:
+ return [httputil._parse_header(e.strip())
+ for e in extensions.split(',')]
+ return []
+
+ def _process_server_headers(self, key, headers):
+ """Process the headers sent by the server to this client connection.
+
+ 'key' is the websocket handshake challenge/response key.
+ """
+ assert headers['Upgrade'].lower() == 'websocket'
+ assert headers['Connection'].lower() == 'upgrade'
+ accept = self.compute_accept_value(key)
+ assert headers['Sec-Websocket-Accept'] == accept
+
+ extensions = self._parse_extensions_header(headers)
+ for ext in extensions:
+ if (ext[0] == 'permessage-deflate' and
+ self._compression_options is not None):
+ self._create_compressors('client', ext[1])
+ else:
+ raise ValueError("unsupported extension %r", ext)
+
+ def _get_compressor_options(self, side, agreed_parameters, compression_options=None):
+ """Converts a websocket agreed_parameters set to keyword arguments
+ for our compressor objects.
+ """
+ options = dict(
+ persistent=(side + '_no_context_takeover') not in agreed_parameters)
+ wbits_header = agreed_parameters.get(side + '_max_window_bits', None)
+ if wbits_header is None:
+ options['max_wbits'] = zlib.MAX_WBITS
+ else:
+ options['max_wbits'] = int(wbits_header)
+ options['compression_options'] = compression_options
+ return options
+
+ def _create_compressors(self, side, agreed_parameters, compression_options=None):
+ # TODO: handle invalid parameters gracefully
+ allowed_keys = set(['server_no_context_takeover',
+ 'client_no_context_takeover',
+ 'server_max_window_bits',
+ 'client_max_window_bits'])
+ for key in agreed_parameters:
+ if key not in allowed_keys:
+ raise ValueError("unsupported compression parameter %r" % key)
+ other_side = 'client' if (side == 'server') else 'server'
+ self._compressor = _PerMessageDeflateCompressor(
+ **self._get_compressor_options(side, agreed_parameters, compression_options))
+ self._decompressor = _PerMessageDeflateDecompressor(
+ **self._get_compressor_options(other_side, agreed_parameters, compression_options))
+
+ def _write_frame(self, fin, opcode, data, flags=0):
+ if fin:
+ finbit = self.FIN
+ else:
+ finbit = 0
+ frame = struct.pack("B", finbit | opcode | flags)
+ l = len(data)
+ if self.mask_outgoing:
+ mask_bit = 0x80
+ else:
+ mask_bit = 0
+ if l < 126:
+ frame += struct.pack("B", l | mask_bit)
+ elif l <= 0xFFFF:
+ frame += struct.pack("!BH", 126 | mask_bit, l)
+ else:
+ frame += struct.pack("!BQ", 127 | mask_bit, l)
+ if self.mask_outgoing:
+ mask = os.urandom(4)
+ data = mask + _websocket_mask(mask, data)
+ frame += data
+ self._wire_bytes_out += len(frame)
+ try:
+ return self.stream.write(frame)
+ except StreamClosedError:
+ self._abort()
+
+ def write_message(self, message, binary=False):
+ """Sends the given message to the client of this Web Socket."""
+ if binary:
+ opcode = 0x2
+ else:
+ opcode = 0x1
+ message = tornado.escape.utf8(message)
+ assert isinstance(message, bytes)
+ self._message_bytes_out += len(message)
+ flags = 0
+ if self._compressor:
+ message = self._compressor.compress(message)
+ flags |= self.RSV1
+ return self._write_frame(True, opcode, message, flags=flags)
+
+ def write_ping(self, data):
+ """Send ping frame."""
+ assert isinstance(data, bytes)
+ self._write_frame(True, 0x9, data)
+
+ def _receive_frame(self):
+ try:
+ self.stream.read_bytes(2, self._on_frame_start)
+ except StreamClosedError:
+ self._abort()
+
+ def _on_frame_start(self, data):
+ self._wire_bytes_in += len(data)
+ header, payloadlen = struct.unpack("BB", data)
+ self._final_frame = header & self.FIN
+ reserved_bits = header & self.RSV_MASK
+ self._frame_opcode = header & self.OPCODE_MASK
+ self._frame_opcode_is_control = self._frame_opcode & 0x8
+ if self._decompressor is not None and self._frame_opcode != 0:
+ self._frame_compressed = bool(reserved_bits & self.RSV1)
+ reserved_bits &= ~self.RSV1
+ if reserved_bits:
+ # client is using as-yet-undefined extensions; abort
+ self._abort()
+ return
+ self._masked_frame = bool(payloadlen & 0x80)
+ payloadlen = payloadlen & 0x7f
+ if self._frame_opcode_is_control and payloadlen >= 126:
+ # control frames must have payload < 126
+ self._abort()
+ return
+ try:
+ if payloadlen < 126:
+ self._frame_length = payloadlen
+ if self._masked_frame:
+ self.stream.read_bytes(4, self._on_masking_key)
+ else:
+ self._read_frame_data(False)
+ elif payloadlen == 126:
+ self.stream.read_bytes(2, self._on_frame_length_16)
+ elif payloadlen == 127:
+ self.stream.read_bytes(8, self._on_frame_length_64)
+ except StreamClosedError:
+ self._abort()
+
+ def _read_frame_data(self, masked):
+ new_len = self._frame_length
+ if self._fragmented_message_buffer is not None:
+ new_len += len(self._fragmented_message_buffer)
+ if new_len > (self.handler.max_message_size or 10 * 1024 * 1024):
+ self.close(1009, "message too big")
+ return
+ self.stream.read_bytes(
+ self._frame_length,
+ self._on_masked_frame_data if masked else self._on_frame_data)
+
+ def _on_frame_length_16(self, data):
+ self._wire_bytes_in += len(data)
+ self._frame_length = struct.unpack("!H", data)[0]
+ try:
+ if self._masked_frame:
+ self.stream.read_bytes(4, self._on_masking_key)
+ else:
+ self._read_frame_data(False)
+ except StreamClosedError:
+ self._abort()
+
+ def _on_frame_length_64(self, data):
+ self._wire_bytes_in += len(data)
+ self._frame_length = struct.unpack("!Q", data)[0]
+ try:
+ if self._masked_frame:
+ self.stream.read_bytes(4, self._on_masking_key)
+ else:
+ self._read_frame_data(False)
+ except StreamClosedError:
+ self._abort()
+
+ def _on_masking_key(self, data):
+ self._wire_bytes_in += len(data)
+ self._frame_mask = data
+ try:
+ self._read_frame_data(True)
+ except StreamClosedError:
+ self._abort()
+
+ def _on_masked_frame_data(self, data):
+ # Don't touch _wire_bytes_in; we'll do it in _on_frame_data.
+ self._on_frame_data(_websocket_mask(self._frame_mask, data))
+
+ def _on_frame_data(self, data):
+ handled_future = None
+
+ self._wire_bytes_in += len(data)
+ if self._frame_opcode_is_control:
+ # control frames may be interleaved with a series of fragmented
+ # data frames, so control frames must not interact with
+ # self._fragmented_*
+ if not self._final_frame:
+ # control frames must not be fragmented
+ self._abort()
+ return
+ opcode = self._frame_opcode
+ elif self._frame_opcode == 0: # continuation frame
+ if self._fragmented_message_buffer is None:
+ # nothing to continue
+ self._abort()
+ return
+ self._fragmented_message_buffer += data
+ if self._final_frame:
+ opcode = self._fragmented_message_opcode
+ data = self._fragmented_message_buffer
+ self._fragmented_message_buffer = None
+ else: # start of new data message
+ if self._fragmented_message_buffer is not None:
+ # can't start new message until the old one is finished
+ self._abort()
+ return
+ if self._final_frame:
+ opcode = self._frame_opcode
+ else:
+ self._fragmented_message_opcode = self._frame_opcode
+ self._fragmented_message_buffer = data
+
+ if self._final_frame:
+ handled_future = self._handle_message(opcode, data)
+
+ if not self.client_terminated:
+ if handled_future:
+ # on_message is a coroutine, process more frames once it's done.
+ handled_future.add_done_callback(
+ lambda future: self._receive_frame())
+ else:
+ self._receive_frame()
+
+ def _handle_message(self, opcode, data):
+ """Execute on_message, returning its Future if it is a coroutine."""
+ if self.client_terminated:
+ return
+
+ if self._frame_compressed:
+ data = self._decompressor.decompress(data)
+
+ if opcode == 0x1:
+ # UTF-8 data
+ self._message_bytes_in += len(data)
+ try:
+ decoded = data.decode("utf-8")
+ except UnicodeDecodeError:
+ self._abort()
+ return
+ return self._run_callback(self.handler.on_message, decoded)
+ elif opcode == 0x2:
+ # Binary data
+ self._message_bytes_in += len(data)
+ return self._run_callback(self.handler.on_message, data)
+ elif opcode == 0x8:
+ # Close
+ self.client_terminated = True
+ if len(data) >= 2:
+ self.handler.close_code = struct.unpack('>H', data[:2])[0]
+ if len(data) > 2:
+ self.handler.close_reason = to_unicode(data[2:])
+ # Echo the received close code, if any (RFC 6455 section 5.5.1).
+ self.close(self.handler.close_code)
+ elif opcode == 0x9:
+ # Ping
+ self._write_frame(True, 0xA, data)
+ self._run_callback(self.handler.on_ping, data)
+ elif opcode == 0xA:
+ # Pong
+ self.last_pong = IOLoop.current().time()
+ return self._run_callback(self.handler.on_pong, data)
+ else:
+ self._abort()
+
+ def close(self, code=None, reason=None):
+ """Closes the WebSocket connection."""
+ if not self.server_terminated:
+ if not self.stream.closed():
+ if code is None and reason is not None:
+ code = 1000 # "normal closure" status code
+ if code is None:
+ close_data = b''
+ else:
+ close_data = struct.pack('>H', code)
+ if reason is not None:
+ close_data += utf8(reason)
+ self._write_frame(True, 0x8, close_data)
+ self.server_terminated = True
+ if self.client_terminated:
+ if self._waiting is not None:
+ self.stream.io_loop.remove_timeout(self._waiting)
+ self._waiting = None
+ self.stream.close()
+ elif self._waiting is None:
+ # Give the client a few seconds to complete a clean shutdown,
+ # otherwise just close the connection.
+ self._waiting = self.stream.io_loop.add_timeout(
+ self.stream.io_loop.time() + 5, self._abort)
+
+ @property
+ def ping_interval(self):
+ interval = self.handler.ping_interval
+ if interval is not None:
+ return interval
+ return 0
+
+ @property
+ def ping_timeout(self):
+ timeout = self.handler.ping_timeout
+ if timeout is not None:
+ return timeout
+ return max(3 * self.ping_interval, 30)
+
+ def start_pinging(self):
+ """Start sending periodic pings to keep the connection alive"""
+ if self.ping_interval > 0:
+ self.last_ping = self.last_pong = IOLoop.current().time()
+ self.ping_callback = PeriodicCallback(
+ self.periodic_ping, self.ping_interval * 1000)
+ self.ping_callback.start()
+
+ def periodic_ping(self):
+ """Send a ping to keep the websocket alive
+
+ Called periodically if the websocket_ping_interval is set and non-zero.
+ """
+ if self.stream.closed() and self.ping_callback is not None:
+ self.ping_callback.stop()
+ return
+
+ # Check for timeout on pong. Make sure that we really have
+ # sent a recent ping in case the machine with both server and
+ # client has been suspended since the last ping.
+ now = IOLoop.current().time()
+ since_last_pong = now - self.last_pong
+ since_last_ping = now - self.last_ping
+ if (since_last_ping < 2 * self.ping_interval and
+ since_last_pong > self.ping_timeout):
+ self.close()
+ return
+
+ self.write_ping(b'')
+ self.last_ping = now
+
+
+class WebSocketClientConnection(simple_httpclient._HTTPConnection):
+ """WebSocket client connection.
+
+ This class should not be instantiated directly; use the
+ `websocket_connect` function instead.
+ """
+ def __init__(self, io_loop, request, on_message_callback=None,
+ compression_options=None, ping_interval=None, ping_timeout=None,
+ max_message_size=None):
+ self.compression_options = compression_options
+ self.connect_future = TracebackFuture()
+ self.protocol = None
+ self.read_future = None
+ self.read_queue = collections.deque()
+ self.key = base64.b64encode(os.urandom(16))
+ self._on_message_callback = on_message_callback
+ self.close_code = self.close_reason = None
+ self.ping_interval = ping_interval
+ self.ping_timeout = ping_timeout
+ self.max_message_size = max_message_size
+
+ scheme, sep, rest = request.url.partition(':')
+ scheme = {'ws': 'http', 'wss': 'https'}[scheme]
+ request.url = scheme + sep + rest
+ request.headers.update({
+ 'Upgrade': 'websocket',
+ 'Connection': 'Upgrade',
+ 'Sec-WebSocket-Key': self.key,
+ 'Sec-WebSocket-Version': '13',
+ })
+ if self.compression_options is not None:
+ # Always offer to let the server set our max_wbits (and even though
+ # we don't offer it, we will accept a client_no_context_takeover
+ # from the server).
+ # TODO: set server parameters for deflate extension
+ # if requested in self.compression_options.
+ request.headers['Sec-WebSocket-Extensions'] = (
+ 'permessage-deflate; client_max_window_bits')
+
+ self.tcp_client = TCPClient(io_loop=io_loop)
+ super(WebSocketClientConnection, self).__init__(
+ io_loop, None, request, lambda: None, self._on_http_response,
+ 104857600, self.tcp_client, 65536, 104857600)
+
+ def close(self, code=None, reason=None):
+ """Closes the websocket connection.
+
+ ``code`` and ``reason`` are documented under
+ `WebSocketHandler.close`.
+
+ .. versionadded:: 3.2
+
+ .. versionchanged:: 4.0
+
+ Added the ``code`` and ``reason`` arguments.
+ """
+ if self.protocol is not None:
+ self.protocol.close(code, reason)
+ self.protocol = None
+
+ def on_connection_close(self):
+ if not self.connect_future.done():
+ self.connect_future.set_exception(StreamClosedError())
+ self.on_message(None)
+ self.tcp_client.close()
+ super(WebSocketClientConnection, self).on_connection_close()
+
+ def _on_http_response(self, response):
+ if not self.connect_future.done():
+ if response.error:
+ self.connect_future.set_exception(response.error)
+ else:
+ self.connect_future.set_exception(WebSocketError(
+ "Non-websocket response"))
+
+ def headers_received(self, start_line, headers):
+ if start_line.code != 101:
+ return super(WebSocketClientConnection, self).headers_received(
+ start_line, headers)
+
+ self.headers = headers
+ self.protocol = self.get_websocket_protocol()
+ self.protocol._process_server_headers(self.key, self.headers)
+ self.protocol.start_pinging()
+ self.protocol._receive_frame()
+
+ if self._timeout is not None:
+ self.io_loop.remove_timeout(self._timeout)
+ self._timeout = None
+
+ self.stream = self.connection.detach()
+ self.stream.set_close_callback(self.on_connection_close)
+ # Once we've taken over the connection, clear the final callback
+ # we set on the http request. This deactivates the error handling
+ # in simple_httpclient that would otherwise interfere with our
+ # ability to see exceptions.
+ self.final_callback = None
+
+ self.connect_future.set_result(self)
+
+ def write_message(self, message, binary=False):
+ """Sends a message to the WebSocket server."""
+ return self.protocol.write_message(message, binary)
+
+ def read_message(self, callback=None):
+ """Reads a message from the WebSocket server.
+
+ If on_message_callback was specified at WebSocket
+ initialization, this function will never return messages.
+
+ Returns a future whose result is the message, or None
+ if the connection is closed. If a callback argument
+ is given it will be called with the future when it is
+ ready.
+ """
+ assert self.read_future is None
+ future = TracebackFuture()
+ if self.read_queue:
+ future.set_result(self.read_queue.popleft())
+ else:
+ self.read_future = future
+ if callback is not None:
+ self.io_loop.add_future(future, callback)
+ return future
+
+ def on_message(self, message):
+ if self._on_message_callback:
+ self._on_message_callback(message)
+ elif self.read_future is not None:
+ self.read_future.set_result(message)
+ self.read_future = None
+ else:
+ self.read_queue.append(message)
+
+ def on_pong(self, data):
+ pass
+
+ def on_ping(self, data):
+ pass
+
+ def get_websocket_protocol(self):
+ return WebSocketProtocol13(self, mask_outgoing=True,
+ compression_options=self.compression_options)
+
+
+def websocket_connect(url, io_loop=None, callback=None, connect_timeout=None,
+ on_message_callback=None, compression_options=None,
+ ping_interval=None, ping_timeout=None,
+ max_message_size=None):
+ """Client-side websocket support.
+
+ Takes a url and returns a Future whose result is a
+ `WebSocketClientConnection`.
+
+ ``compression_options`` is interpreted in the same way as the
+ return value of `.WebSocketHandler.get_compression_options`.
+
+ The connection supports two styles of operation. In the coroutine
+ style, the application typically calls
+ `~.WebSocketClientConnection.read_message` in a loop::
+
+ conn = yield websocket_connect(url)
+ while True:
+ msg = yield conn.read_message()
+ if msg is None: break
+ # Do something with msg
+
+ In the callback style, pass an ``on_message_callback`` to
+ ``websocket_connect``. In both styles, a message of ``None``
+ indicates that the connection has been closed.
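+
+ A callback-style sketch (``handle_message`` is a hypothetical
+ function; it is called with ``None`` once the connection closes)::
+
+     def handle_message(msg):
+         if msg is None:
+             print("connection closed")
+         else:
+             print("received:", msg)
+
+     websocket_connect(url, on_message_callback=handle_message)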
+
+ .. versionchanged:: 3.2
+ Also accepts ``HTTPRequest`` objects in place of urls.
+
+ .. versionchanged:: 4.1
+ Added ``compression_options`` and ``on_message_callback``.
+ The ``io_loop`` argument is deprecated.
+
+ .. versionchanged:: 4.5
+ Added the ``ping_interval``, ``ping_timeout``, and ``max_message_size``
+ arguments, which have the same meaning as in `WebSocketHandler`.
+ """
+ if io_loop is None:
+ io_loop = IOLoop.current()
+ if isinstance(url, httpclient.HTTPRequest):
+ assert connect_timeout is None
+ request = url
+ # Copy and convert the headers dict/object (see comments in
+ # AsyncHTTPClient.fetch)
+ request.headers = httputil.HTTPHeaders(request.headers)
+ else:
+ request = httpclient.HTTPRequest(url, connect_timeout=connect_timeout)
+ request = httpclient._RequestProxy(
+ request, httpclient.HTTPRequest._DEFAULTS)
+ conn = WebSocketClientConnection(io_loop, request,
+ on_message_callback=on_message_callback,
+ compression_options=compression_options,
+ ping_interval=ping_interval,
+ ping_timeout=ping_timeout,
+ max_message_size=max_message_size)
+ if callback is not None:
+ io_loop.add_future(conn.connect_future, callback)
+ return conn.connect_future
diff --git a/contrib/python/tornado/tornado-4/tornado/wsgi.py b/contrib/python/tornado/tornado-4/tornado/wsgi.py
index 68a7615a0e..4e4631e306 100644
--- a/contrib/python/tornado/tornado-4/tornado/wsgi.py
+++ b/contrib/python/tornado/tornado-4/tornado/wsgi.py
@@ -1,358 +1,358 @@
-#!/usr/bin/env python
-#
-# Copyright 2009 Facebook
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""WSGI support for the Tornado web framework.
-
-WSGI is the Python standard for web servers, and allows for interoperability
-between Tornado and other Python web frameworks and servers. This module
-provides WSGI support in two ways:
-
-* `WSGIAdapter` converts a `tornado.web.Application` to the WSGI application
- interface. This is useful for running a Tornado app on another
- HTTP server, such as Google App Engine. See the `WSGIAdapter` class
- documentation for limitations that apply.
-* `WSGIContainer` lets you run other WSGI applications and frameworks on the
- Tornado HTTP server. For example, with this class you can mix Django
- and Tornado handlers in a single server.
-"""
-
-from __future__ import absolute_import, division, print_function
-
-import sys
-from io import BytesIO
-import tornado
-
-from tornado.concurrent import Future
-from tornado import escape
-from tornado import httputil
-from tornado.log import access_log
-from tornado import web
-from tornado.escape import native_str
-from tornado.util import unicode_type, PY3
-
-
-if PY3:
- import urllib.parse as urllib_parse # py3
-else:
- import urllib as urllib_parse
-
-# PEP 3333 specifies that WSGI on python 3 generally deals with byte strings
-# that are smuggled inside objects of type unicode (via the latin1 encoding).
-# These functions are like those in the tornado.escape module, but defined
-# here to minimize the temptation to use them in non-wsgi contexts.
-if str is unicode_type:
- def to_wsgi_str(s):
- assert isinstance(s, bytes)
- return s.decode('latin1')
-
- def from_wsgi_str(s):
- assert isinstance(s, str)
- return s.encode('latin1')
-else:
- def to_wsgi_str(s):
- assert isinstance(s, bytes)
- return s
-
- def from_wsgi_str(s):
- assert isinstance(s, str)
- return s
-
-
-class WSGIApplication(web.Application):
- """A WSGI equivalent of `tornado.web.Application`.
-
- .. deprecated:: 4.0
-
- Use a regular `.Application` and wrap it in `WSGIAdapter` instead.
- """
- def __call__(self, environ, start_response):
- return WSGIAdapter(self)(environ, start_response)
-
-
-# WSGI has no facilities for flow control, so just return an already-done
-# Future when the interface requires it.
-_dummy_future = Future()
-_dummy_future.set_result(None)
-
-
-class _WSGIConnection(httputil.HTTPConnection):
- def __init__(self, method, start_response, context):
- self.method = method
- self.start_response = start_response
- self.context = context
- self._write_buffer = []
- self._finished = False
- self._expected_content_remaining = None
- self._error = None
-
- def set_close_callback(self, callback):
- # WSGI has no facility for detecting a closed connection mid-request,
- # so we can simply ignore the callback.
- pass
-
- def write_headers(self, start_line, headers, chunk=None, callback=None):
- if self.method == 'HEAD':
- self._expected_content_remaining = 0
- elif 'Content-Length' in headers:
- self._expected_content_remaining = int(headers['Content-Length'])
- else:
- self._expected_content_remaining = None
- self.start_response(
- '%s %s' % (start_line.code, start_line.reason),
- [(native_str(k), native_str(v)) for (k, v) in headers.get_all()])
- if chunk is not None:
- self.write(chunk, callback)
- elif callback is not None:
- callback()
- return _dummy_future
-
- def write(self, chunk, callback=None):
- if self._expected_content_remaining is not None:
- self._expected_content_remaining -= len(chunk)
- if self._expected_content_remaining < 0:
- self._error = httputil.HTTPOutputError(
- "Tried to write more data than Content-Length")
- raise self._error
- self._write_buffer.append(chunk)
- if callback is not None:
- callback()
- return _dummy_future
-
- def finish(self):
- if (self._expected_content_remaining is not None and
- self._expected_content_remaining != 0):
- self._error = httputil.HTTPOutputError(
- "Tried to write %d bytes less than Content-Length" %
- self._expected_content_remaining)
- raise self._error
- self._finished = True
-
-
-class _WSGIRequestContext(object):
- def __init__(self, remote_ip, protocol):
- self.remote_ip = remote_ip
- self.protocol = protocol
-
- def __str__(self):
- return self.remote_ip
-
-
-class WSGIAdapter(object):
- """Converts a `tornado.web.Application` instance into a WSGI application.
-
- Example usage::
-
- import tornado.web
- import tornado.wsgi
- import wsgiref.simple_server
-
- class MainHandler(tornado.web.RequestHandler):
- def get(self):
- self.write("Hello, world")
-
- if __name__ == "__main__":
- application = tornado.web.Application([
- (r"/", MainHandler),
- ])
- wsgi_app = tornado.wsgi.WSGIAdapter(application)
- server = wsgiref.simple_server.make_server('', 8888, wsgi_app)
- server.serve_forever()
-
- See the `appengine demo
- <https://github.com/tornadoweb/tornado/tree/stable/demos/appengine>`_
- for an example of using this module to run a Tornado app on Google
- App Engine.
-
- In WSGI mode asynchronous methods are not supported. This means
- that it is not possible to use `.AsyncHTTPClient`, or the
- `tornado.auth` or `tornado.websocket` modules.
-
- .. versionadded:: 4.0
- """
- def __init__(self, application):
- if isinstance(application, WSGIApplication):
- self.application = lambda request: web.Application.__call__(
- application, request)
- else:
- self.application = application
-
- def __call__(self, environ, start_response):
- method = environ["REQUEST_METHOD"]
- uri = urllib_parse.quote(from_wsgi_str(environ.get("SCRIPT_NAME", "")))
- uri += urllib_parse.quote(from_wsgi_str(environ.get("PATH_INFO", "")))
- if environ.get("QUERY_STRING"):
- uri += "?" + environ["QUERY_STRING"]
- headers = httputil.HTTPHeaders()
- if environ.get("CONTENT_TYPE"):
- headers["Content-Type"] = environ["CONTENT_TYPE"]
- if environ.get("CONTENT_LENGTH"):
- headers["Content-Length"] = environ["CONTENT_LENGTH"]
- for key in environ:
- if key.startswith("HTTP_"):
- headers[key[5:].replace("_", "-")] = environ[key]
- if headers.get("Content-Length"):
- body = environ["wsgi.input"].read(
- int(headers["Content-Length"]))
- else:
- body = b""
- protocol = environ["wsgi.url_scheme"]
- remote_ip = environ.get("REMOTE_ADDR", "")
- if environ.get("HTTP_HOST"):
- host = environ["HTTP_HOST"]
- else:
- host = environ["SERVER_NAME"]
- connection = _WSGIConnection(method, start_response,
- _WSGIRequestContext(remote_ip, protocol))
- request = httputil.HTTPServerRequest(
- method, uri, "HTTP/1.1", headers=headers, body=body,
- host=host, connection=connection)
- request._parse_body()
- self.application(request)
- if connection._error:
- raise connection._error
- if not connection._finished:
- raise Exception("request did not finish synchronously")
- return connection._write_buffer
-
-
-class WSGIContainer(object):
- r"""Makes a WSGI-compatible function runnable on Tornado's HTTP server.
-
- .. warning::
-
- WSGI is a *synchronous* interface, while Tornado's concurrency model
- is based on single-threaded asynchronous execution. This means that
- running a WSGI app with Tornado's `WSGIContainer` is *less scalable*
- than running the same app in a multi-threaded WSGI server like
- ``gunicorn`` or ``uwsgi``. Use `WSGIContainer` only when there are
- benefits to combining Tornado and WSGI in the same process that
- outweigh the reduced scalability.
-
- Wrap a WSGI function in a `WSGIContainer` and pass it to `.HTTPServer` to
- run it. For example::
-
- def simple_app(environ, start_response):
- status = "200 OK"
- response_headers = [("Content-type", "text/plain")]
- start_response(status, response_headers)
- return ["Hello world!\n"]
-
- container = tornado.wsgi.WSGIContainer(simple_app)
- http_server = tornado.httpserver.HTTPServer(container)
- http_server.listen(8888)
- tornado.ioloop.IOLoop.current().start()
-
- This class is intended to let other frameworks (Django, web.py, etc)
- run on the Tornado HTTP server and I/O loop.
-
- The `tornado.web.FallbackHandler` class is often useful for mixing
- Tornado and WSGI apps in the same server. See
- https://github.com/bdarnell/django-tornado-demo for a complete example.
- """
- def __init__(self, wsgi_application):
- self.wsgi_application = wsgi_application
-
- def __call__(self, request):
- data = {}
- response = []
-
- def start_response(status, response_headers, exc_info=None):
- data["status"] = status
- data["headers"] = response_headers
- return response.append
- app_response = self.wsgi_application(
- WSGIContainer.environ(request), start_response)
- try:
- response.extend(app_response)
- body = b"".join(response)
- finally:
- if hasattr(app_response, "close"):
- app_response.close()
- if not data:
- raise Exception("WSGI app did not call start_response")
-
- status_code, reason = data["status"].split(' ', 1)
- status_code = int(status_code)
- headers = data["headers"]
- header_set = set(k.lower() for (k, v) in headers)
- body = escape.utf8(body)
- if status_code != 304:
- if "content-length" not in header_set:
- headers.append(("Content-Length", str(len(body))))
- if "content-type" not in header_set:
- headers.append(("Content-Type", "text/html; charset=UTF-8"))
- if "server" not in header_set:
- headers.append(("Server", "TornadoServer/%s" % tornado.version))
-
- start_line = httputil.ResponseStartLine("HTTP/1.1", status_code, reason)
- header_obj = httputil.HTTPHeaders()
- for key, value in headers:
- header_obj.add(key, value)
- request.connection.write_headers(start_line, header_obj, chunk=body)
- request.connection.finish()
- self._log(status_code, request)
-
- @staticmethod
- def environ(request):
- """Converts a `tornado.httputil.HTTPServerRequest` to a WSGI environment.
- """
- hostport = request.host.split(":")
- if len(hostport) == 2:
- host = hostport[0]
- port = int(hostport[1])
- else:
- host = request.host
- port = 443 if request.protocol == "https" else 80
- environ = {
- "REQUEST_METHOD": request.method,
- "SCRIPT_NAME": "",
- "PATH_INFO": to_wsgi_str(escape.url_unescape(
- request.path, encoding=None, plus=False)),
- "QUERY_STRING": request.query,
- "REMOTE_ADDR": request.remote_ip,
- "SERVER_NAME": host,
- "SERVER_PORT": str(port),
- "SERVER_PROTOCOL": request.version,
- "wsgi.version": (1, 0),
- "wsgi.url_scheme": request.protocol,
- "wsgi.input": BytesIO(escape.utf8(request.body)),
- "wsgi.errors": sys.stderr,
- "wsgi.multithread": False,
- "wsgi.multiprocess": True,
- "wsgi.run_once": False,
- }
- if "Content-Type" in request.headers:
- environ["CONTENT_TYPE"] = request.headers.pop("Content-Type")
- if "Content-Length" in request.headers:
- environ["CONTENT_LENGTH"] = request.headers.pop("Content-Length")
- for key, value in request.headers.items():
- environ["HTTP_" + key.replace("-", "_").upper()] = value
- return environ
-
- def _log(self, status_code, request):
- if status_code < 400:
- log_method = access_log.info
- elif status_code < 500:
- log_method = access_log.warning
- else:
- log_method = access_log.error
- request_time = 1000.0 * request.request_time()
- summary = request.method + " " + request.uri + " (" + \
- request.remote_ip + ")"
- log_method("%d %s %.2fms", status_code, summary, request_time)
-
-
-HTTPRequest = httputil.HTTPServerRequest
+#!/usr/bin/env python
+#
+# Copyright 2009 Facebook
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""WSGI support for the Tornado web framework.
+
+WSGI is the Python standard for web servers, and allows for interoperability
+between Tornado and other Python web frameworks and servers. This module
+provides WSGI support in two ways:
+
+* `WSGIAdapter` converts a `tornado.web.Application` to the WSGI application
+ interface. This is useful for running a Tornado app on another
+ HTTP server, such as Google App Engine. See the `WSGIAdapter` class
+ documentation for limitations that apply.
+* `WSGIContainer` lets you run other WSGI applications and frameworks on the
+ Tornado HTTP server. For example, with this class you can mix Django
+ and Tornado handlers in a single server.
+"""
+
+from __future__ import absolute_import, division, print_function
+
+import sys
+from io import BytesIO
+import tornado
+
+from tornado.concurrent import Future
+from tornado import escape
+from tornado import httputil
+from tornado.log import access_log
+from tornado import web
+from tornado.escape import native_str
+from tornado.util import unicode_type, PY3
+
+
+if PY3:
+ import urllib.parse as urllib_parse # py3
+else:
+ import urllib as urllib_parse
+
+# PEP 3333 specifies that WSGI on python 3 generally deals with byte strings
+# that are smuggled inside objects of type unicode (via the latin1 encoding).
+# These functions are like those in the tornado.escape module, but defined
+# here to minimize the temptation to use them in non-wsgi contexts.
+if str is unicode_type:
+ def to_wsgi_str(s):
+ assert isinstance(s, bytes)
+ return s.decode('latin1')
+
+ def from_wsgi_str(s):
+ assert isinstance(s, str)
+ return s.encode('latin1')
+else:
+ def to_wsgi_str(s):
+ assert isinstance(s, bytes)
+ return s
+
+ def from_wsgi_str(s):
+ assert isinstance(s, str)
+ return s
+
+
+class WSGIApplication(web.Application):
+ """A WSGI equivalent of `tornado.web.Application`.
+
+ .. deprecated:: 4.0
+
+ Use a regular `.Application` and wrap it in `WSGIAdapter` instead.
+ """
+ def __call__(self, environ, start_response):
+ return WSGIAdapter(self)(environ, start_response)
+
+
+# WSGI has no facilities for flow control, so just return an already-done
+# Future when the interface requires it.
+_dummy_future = Future()
+_dummy_future.set_result(None)
+
+
+class _WSGIConnection(httputil.HTTPConnection):
+ def __init__(self, method, start_response, context):
+ self.method = method
+ self.start_response = start_response
+ self.context = context
+ self._write_buffer = []
+ self._finished = False
+ self._expected_content_remaining = None
+ self._error = None
+
+ def set_close_callback(self, callback):
+ # WSGI has no facility for detecting a closed connection mid-request,
+ # so we can simply ignore the callback.
+ pass
+
+ def write_headers(self, start_line, headers, chunk=None, callback=None):
+ if self.method == 'HEAD':
+ self._expected_content_remaining = 0
+ elif 'Content-Length' in headers:
+ self._expected_content_remaining = int(headers['Content-Length'])
+ else:
+ self._expected_content_remaining = None
+ self.start_response(
+ '%s %s' % (start_line.code, start_line.reason),
+ [(native_str(k), native_str(v)) for (k, v) in headers.get_all()])
+ if chunk is not None:
+ self.write(chunk, callback)
+ elif callback is not None:
+ callback()
+ return _dummy_future
+
+ def write(self, chunk, callback=None):
+ if self._expected_content_remaining is not None:
+ self._expected_content_remaining -= len(chunk)
+ if self._expected_content_remaining < 0:
+ self._error = httputil.HTTPOutputError(
+ "Tried to write more data than Content-Length")
+ raise self._error
+ self._write_buffer.append(chunk)
+ if callback is not None:
+ callback()
+ return _dummy_future
+
+ def finish(self):
+ if (self._expected_content_remaining is not None and
+ self._expected_content_remaining != 0):
+ self._error = httputil.HTTPOutputError(
+ "Tried to write %d bytes less than Content-Length" %
+ self._expected_content_remaining)
+ raise self._error
+ self._finished = True
+
+
+class _WSGIRequestContext(object):
+ def __init__(self, remote_ip, protocol):
+ self.remote_ip = remote_ip
+ self.protocol = protocol
+
+ def __str__(self):
+ return self.remote_ip
+
+
+class WSGIAdapter(object):
+ """Converts a `tornado.web.Application` instance into a WSGI application.
+
+ Example usage::
+
+ import tornado.web
+ import tornado.wsgi
+ import wsgiref.simple_server
+
+ class MainHandler(tornado.web.RequestHandler):
+ def get(self):
+ self.write("Hello, world")
+
+ if __name__ == "__main__":
+ application = tornado.web.Application([
+ (r"/", MainHandler),
+ ])
+ wsgi_app = tornado.wsgi.WSGIAdapter(application)
+ server = wsgiref.simple_server.make_server('', 8888, wsgi_app)
+ server.serve_forever()
+
+ See the `appengine demo
+ <https://github.com/tornadoweb/tornado/tree/stable/demos/appengine>`_
+ for an example of using this module to run a Tornado app on Google
+ App Engine.
+
+ In WSGI mode asynchronous methods are not supported. This means
+ that it is not possible to use `.AsyncHTTPClient`, or the
+ `tornado.auth` or `tornado.websocket` modules.
+
+ .. versionadded:: 4.0
+ """
+ def __init__(self, application):
+ if isinstance(application, WSGIApplication):
+ self.application = lambda request: web.Application.__call__(
+ application, request)
+ else:
+ self.application = application
+
+ def __call__(self, environ, start_response):
+ method = environ["REQUEST_METHOD"]
+ uri = urllib_parse.quote(from_wsgi_str(environ.get("SCRIPT_NAME", "")))
+ uri += urllib_parse.quote(from_wsgi_str(environ.get("PATH_INFO", "")))
+ if environ.get("QUERY_STRING"):
+ uri += "?" + environ["QUERY_STRING"]
+ headers = httputil.HTTPHeaders()
+ if environ.get("CONTENT_TYPE"):
+ headers["Content-Type"] = environ["CONTENT_TYPE"]
+ if environ.get("CONTENT_LENGTH"):
+ headers["Content-Length"] = environ["CONTENT_LENGTH"]
+ for key in environ:
+ if key.startswith("HTTP_"):
+ headers[key[5:].replace("_", "-")] = environ[key]
+ if headers.get("Content-Length"):
+ body = environ["wsgi.input"].read(
+ int(headers["Content-Length"]))
+ else:
+ body = b""
+ protocol = environ["wsgi.url_scheme"]
+ remote_ip = environ.get("REMOTE_ADDR", "")
+ if environ.get("HTTP_HOST"):
+ host = environ["HTTP_HOST"]
+ else:
+ host = environ["SERVER_NAME"]
+ connection = _WSGIConnection(method, start_response,
+ _WSGIRequestContext(remote_ip, protocol))
+ request = httputil.HTTPServerRequest(
+ method, uri, "HTTP/1.1", headers=headers, body=body,
+ host=host, connection=connection)
+ request._parse_body()
+ self.application(request)
+ if connection._error:
+ raise connection._error
+ if not connection._finished:
+ raise Exception("request did not finish synchronously")
+ return connection._write_buffer
+
+
+class WSGIContainer(object):
+ r"""Makes a WSGI-compatible function runnable on Tornado's HTTP server.
+
+ .. warning::
+
+ WSGI is a *synchronous* interface, while Tornado's concurrency model
+ is based on single-threaded asynchronous execution. This means that
+ running a WSGI app with Tornado's `WSGIContainer` is *less scalable*
+ than running the same app in a multi-threaded WSGI server like
+ ``gunicorn`` or ``uwsgi``. Use `WSGIContainer` only when there are
+ benefits to combining Tornado and WSGI in the same process that
+ outweigh the reduced scalability.
+
+ Wrap a WSGI function in a `WSGIContainer` and pass it to `.HTTPServer` to
+ run it. For example::
+
+ def simple_app(environ, start_response):
+ status = "200 OK"
+ response_headers = [("Content-type", "text/plain")]
+ start_response(status, response_headers)
+ return ["Hello world!\n"]
+
+ container = tornado.wsgi.WSGIContainer(simple_app)
+ http_server = tornado.httpserver.HTTPServer(container)
+ http_server.listen(8888)
+ tornado.ioloop.IOLoop.current().start()
+
+ This class is intended to let other frameworks (Django, web.py, etc)
+ run on the Tornado HTTP server and I/O loop.
+
+ The `tornado.web.FallbackHandler` class is often useful for mixing
+ Tornado and WSGI apps in the same server. See
+ https://github.com/bdarnell/django-tornado-demo for a complete example.
+ """
+ def __init__(self, wsgi_application):
+ self.wsgi_application = wsgi_application
+
+ def __call__(self, request):
+ data = {}
+ response = []
+
+ def start_response(status, response_headers, exc_info=None):
+ data["status"] = status
+ data["headers"] = response_headers
+ return response.append
+ app_response = self.wsgi_application(
+ WSGIContainer.environ(request), start_response)
+ try:
+ response.extend(app_response)
+ body = b"".join(response)
+ finally:
+ if hasattr(app_response, "close"):
+ app_response.close()
+ if not data:
+ raise Exception("WSGI app did not call start_response")
+
+ status_code, reason = data["status"].split(' ', 1)
+ status_code = int(status_code)
+ headers = data["headers"]
+ header_set = set(k.lower() for (k, v) in headers)
+ body = escape.utf8(body)
+ if status_code != 304:
+ if "content-length" not in header_set:
+ headers.append(("Content-Length", str(len(body))))
+ if "content-type" not in header_set:
+ headers.append(("Content-Type", "text/html; charset=UTF-8"))
+ if "server" not in header_set:
+ headers.append(("Server", "TornadoServer/%s" % tornado.version))
+
+ start_line = httputil.ResponseStartLine("HTTP/1.1", status_code, reason)
+ header_obj = httputil.HTTPHeaders()
+ for key, value in headers:
+ header_obj.add(key, value)
+ request.connection.write_headers(start_line, header_obj, chunk=body)
+ request.connection.finish()
+ self._log(status_code, request)
+
+ @staticmethod
+ def environ(request):
+ """Converts a `tornado.httputil.HTTPServerRequest` to a WSGI environment.
+ """
+ hostport = request.host.split(":")
+ if len(hostport) == 2:
+ host = hostport[0]
+ port = int(hostport[1])
+ else:
+ host = request.host
+ port = 443 if request.protocol == "https" else 80
+ environ = {
+ "REQUEST_METHOD": request.method,
+ "SCRIPT_NAME": "",
+ "PATH_INFO": to_wsgi_str(escape.url_unescape(
+ request.path, encoding=None, plus=False)),
+ "QUERY_STRING": request.query,
+ "REMOTE_ADDR": request.remote_ip,
+ "SERVER_NAME": host,
+ "SERVER_PORT": str(port),
+ "SERVER_PROTOCOL": request.version,
+ "wsgi.version": (1, 0),
+ "wsgi.url_scheme": request.protocol,
+ "wsgi.input": BytesIO(escape.utf8(request.body)),
+ "wsgi.errors": sys.stderr,
+ "wsgi.multithread": False,
+ "wsgi.multiprocess": True,
+ "wsgi.run_once": False,
+ }
+ if "Content-Type" in request.headers:
+ environ["CONTENT_TYPE"] = request.headers.pop("Content-Type")
+ if "Content-Length" in request.headers:
+ environ["CONTENT_LENGTH"] = request.headers.pop("Content-Length")
+ for key, value in request.headers.items():
+ environ["HTTP_" + key.replace("-", "_").upper()] = value
+ return environ
+
+ def _log(self, status_code, request):
+ if status_code < 400:
+ log_method = access_log.info
+ elif status_code < 500:
+ log_method = access_log.warning
+ else:
+ log_method = access_log.error
+ request_time = 1000.0 * request.request_time()
+ summary = request.method + " " + request.uri + " (" + \
+ request.remote_ip + ")"
+ log_method("%d %s %.2fms", status_code, summary, request_time)
+
+
+HTTPRequest = httputil.HTTPServerRequest
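``WSGIContainer`` is most often paired with the `tornado.web.FallbackHandler` mentioned in its docstring: Tornado routes are matched first, and everything else falls through to the synchronous WSGI app. A minimal sketch, where ``simple_wsgi_app`` is a hypothetical stand-in for a real WSGI application object such as a Django handler::

    import tornado.ioloop
    import tornado.web
    import tornado.wsgi

    def simple_wsgi_app(environ, start_response):
        # Hypothetical placeholder for a real WSGI application.
        start_response("200 OK", [("Content-Type", "text/plain")])
        return [b"served by WSGI\n"]

    class TornadoHandler(tornado.web.RequestHandler):
        def get(self):
            self.write("served by Tornado")

    application = tornado.web.Application([
        (r"/tornado", TornadoHandler),
        # Any path not matched above is handed to the WSGI container.
        (r".*", tornado.web.FallbackHandler,
         dict(fallback=tornado.wsgi.WSGIContainer(simple_wsgi_app))),
    ])

    if __name__ == "__main__":
        application.listen(8888)
        tornado.ioloop.IOLoop.current().start()

Requests to ``/tornado`` stay on the asynchronous path, while the fallback route inherits the scalability caveat from the ``WSGIContainer`` warning above.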
diff --git a/contrib/python/tornado/tornado-4/tornado/ya.make b/contrib/python/tornado/tornado-4/tornado/ya.make
index 195c1fad93..c960eea835 100644
--- a/contrib/python/tornado/tornado-4/tornado/ya.make
+++ b/contrib/python/tornado/tornado-4/tornado/ya.make
@@ -1 +1 @@
-OWNER(g:python-contrib)
+OWNER(g:python-contrib)
diff --git a/contrib/python/tornado/tornado-4/ya.make b/contrib/python/tornado/tornado-4/ya.make
index 0ea2ed6040..1efad04639 100644
--- a/contrib/python/tornado/tornado-4/ya.make
+++ b/contrib/python/tornado/tornado-4/ya.make
@@ -1,84 +1,84 @@
-OWNER(g:python-contrib dldmitry orivej)
-
-PY23_LIBRARY()
-
-LICENSE(Apache-2.0)
-
-VERSION(4.5.3)
-
+OWNER(g:python-contrib dldmitry orivej)
+
+PY23_LIBRARY()
+
+LICENSE(Apache-2.0)
+
+VERSION(4.5.3)
+
PROVIDES(tornado)
-PEERDIR(
- # because of ca bundle
- contrib/python/certifi
-)
-
-IF (PYTHON2)
- PEERDIR(
- contrib/python/backports_abc
- contrib/python/singledispatch
- )
-ENDIF()
-
-NO_CHECK_IMPORTS(
- tornado.platform.*
- tornado.curl_httpclient
-)
-
-NO_LINT()
-
-PY_SRCS(
- TOP_LEVEL
- tornado/__init__.py
- tornado/_locale_data.py
- tornado/auth.py
- tornado/autoreload.py
- tornado/concurrent.py
- tornado/curl_httpclient.py
- tornado/escape.py
- tornado/gen.py
- tornado/http1connection.py
- tornado/httpclient.py
- tornado/httpserver.py
- tornado/httputil.py
- tornado/ioloop.py
- tornado/iostream.py
- tornado/locale.py
- tornado/locks.py
- tornado/log.py
- tornado/netutil.py
- tornado/options.py
- tornado/platform/__init__.py
- tornado/platform/asyncio.py
- tornado/platform/auto.py
- tornado/platform/caresresolver.py
- tornado/platform/common.py
- tornado/platform/epoll.py
- tornado/platform/interface.py
- tornado/platform/kqueue.py
- tornado/platform/posix.py
- tornado/platform/select.py
- tornado/platform/twisted.py
- tornado/platform/windows.py
- tornado/process.py
- tornado/queues.py
- tornado/routing.py
- tornado/simple_httpclient.py
- tornado/stack_context.py
- tornado/tcpclient.py
- tornado/tcpserver.py
- tornado/template.py
- tornado/testing.py
- tornado/util.py
- tornado/web.py
- tornado/websocket.py
- tornado/wsgi.py
-)
-
-RESOURCE_FILES(
- PREFIX contrib/python/tornado/tornado-4/
- .dist-info/METADATA
- .dist-info/top_level.txt
-)
-
-END()
+PEERDIR(
+ # because of ca bundle
+ contrib/python/certifi
+)
+
+IF (PYTHON2)
+ PEERDIR(
+ contrib/python/backports_abc
+ contrib/python/singledispatch
+ )
+ENDIF()
+
+NO_CHECK_IMPORTS(
+ tornado.platform.*
+ tornado.curl_httpclient
+)
+
+NO_LINT()
+
+PY_SRCS(
+ TOP_LEVEL
+ tornado/__init__.py
+ tornado/_locale_data.py
+ tornado/auth.py
+ tornado/autoreload.py
+ tornado/concurrent.py
+ tornado/curl_httpclient.py
+ tornado/escape.py
+ tornado/gen.py
+ tornado/http1connection.py
+ tornado/httpclient.py
+ tornado/httpserver.py
+ tornado/httputil.py
+ tornado/ioloop.py
+ tornado/iostream.py
+ tornado/locale.py
+ tornado/locks.py
+ tornado/log.py
+ tornado/netutil.py
+ tornado/options.py
+ tornado/platform/__init__.py
+ tornado/platform/asyncio.py
+ tornado/platform/auto.py
+ tornado/platform/caresresolver.py
+ tornado/platform/common.py
+ tornado/platform/epoll.py
+ tornado/platform/interface.py
+ tornado/platform/kqueue.py
+ tornado/platform/posix.py
+ tornado/platform/select.py
+ tornado/platform/twisted.py
+ tornado/platform/windows.py
+ tornado/process.py
+ tornado/queues.py
+ tornado/routing.py
+ tornado/simple_httpclient.py
+ tornado/stack_context.py
+ tornado/tcpclient.py
+ tornado/tcpserver.py
+ tornado/template.py
+ tornado/testing.py
+ tornado/util.py
+ tornado/web.py
+ tornado/websocket.py
+ tornado/wsgi.py
+)
+
+RESOURCE_FILES(
+ PREFIX contrib/python/tornado/tornado-4/
+ .dist-info/METADATA
+ .dist-info/top_level.txt
+)
+
+END()
diff --git a/contrib/python/tornado/ya.make b/contrib/python/tornado/ya.make
index 85a91fb7c1..796f54155f 100644
--- a/contrib/python/tornado/ya.make
+++ b/contrib/python/tornado/ya.make
@@ -14,7 +14,7 @@ NO_LINT()
END()
-RECURSE(
- tornado-4
- tornado-6
+RECURSE(
+ tornado-4
+ tornado-6
)
diff --git a/contrib/python/ya.make b/contrib/python/ya.make
index d01ced9f3a..a9a10dac53 100644
--- a/contrib/python/ya.make
+++ b/contrib/python/ya.make
@@ -341,7 +341,7 @@ RECURSE(
environs
envoy
ephem
- escapism
+ escapism
etcd3
excel-formulas-calculator
execnet
@@ -565,9 +565,9 @@ RECURSE(
junitparser
jupyter_client
jupyter_core
- jupyter-telemetry
+ jupyter-telemetry
jupyterhub
- jupyterhub-traefik-proxy
+ jupyterhub-traefik-proxy
jupytext
kaitaistruct
kazoo
@@ -716,7 +716,7 @@ RECURSE(
Parsley
parso
partd
- passlib
+ passlib
patch
patched
path.py
diff --git a/ydb/tests/tools/ydb_serializable/lib/ya.make b/ydb/tests/tools/ydb_serializable/lib/ya.make
index 4567d7245d..0ae181b648 100644
--- a/ydb/tests/tools/ydb_serializable/lib/ya.make
+++ b/ydb/tests/tools/ydb_serializable/lib/ya.make
@@ -3,7 +3,7 @@ PY3_LIBRARY()
OWNER(g:kikimr)
PEERDIR(
- contrib/python/tornado/tornado-4
+ contrib/python/tornado/tornado-4
ydb/tests/library
ydb/public/sdk/python/ydb
)
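With the ``PEERDIR`` above in place, Python code in this library resolves ``tornado`` to the vendored 4.5.3 tree. A hypothetical smoke-test sketch (the coroutine below is illustrative only, not part of the change)::

    # Assumes the PEERDIR on contrib/python/tornado/tornado-4 shown above.
    from tornado import gen, ioloop

    @gen.coroutine
    def ping():
        # gen.Return is the Tornado 4 / Python 2 compatible way to
        # return a value from a coroutine.
        raise gen.Return("pong")

    print(ioloop.IOLoop.current().run_sync(ping))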