aboutsummaryrefslogtreecommitdiffstats
path: root/contrib/deprecated/python/subprocess32
diff options
context:
space:
mode:
authornkozlovskiy <nmk@ydb.tech>2023-10-02 18:57:38 +0300
committernkozlovskiy <nmk@ydb.tech>2023-10-02 19:39:06 +0300
commit6295ef4d23465c11296e898b9dc4524ad9592b5d (patch)
treefc0c852877b2c52f365a1f6ed0710955844338c2 /contrib/deprecated/python/subprocess32
parentde63c80b75948ecc13894854514d147840ff8430 (diff)
downloadydb-6295ef4d23465c11296e898b9dc4524ad9592b5d.tar.gz
oss ydb: fix dstool building and test run
Diffstat (limited to 'contrib/deprecated/python/subprocess32')
-rw-r--r--contrib/deprecated/python/subprocess32/ChangeLog185
-rw-r--r--contrib/deprecated/python/subprocess32/LICENSE283
-rw-r--r--contrib/deprecated/python/subprocess32/README.md50
-rw-r--r--contrib/deprecated/python/subprocess32/_posixsubprocess.c927
-rw-r--r--contrib/deprecated/python/subprocess32/_posixsubprocess_config.h115
-rw-r--r--contrib/deprecated/python/subprocess32/_posixsubprocess_helpers.c174
-rw-r--r--contrib/deprecated/python/subprocess32/subprocess32.py1752
-rw-r--r--contrib/deprecated/python/subprocess32/test_subprocess32.py2485
-rw-r--r--contrib/deprecated/python/subprocess32/testdata/fd_status.py34
-rw-r--r--contrib/deprecated/python/subprocess32/testdata/input_reader.py7
-rw-r--r--contrib/deprecated/python/subprocess32/testdata/qcat.py7
-rw-r--r--contrib/deprecated/python/subprocess32/testdata/qgrep.py10
-rw-r--r--contrib/deprecated/python/subprocess32/testdata/sigchild_ignore.py18
-rw-r--r--contrib/deprecated/python/subprocess32/testdata/ya.make19
-rw-r--r--contrib/deprecated/python/subprocess32/ya.make35
15 files changed, 6101 insertions, 0 deletions
diff --git a/contrib/deprecated/python/subprocess32/ChangeLog b/contrib/deprecated/python/subprocess32/ChangeLog
new file mode 100644
index 0000000000..7db7d8465e
--- /dev/null
+++ b/contrib/deprecated/python/subprocess32/ChangeLog
@@ -0,0 +1,185 @@
+-----------------
+2019-05-20 3.5.4
+-----------------
+ * Promoted 3.5.4rc2 to become 3.5.4.
+
+-----------------
+2019-05-13 3.5.4rc2
+-----------------
+ * GitHub [#57]: TimeoutExpired and CalledProcessError exceptions can now
+ be unpickled.
+
+-----------------
+2019-05-10 3.5.4rc1
+-----------------
+* GitHub [#61]: Backport the fixes for https://bugs.python.org/issue10963,
+ https://bugs.python.org/issue19612, and https://bugs.python.org/issue30418.
+ When a child process was so short lived that it died or closed its pipes before
+ you call communicate(). When supplying stdin or reading its output in
+ this scenario, do not raise an unexpected broken pipe or interrupted
+ system call exception.
+
+-----------------
+2018-10-09 3.5.3
+-----------------
+* Disallow ridiculously large numbers (millions) of arguments. [#54]
+
+-----------------
+2018-06-07 3.5.2
+-----------------
+* Explicitly include <signal.h> in _posixsubprocess_helpers.c; it already
+ gets configure checked and pulled in via Python's own <Python.h> in many
+ circumstances but it is better to be explicit. #IWYU
+ If you were using subprocess32 on a Python interpreter built *without*
+ the --with-fpectl configure option enabled, restore_signals is now
+ useful rather than a no-op. I do not know if such builds were common.
+* Adds a functional test for restore_signals=True behavior.
+
+-----------------
+2018-05-21 3.5.1
+-----------------
+* Fix AttributeError: 'module' object has no attribute 'Signals' when
+ constructing a CalledProcessError exception. [#49]
+
+-----------------
+2018-05-13 3.5.0 (3.5.0rc3)
+-----------------
+
+* Fixed the packaging problem where the stdlib python3_redirect shim is
+ supposed to be installed on Python 3.
+* Renamed _posixsubprocess to _posixsubprocess32 for consistency.
+* Unset CLOEXEC on file descriptors given to Popen pass_fds. (GH #4)
+* Drop support for Python 2.4 and 2.5.
+* Adds a configure script - run by setup.py - to supply the missing feature
+ #define's for the _posixsubprocess32 extension module for the things that
+ Python 2's own configure generated pyconfig.h does not already provide.
+
+-----------------
+2017-10-18 3.5.0rc1
+-----------------
+
+* Backport the new subprocess.run() API from Python 3.5.
+* Backport subprocess.DEVNULL support from 3.3.
+* Allow stderr to be redirected to stdout even when stdout is not redirected.
+ https://bugs.python.org/issue22274
+* Fix subprocess.Popen.wait() when the child process has exited to
+  a stopped instead of terminated state (ex: when under ptrace).
+ https://bugs.python.org/issue29335
+* Include the private API needed by the multiprocessing module for people who
+ want to drop subprocess32 in as a replacement for their standard library
+ subprocess module.
+* Fix a compilation issue regarding O_CLOEXEC not being defined on ancient
+ Linux distros such as RHEL 5.
+
+-----------------
+2015-11-15 3.2.7
+-----------------
+
+* Issue #6973: When we know a subprocess.Popen process has died, do
+ not allow the send_signal(), terminate(), or kill() methods to do
+ anything as they could potentially signal a different process.
+* Issue #23564: Fixed a partially broken sanity check in the _posixsubprocess
+ internals regarding how fds_to_pass were passed to the child. The bug had
+ no actual impact as subprocess32.py already avoided it.
+
+-----------------
+2015-11-14 3.2.7rc2
+-----------------
+
+* Moved the repository from code.google.com to github.
+* Added a _WAIT_TIMEOUT to satisfy the unsupported people entirely on
+ their own trying to use this on Windows.
+* Similarly: Updated setup.py to not build the extension on non-posix.
+
+-----------------
+2014-06-01 3.2.7rc1
+-----------------
+
+* Issue #21618: The subprocess module could fail to close open fds that were
+ inherited by the calling process and already higher than POSIX resource
+ limits would otherwise allow. On systems with a functioning /proc/self/fd
+ or /dev/fd interface the max is now ignored and all fds are closed.
+
+-----------------
+2014-04-23 3.2.6
+-----------------
+
+* Fixes issue #21291: Popen.wait() is now thread safe so that multiple
+ threads may be calling wait() or poll() on a Popen instance at the same time
+ without losing the Popen.returncode value.
+* Fixes issue #14396: Handle the odd rare case of waitpid returning 0 when not
+ expected in Popen.wait().
+* Fixes issue #16962: Use getdents64 instead of the obsolete getdents syscall
+ on Linux. Some architectures do not implement the latter.
+
+-----------------
+2013-12-10 3.2.5
+-----------------
+
+* Fixes issue #15798: subprocess.Popen() no longer fails if file
+ descriptor 0, 1 or 2 is closed.
+* Fixes issue #18763: close_fd file descriptors are now closed after
+ any preexec_fn call.
+
+-----------------
+2013-06-15 3.2.5rc1
+-----------------
+
+* Fixes issue #16650 - Don't reference ECHILD from outside the local scope.
+* Unittests no longer spew any test data for human verification to stdout.
+* Remove a bare print to stdout that could have happened if the child process
+ wrote garbage to its pre-exec error pipe.
+* Fixes issue #16327 - the subprocess module no longer leaks file descriptors
+ used for stdin/stdout/stderr pipes to the child when the fork() fails. It
+ also no longer potentially double closes these pipe fds.
+* Correct the Python version check around use of imp_module to specify 2.6.3
+  as the minimum version that it exists in. Why is anyone using such an old 2.6?
+* Fixes Issue #16114: The subprocess module no longer provides a misleading
+ error message stating that args[0] did not exist when either the cwd or
+ executable keyword arguments specified a path that did not exist.
+* Add more Popen cwd tests.
+* Handle errno.ECHILD in poll.
+* Don't leak a reference to the gc module on capi use error.
+* Check return value to avoid a crash if the capi were misused.
+* Check result of PyObject_IsTrue().
+* Adds test_universal_newlines_communicate_input_none.
+* Most everything above consists of backports. See the hg logs for their
+ upstream hg.python.org cpython revision numbers.
+
+----------------
+2012-06-10 3.2.3
+----------------
+
+* Fixes the references to the 'surrogateescape' unicode encoding error
+ handler that does not exist in Python 2.x. 'strict' is used so that
+ a UnicodeEncodeError exception is raised in these situations. These
+ MAY occur if your sys.getfilesystemencoding() is not UTF-8 and
+ attempt to use a non-ascii executable, args or env values. Prior to
+ this change, those would result in a hard to debug LookupError for
+ surrogateescape.
+* Issue #15000: Support the "unique" x32 architecture in _posixsubprocess.c.
+* Fix a compilation problem when O_CLOEXEC is not defined.
+
+------------------
+2012-02-18 3.2.3b1
+------------------
+
+This release brings in the last year and a half's worth of bugfixes and
+improvements to Python 3.2's subprocess module:
+
+Off the top of my head, some major bugfix highlights include:
+ * Timeout support on the APIs.
+ * close_fds=True is now the default (as it is in 3.2) and performs much faster.
+ * Fixed EINTR handling.
+ * Fixed SIGCHLD handling.
+ * Fixed several race conditions.
+ * Many more bug fixes too numerous to list.
+
+You can grep out the full list of improvements related to subprocess in:
+ http://hg.python.org/cpython/file/9ce5d456138b/Misc/NEWS
+
+-------------
+2010-06 3.2.0
+-------------
+
+This was the first release. Roughly equivalent to Python 3.2.0a1.
diff --git a/contrib/deprecated/python/subprocess32/LICENSE b/contrib/deprecated/python/subprocess32/LICENSE
new file mode 100644
index 0000000000..0d336624f4
--- /dev/null
+++ b/contrib/deprecated/python/subprocess32/LICENSE
@@ -0,0 +1,283 @@
+A. HISTORY OF THE SOFTWARE
+==========================
+
+Python was created in the early 1990s by Guido van Rossum at Stichting
+Mathematisch Centrum (CWI, see http://www.cwi.nl) in the Netherlands
+as a successor of a language called ABC. Guido remains Python's
+principal author, although it includes many contributions from others.
+
+In 1995, Guido continued his work on Python at the Corporation for
+National Research Initiatives (CNRI, see http://www.cnri.reston.va.us)
+in Reston, Virginia where he released several versions of the
+software.
+
+In May 2000, Guido and the Python core development team moved to
+BeOpen.com to form the BeOpen PythonLabs team. In October of the same
+year, the PythonLabs team moved to Digital Creations (now Zope
+Corporation, see http://www.zope.com). In 2001, the Python Software
+Foundation (PSF, see http://www.python.org/psf/) was formed, a
+non-profit organization created specifically to own Python-related
+Intellectual Property. Zope Corporation is a sponsoring member of
+the PSF.
+
+All Python releases are Open Source (see http://www.opensource.org for
+the Open Source Definition). Historically, most, but not all, Python
+releases have also been GPL-compatible; the table below summarizes
+the various releases.
+
+ Release Derived Year Owner GPL-
+ from compatible? (1)
+
+ 0.9.0 thru 1.2 1991-1995 CWI yes
+ 1.3 thru 1.5.2 1.2 1995-1999 CNRI yes
+ 1.6 1.5.2 2000 CNRI no
+ 2.0 1.6 2000 BeOpen.com no
+ 1.6.1 1.6 2001 CNRI yes (2)
+ 2.1 2.0+1.6.1 2001 PSF no
+ 2.0.1 2.0+1.6.1 2001 PSF yes
+ 2.1.1 2.1+2.0.1 2001 PSF yes
+ 2.2 2.1.1 2001 PSF yes
+ 2.1.2 2.1.1 2002 PSF yes
+ 2.1.3 2.1.2 2002 PSF yes
+ 2.2.1 2.2 2002 PSF yes
+ 2.2.2 2.2.1 2002 PSF yes
+ 2.2.3 2.2.2 2003 PSF yes
+ 2.3 2.2.2 2002-2003 PSF yes
+ 2.3.1 2.3 2002-2003 PSF yes
+ 2.3.2 2.3.1 2002-2003 PSF yes
+ 2.3.3 2.3.2 2002-2003 PSF yes
+ 2.3.4 2.3.3 2004 PSF yes
+ 2.3.5 2.3.4 2005 PSF yes
+ 2.4 2.3 2004 PSF yes
+ 2.4.1 2.4 2005 PSF yes
+ 2.4.2 2.4.1 2005 PSF yes
+ 2.4.3 2.4.2 2006 PSF yes
+ 2.4.4 2.4.3 2006 PSF yes
+ 2.5 2.4 2006 PSF yes
+ 2.5.1 2.5 2007 PSF yes
+ 2.5.2 2.5.1 2008 PSF yes
+ 2.5.3 2.5.2 2008 PSF yes
+ 2.6 2.5 2008 PSF yes
+ 2.6.1 2.6 2008 PSF yes
+ 2.6.2 2.6.1 2009 PSF yes
+ 2.6.3 2.6.2 2009 PSF yes
+ 2.6.4 2.6.3 2009 PSF yes
+ 2.6.5 2.6.4 2010 PSF yes
+ 3.0 2.6 2008 PSF yes
+ 3.0.1 3.0 2009 PSF yes
+ 3.1 3.0.1 2009 PSF yes
+ 3.1.1 3.1 2009 PSF yes
+ 3.1.2 3.1 2010 PSF yes
+
+Footnotes:
+
+(1) GPL-compatible doesn't mean that we're distributing Python under
+ the GPL. All Python licenses, unlike the GPL, let you distribute
+ a modified version without making your changes open source. The
+ GPL-compatible licenses make it possible to combine Python with
+ other software that is released under the GPL; the others don't.
+
+(2) According to Richard Stallman, 1.6.1 is not GPL-compatible,
+ because its license has a choice of law clause. According to
+ CNRI, however, Stallman's lawyer has told CNRI's lawyer that 1.6.1
+ is "not incompatible" with the GPL.
+
+Thanks to the many outside volunteers who have worked under Guido's
+direction to make these releases possible.
+
+
+B. TERMS AND CONDITIONS FOR ACCESSING OR OTHERWISE USING PYTHON
+===============================================================
+
+PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
+--------------------------------------------
+
+1. This LICENSE AGREEMENT is between the Python Software Foundation
+("PSF"), and the Individual or Organization ("Licensee") accessing and
+otherwise using this software ("Python") in source or binary form and
+its associated documentation.
+
+2. Subject to the terms and conditions of this License Agreement, PSF hereby
+grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
+analyze, test, perform and/or display publicly, prepare derivative works,
+distribute, and otherwise use Python alone or in any derivative version,
+provided, however, that PSF's License Agreement and PSF's notice of copyright,
+i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
+Python Software Foundation; All Rights Reserved" are retained in Python alone or
+in any derivative version prepared by Licensee.
+
+3. In the event Licensee prepares a derivative work that is based on
+or incorporates Python or any part thereof, and wants to make
+the derivative work available to others as provided herein, then
+Licensee hereby agrees to include in any such work a brief summary of
+the changes made to Python.
+
+4. PSF is making Python available to Licensee on an "AS IS"
+basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
+IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
+DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
+FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
+INFRINGE ANY THIRD PARTY RIGHTS.
+
+5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
+FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
+A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
+OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
+
+6. This License Agreement will automatically terminate upon a material
+breach of its terms and conditions.
+
+7. Nothing in this License Agreement shall be deemed to create any
+relationship of agency, partnership, or joint venture between PSF and
+Licensee. This License Agreement does not grant permission to use PSF
+trademarks or trade name in a trademark sense to endorse or promote
+products or services of Licensee, or any third party.
+
+8. By copying, installing or otherwise using Python, Licensee
+agrees to be bound by the terms and conditions of this License
+Agreement.
+
+
+BEOPEN.COM LICENSE AGREEMENT FOR PYTHON 2.0
+-------------------------------------------
+
+BEOPEN PYTHON OPEN SOURCE LICENSE AGREEMENT VERSION 1
+
+1. This LICENSE AGREEMENT is between BeOpen.com ("BeOpen"), having an
+office at 160 Saratoga Avenue, Santa Clara, CA 95051, and the
+Individual or Organization ("Licensee") accessing and otherwise using
+this software in source or binary form and its associated
+documentation ("the Software").
+
+2. Subject to the terms and conditions of this BeOpen Python License
+Agreement, BeOpen hereby grants Licensee a non-exclusive,
+royalty-free, world-wide license to reproduce, analyze, test, perform
+and/or display publicly, prepare derivative works, distribute, and
+otherwise use the Software alone or in any derivative version,
+provided, however, that the BeOpen Python License is retained in the
+Software, alone or in any derivative version prepared by Licensee.
+
+3. BeOpen is making the Software available to Licensee on an "AS IS"
+basis. BEOPEN MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
+IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, BEOPEN MAKES NO AND
+DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
+FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF THE SOFTWARE WILL NOT
+INFRINGE ANY THIRD PARTY RIGHTS.
+
+4. BEOPEN SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF THE
+SOFTWARE FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS
+AS A RESULT OF USING, MODIFYING OR DISTRIBUTING THE SOFTWARE, OR ANY
+DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
+
+5. This License Agreement will automatically terminate upon a material
+breach of its terms and conditions.
+
+6. This License Agreement shall be governed by and interpreted in all
+respects by the law of the State of California, excluding conflict of
+law provisions. Nothing in this License Agreement shall be deemed to
+create any relationship of agency, partnership, or joint venture
+between BeOpen and Licensee. This License Agreement does not grant
+permission to use BeOpen trademarks or trade names in a trademark
+sense to endorse or promote products or services of Licensee, or any
+third party. As an exception, the "BeOpen Python" logos available at
+http://www.pythonlabs.com/logos.html may be used according to the
+permissions granted on that web page.
+
+7. By copying, installing or otherwise using the software, Licensee
+agrees to be bound by the terms and conditions of this License
+Agreement.
+
+
+CNRI LICENSE AGREEMENT FOR PYTHON 1.6.1
+---------------------------------------
+
+1. This LICENSE AGREEMENT is between the Corporation for National
+Research Initiatives, having an office at 1895 Preston White Drive,
+Reston, VA 20191 ("CNRI"), and the Individual or Organization
+("Licensee") accessing and otherwise using Python 1.6.1 software in
+source or binary form and its associated documentation.
+
+2. Subject to the terms and conditions of this License Agreement, CNRI
+hereby grants Licensee a nonexclusive, royalty-free, world-wide
+license to reproduce, analyze, test, perform and/or display publicly,
+prepare derivative works, distribute, and otherwise use Python 1.6.1
+alone or in any derivative version, provided, however, that CNRI's
+License Agreement and CNRI's notice of copyright, i.e., "Copyright (c)
+1995-2001 Corporation for National Research Initiatives; All Rights
+Reserved" are retained in Python 1.6.1 alone or in any derivative
+version prepared by Licensee. Alternately, in lieu of CNRI's License
+Agreement, Licensee may substitute the following text (omitting the
+quotes): "Python 1.6.1 is made available subject to the terms and
+conditions in CNRI's License Agreement. This Agreement together with
+Python 1.6.1 may be located on the Internet using the following
+unique, persistent identifier (known as a handle): 1895.22/1013. This
+Agreement may also be obtained from a proxy server on the Internet
+using the following URL: http://hdl.handle.net/1895.22/1013".
+
+3. In the event Licensee prepares a derivative work that is based on
+or incorporates Python 1.6.1 or any part thereof, and wants to make
+the derivative work available to others as provided herein, then
+Licensee hereby agrees to include in any such work a brief summary of
+the changes made to Python 1.6.1.
+
+4. CNRI is making Python 1.6.1 available to Licensee on an "AS IS"
+basis. CNRI MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
+IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, CNRI MAKES NO AND
+DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
+FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON 1.6.1 WILL NOT
+INFRINGE ANY THIRD PARTY RIGHTS.
+
+5. CNRI SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
+1.6.1 FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
+A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON 1.6.1,
+OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
+
+6. This License Agreement will automatically terminate upon a material
+breach of its terms and conditions.
+
+7. This License Agreement shall be governed by the federal
+intellectual property law of the United States, including without
+limitation the federal copyright law, and, to the extent such
+U.S. federal law does not apply, by the law of the Commonwealth of
+Virginia, excluding Virginia's conflict of law provisions.
+Notwithstanding the foregoing, with regard to derivative works based
+on Python 1.6.1 that incorporate non-separable material that was
+previously distributed under the GNU General Public License (GPL), the
+law of the Commonwealth of Virginia shall govern this License
+Agreement only as to issues arising under or with respect to
+Paragraphs 4, 5, and 7 of this License Agreement. Nothing in this
+License Agreement shall be deemed to create any relationship of
+agency, partnership, or joint venture between CNRI and Licensee. This
+License Agreement does not grant permission to use CNRI trademarks or
+trade name in a trademark sense to endorse or promote products or
+services of Licensee, or any third party.
+
+8. By clicking on the "ACCEPT" button where indicated, or by copying,
+installing or otherwise using Python 1.6.1, Licensee agrees to be
+bound by the terms and conditions of this License Agreement.
+
+ ACCEPT
+
+
+CWI LICENSE AGREEMENT FOR PYTHON 0.9.0 THROUGH 1.2
+--------------------------------------------------
+
+Copyright (c) 1991 - 1995, Stichting Mathematisch Centrum Amsterdam,
+The Netherlands. All rights reserved.
+
+Permission to use, copy, modify, and distribute this software and its
+documentation for any purpose and without fee is hereby granted,
+provided that the above copyright notice appear in all copies and that
+both that copyright notice and this permission notice appear in
+supporting documentation, and that the name of Stichting Mathematisch
+Centrum or CWI not be used in advertising or publicity pertaining to
+distribution of the software without specific, written prior
+permission.
+
+STICHTING MATHEMATISCH CENTRUM DISCLAIMS ALL WARRANTIES WITH REGARD TO
+THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
+FITNESS, IN NO EVENT SHALL STICHTING MATHEMATISCH CENTRUM BE LIABLE
+FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
diff --git a/contrib/deprecated/python/subprocess32/README.md b/contrib/deprecated/python/subprocess32/README.md
new file mode 100644
index 0000000000..919e0929ed
--- /dev/null
+++ b/contrib/deprecated/python/subprocess32/README.md
@@ -0,0 +1,50 @@
+subprocess32
+------------
+[![PyPI version](https://badge.fury.io/py/subprocess32.svg)](https://badge.fury.io/py/subprocess32)
+[![POSIX Build Status](https://travis-ci.org/google/python-subprocess32.svg?branch=master)](https://travis-ci.org/google/python-subprocess32)
+[![Windows Build Status](https://ci.appveyor.com/api/projects/status/53apbb2jk1uslj0m?svg=true
+)](https://ci.appveyor.com/project/gpshead/python-subprocess32)
+
+This is a backport of the Python 3 subprocess module for use on Python 2.
+This code has not been tested on Windows or other non-POSIX platforms.
+
+subprocess32 includes many important reliability bug fixes relevant on
+POSIX platforms. The most important of which is a C extension module
+used internally to handle the code path between fork() and exec().
+This module is reliable when an application is using threads.
+
+Refer to the
+[Python 3.5 subprocess documentation](https://docs.python.org/3.5/library/subprocess.html)
+for usage information.
+
+* Timeout support backported from Python 3.3 is included.
+* The run() API from Python 3.5 was backported in subprocess32 3.5.0.
+* Otherwise features are frozen at the 3.2 level.
+
+Usage
+-----
+
+The recommended pattern for cross platform code is to use the following:
+
+```python
+if os.name == 'posix' and sys.version_info[0] < 3:
+ import subprocess32 as subprocess
+else:
+ import subprocess
+```
+
+Or if you fully control your POSIX Python 2.7 installation, this can serve
+as a replacement for its subprocess module. Users will thank you by not
+filing concurrency bugs.
+
+Got Bugs?
+---------
+
+Try to reproduce them on the latest Python 3.x itself and file bug
+reports on [bugs.python.org](https://bugs.python.org/).
+Add gregory.p.smith to the Nosy list.
+
+If you have reason to believe the issue is specifically with this backport
+and not a problem in Python 3 itself, use the github issue tracker.
+
+-- Gregory P. Smith _greg@krypto.org_
diff --git a/contrib/deprecated/python/subprocess32/_posixsubprocess.c b/contrib/deprecated/python/subprocess32/_posixsubprocess.c
new file mode 100644
index 0000000000..b6cb77ca23
--- /dev/null
+++ b/contrib/deprecated/python/subprocess32/_posixsubprocess.c
@@ -0,0 +1,927 @@
+/* Authors: Gregory P. Smith & Jeffrey Yasskin */
+
+/* We use our own small autoconf to fill in for things that were not checked
+ * for in Python 2's configure and thus pyconfig.h.
+ *
+ * This comes before Python.h on purpose. 2.7's Python.h redefines critical
+ * defines such as _POSIX_C_SOURCE with undesirable old values impacting system
+ * which header defines are available.
+ */
+#include "_posixsubprocess_config.h"
+#ifdef HAVE_SYS_CDEFS_H
+#include <sys/cdefs.h>
+#endif
+
+#define PY_SSIZE_T_CLEAN
+#include "Python.h"
+
+#include <unistd.h>
+#include <fcntl.h>
+#ifdef HAVE_SYS_TYPES_H
+#include <sys/types.h>
+#endif
+#if defined(HAVE_SYS_STAT_H) && defined(__FreeBSD__)
+#include <sys/stat.h>
+#endif
+#ifdef HAVE_SYS_SYSCALL_H
+#include <sys/syscall.h>
+#endif
+#ifdef HAVE_DIRENT_H
+#include <dirent.h>
+#endif
+
+/* TODO: Some platform conditions below could move into configure.ac. */
+
+#if defined(__ANDROID__) && !defined(SYS_getdents64)
+/* Android doesn't expose syscalls, add the definition manually. */
+# include <sys/linux-syscalls.h>
+# define SYS_getdents64 __NR_getdents64
+#endif
+
+#include "_posixsubprocess_helpers.c"
+
+#if (PY_VERSION_HEX < 0x02060300)
+/* These are not public API functions until 2.6.3. */
+static void _PyImport_AcquireLock(void);
+static int _PyImport_ReleaseLock(void);
+#endif
+
+#if defined(sun)
+/* readdir64 is used to work around Solaris 9 bug 6395699. */
+# define readdir readdir64
+# define dirent dirent64
+# if !defined(HAVE_DIRFD)
+/* Some versions of Solaris lack dirfd(). */
+# define dirfd(dirp) ((dirp)->dd_fd)
+# define HAVE_DIRFD
+# endif
+#endif
+
+#if defined(__FreeBSD__) || (defined(__APPLE__) && defined(__MACH__))
+# define FD_DIR "/dev/fd"
+#else
+# define FD_DIR "/proc/self/fd"
+#endif
+
+#define POSIX_CALL(call) if ((call) == -1) goto error
+
+
+/* Given the gc module call gc.enable() and return 0 on success. */
+static int
+_enable_gc(PyObject *gc_module)
+{
+ PyObject *result;
+ result = PyObject_CallMethod(gc_module, "enable", NULL);
+ if (result == NULL)
+ return 1;
+ Py_DECREF(result);
+ return 0;
+}
+
+
+/* Convert ASCII to a positive int, no libc call. no overflow. -1 on error. */
+static int
+_pos_int_from_ascii(char *name)
+{
+ int num = 0;
+ while (*name >= '0' && *name <= '9') {
+ num = num * 10 + (*name - '0');
+ ++name;
+ }
+ if (*name)
+ return -1; /* Non digit found, not a number. */
+ return num;
+}
+
+
+#if defined(__FreeBSD__)
+/* When /dev/fd isn't mounted it is often a static directory populated
+ * with 0 1 2 or entries for 0 .. 63 on FreeBSD, NetBSD and OpenBSD.
+ * NetBSD and OpenBSD have a /proc fs available (though not necessarily
+ * mounted) and do not have fdescfs for /dev/fd. MacOS X has a devfs
+ * that properly supports /dev/fd.
+ */
+static int
+_is_fdescfs_mounted_on_dev_fd()
+{
+ struct stat dev_stat;
+ struct stat dev_fd_stat;
+ if (stat("/dev", &dev_stat) != 0)
+ return 0;
+ if (stat(FD_DIR, &dev_fd_stat) != 0)
+ return 0;
+ if (dev_stat.st_dev == dev_fd_stat.st_dev)
+ return 0; /* / == /dev == /dev/fd means it is static. #fail */
+ return 1;
+}
+#endif
+
+
+/* Returns 1 if there is a problem with fd_sequence, 0 otherwise. */
+static int
+_sanity_check_python_fd_sequence(PyObject *fd_sequence)
+{
+ Py_ssize_t seq_idx, seq_len = PySequence_Length(fd_sequence);
+ long prev_fd = -1;
+ for (seq_idx = 0; seq_idx < seq_len; ++seq_idx) {
+ PyObject* py_fd = PySequence_Fast_GET_ITEM(fd_sequence, seq_idx);
+ long iter_fd = PyLong_AsLong(py_fd);
+ if (iter_fd < 0 || iter_fd <= prev_fd || iter_fd > INT_MAX) {
+ /* Negative, overflow, not a Long, unsorted, too big for a fd. */
+ return 1;
+ }
+ prev_fd = iter_fd;
+ }
+ return 0;
+}
+
+
+/* Is fd found in the sorted Python Sequence? */
+static int
+_is_fd_in_sorted_fd_sequence(int fd, PyObject *fd_sequence)
+{
+ /* Binary search. */
+ Py_ssize_t search_min = 0;
+ Py_ssize_t search_max = PySequence_Length(fd_sequence) - 1;
+ if (search_max < 0)
+ return 0;
+ do {
+ long middle = (search_min + search_max) / 2;
+ long middle_fd = PyLong_AsLong(
+ PySequence_Fast_GET_ITEM(fd_sequence, middle));
+ if (fd == middle_fd)
+ return 1;
+ if (fd > middle_fd)
+ search_min = middle + 1;
+ else
+ search_max = middle - 1;
+ } while (search_min <= search_max);
+ return 0;
+}
+
+
+/* Get the maximum file descriptor that could be opened by this process.
+ * This function is async signal safe for use between fork() and exec().
+ */
+static long
+safe_get_max_fd(void)
+{
+ long local_max_fd;
+#if defined(__NetBSD__)
+ local_max_fd = fcntl(0, F_MAXFD);
+ if (local_max_fd >= 0)
+ return local_max_fd;
+#endif
+#ifdef _SC_OPEN_MAX
+ local_max_fd = sysconf(_SC_OPEN_MAX);
+ if (local_max_fd == -1)
+#endif
+ local_max_fd = 256; /* Matches legacy Lib/subprocess.py behavior. */
+ return local_max_fd;
+}
+
+/* While uncommon in Python 2 applications, this makes sure the
+ * close on exec flag is unset on the subprocess32.Popen pass_fds.
+ * https://github.com/google/python-subprocess32/issues/4.
+ */
+static void
+_unset_cloexec_on_fds(PyObject *py_fds_to_keep, int errpipe_write)
+{
+#ifdef FD_CLOEXEC
+ Py_ssize_t num_fds_to_keep = PySequence_Length(py_fds_to_keep);
+ Py_ssize_t keep_seq_idx;
+ /* As py_fds_to_keep is sorted we can loop through the list closing
+ * fds inbetween any in the keep list falling within our range. */
+ for (keep_seq_idx = 0; keep_seq_idx < num_fds_to_keep; ++keep_seq_idx) {
+ PyObject* py_keep_fd = PySequence_Fast_GET_ITEM(py_fds_to_keep,
+ keep_seq_idx);
+ // We just keep going on errors below, there is nothing we can
+ // usefully do to report them. This is best effort.
+ long fd = PyLong_AsLong(py_keep_fd);
+ if (fd < 0) continue;
+ if (fd == errpipe_write) continue; // This one keeps its CLOEXEC.
+ // We could use ioctl FIONCLEX, but that is a more modern API
+ // not available everywhere and we are a single threaded child.
+ int old_flags = fcntl(fd, F_GETFD);
+ if (old_flags != -1) {
+ fcntl(fd, F_SETFD, old_flags & ~FD_CLOEXEC);
+ }
+ }
+#endif
+}
+
+/* Close all file descriptors in the range from start_fd and higher
+ * except for those in py_fds_to_keep. If the range defined by
+ * [start_fd, safe_get_max_fd()) is large this will take a long
+ * time as it calls close() on EVERY possible fd.
+ *
+ * It isn't possible to know for sure what the max fd to go up to
+ * is for processes with the capability of raising their maximum.
+ */
+static void
+_close_fds_by_brute_force(long start_fd, PyObject *py_fds_to_keep)
+{
+    /* end_fd is a max-fd *count* (e.g. sysconf(_SC_OPEN_MAX)), so the
+     * highest possible fd is end_fd - 1 and the loops below use `<`. */
+    long end_fd = safe_get_max_fd();
+    Py_ssize_t num_fds_to_keep = PySequence_Length(py_fds_to_keep);
+    Py_ssize_t keep_seq_idx;
+    int fd_num;
+    /* As py_fds_to_keep is sorted we can loop through the list closing
+     * fds in between any in the keep list falling within our range. */
+    for (keep_seq_idx = 0; keep_seq_idx < num_fds_to_keep; ++keep_seq_idx) {
+        PyObject* py_keep_fd = PySequence_Fast_GET_ITEM(py_fds_to_keep,
+                                                        keep_seq_idx);
+        int keep_fd = PyLong_AsLong(py_keep_fd);
+        if (keep_fd < start_fd)
+            continue;  /* already below the range we are closing */
+        /* Close everything between the previous keeper and this one. */
+        for (fd_num = start_fd; fd_num < keep_fd; ++fd_num) {
+            while (close(fd_num) < 0 && errno == EINTR);  /* retry on EINTR */
+        }
+        start_fd = keep_fd + 1;  /* step over the fd being kept */
+    }
+    if (start_fd <= end_fd) {
+        /* Close everything above the last keeper. */
+        for (fd_num = start_fd; fd_num < end_fd; ++fd_num) {
+            while (close(fd_num) < 0 && errno == EINTR);
+        }
+    }
+}
+
+
+#if defined(__linux__) && defined(HAVE_SYS_SYSCALL_H)
+/* It doesn't matter if d_name has room for NAME_MAX chars; we're using this
+ * only to read a directory of short file descriptor number names. The kernel
+ * will return an error if we didn't give it enough space. Highly Unlikely.
+ * This structure is very old and stable: It will not change unless the kernel
+ * chooses to break compatibility with all existing binaries. Highly Unlikely.
+ */
+struct linux_dirent64 {
+    unsigned long long d_ino;    /* 64-bit inode number */
+    long long d_off;             /* offset to the next linux_dirent64 */
+    unsigned short d_reclen;     /* Length of this linux_dirent */
+    unsigned char d_type;        /* file type */
+    char d_name[256];            /* Filename (null-terminated) */
+};
+
+/* Close all open file descriptors in the range from start_fd and higher
+ * Do not close any in the sorted py_fds_to_keep list.
+ *
+ * This version is async signal safe as it does not make any unsafe C library
+ * calls, malloc calls or handle any locks. It is _unfortunate_ to be forced
+ * to resort to making a kernel system call directly but this is the ONLY api
+ * available that does no harm. opendir/readdir/closedir perform memory
+ * allocation and locking so while they usually work they are not guaranteed
+ * to (especially if you have replaced your malloc implementation). A version
+ * of this function that uses those can be found in the _maybe_unsafe variant.
+ *
+ * This is Linux specific because that is all I am ready to test it on. It
+ * should be easy to add OS specific dirent or dirent64 structures and modify
+ * it with some cpp #define magic to work on other OSes as well if you want.
+ */
+static void
+_close_open_fds_safe(int start_fd, PyObject* py_fds_to_keep)
+{
+    int fd_dir_fd;
+    /* FD_DIR is the per-process fd directory (defined earlier in this
+     * file; on Linux presumably /proc/self/fd -- see its definition). */
+#ifdef O_CLOEXEC
+    fd_dir_fd = open(FD_DIR, O_RDONLY | O_CLOEXEC, 0);
+#else
+    fd_dir_fd = open(FD_DIR, O_RDONLY, 0);
+#ifdef FD_CLOEXEC
+    {
+        /* No O_CLOEXEC: set the flag after the fact.  Harmless if the
+         * open() failed; fcntl(-1, ...) simply returns an error. */
+        int old = fcntl(fd_dir_fd, F_GETFD);
+        if (old != -1)
+            fcntl(fd_dir_fd, F_SETFD, old | FD_CLOEXEC);
+    }
+#endif
+#endif
+    if (fd_dir_fd == -1) {
+        /* No way to get a list of open fds. */
+        _close_fds_by_brute_force(start_fd, py_fds_to_keep);
+        return;
+    } else {
+        char buffer[sizeof(struct linux_dirent64)] = {0};
+        int bytes;
+        /* Raw getdents64 syscall: readdir() allocates/locks and is not
+         * async-signal-safe (see the comment above this function). */
+        while ((bytes = syscall(SYS_getdents64, fd_dir_fd,
+                                (struct linux_dirent64 *)buffer,
+                                sizeof(buffer))) > 0) {
+            struct linux_dirent64 *entry;
+            int offset;
+            /* d_reclen advances across the variable-length entries. */
+            for (offset = 0; offset < bytes; offset += entry->d_reclen) {
+                int fd;
+                entry = (struct linux_dirent64 *)(buffer + offset);
+                if ((fd = _pos_int_from_ascii(entry->d_name)) < 0)
+                    continue; /* Not a number. */
+                if (fd != fd_dir_fd && fd >= start_fd &&
+                    !_is_fd_in_sorted_fd_sequence(fd, py_fds_to_keep)) {
+                    while (close(fd) < 0 && errno == EINTR);
+                }
+            }
+        }
+        while (close(fd_dir_fd) < 0 && errno == EINTR);
+    }
+}
+
+#define _close_open_fds _close_open_fds_safe
+
+#else /* NOT (defined(__linux__) && defined(HAVE_SYS_SYSCALL_H)) */
+
+
+/* Close all open file descriptors from start_fd and higher.
+ * Do not close any in the sorted py_fds_to_keep list.
+ *
+ * This function violates the strict use of async signal safe functions. :(
+ * It calls opendir(), readdir() and closedir(). Of these, the one most
+ * likely to ever cause a problem is opendir() as it performs an internal
+ * malloc(). Practically this should not be a problem. The Java VM makes the
+ * same calls between fork and exec in its own UNIXProcess_md.c implementation.
+ *
+ * readdir_r() is not used because it provides no benefit. It is typically
+ * implemented as readdir() followed by memcpy(). See also:
+ * http://womble.decadent.org.uk/readdir_r-advisory.html
+ */
+static void
+_close_open_fds_maybe_unsafe(long start_fd, PyObject* py_fds_to_keep)
+{
+    DIR *proc_fd_dir;
+#ifndef HAVE_DIRFD
+    /* No dirfd(): we cannot discover which fd opendir() uses, so first
+     * advance past any fds we must keep... */
+    while (_is_fd_in_sorted_fd_sequence(start_fd, py_fds_to_keep)) {
+        ++start_fd;
+    }
+    /* Close our lowest fd before we call opendir so that it is likely to
+     * reuse that fd otherwise we might close opendir's file descriptor in
+     * our loop.  This trick assumes that fd's are allocated on a lowest
+     * available basis. */
+    while (close(start_fd) < 0 && errno == EINTR);
+    ++start_fd;
+#endif
+
+#if defined(__FreeBSD__)
+    /* On FreeBSD /dev/fd is only complete when fdescfs is mounted there. */
+    if (!_is_fdescfs_mounted_on_dev_fd())
+        proc_fd_dir = NULL;
+    else
+#endif
+        proc_fd_dir = opendir(FD_DIR);
+    if (!proc_fd_dir) {
+        /* No way to get a list of open fds. */
+        _close_fds_by_brute_force(start_fd, py_fds_to_keep);
+    } else {
+        struct dirent *dir_entry;
+#ifdef HAVE_DIRFD
+        int fd_used_by_opendir = dirfd(proc_fd_dir);
+#else
+        int fd_used_by_opendir = start_fd - 1;  /* the fd closed above */
+#endif
+        errno = 0;
+        while ((dir_entry = readdir(proc_fd_dir))) {
+            int fd;
+            if ((fd = _pos_int_from_ascii(dir_entry->d_name)) < 0)
+                continue; /* Not a number. */
+            if (fd != fd_used_by_opendir && fd >= start_fd &&
+                !_is_fd_in_sorted_fd_sequence(fd, py_fds_to_keep)) {
+                while (close(fd) < 0 && errno == EINTR);
+            }
+            errno = 0;  /* reset so readdir() failure is detectable below */
+        }
+        if (errno) {
+            /* readdir error, revert behavior. Highly Unlikely. */
+            _close_fds_by_brute_force(start_fd, py_fds_to_keep);
+        }
+        closedir(proc_fd_dir);
+    }
+}
+
+#define _close_open_fds _close_open_fds_maybe_unsafe
+
+#endif /* else NOT (defined(__linux__) && defined(HAVE_SYS_SYSCALL_H)) */
+
+
+/*
+ * This function is code executed in the child process immediately after fork
+ * to set things up and call exec().
+ *
+ * All of the code in this function must only use async-signal-safe functions,
+ * listed at `man 7 signal` or
+ * http://www.opengroup.org/onlinepubs/009695399/functions/xsh_chap02_04.html.
+ *
+ * This restriction is documented at
+ * http://www.opengroup.org/onlinepubs/009695399/functions/fork.html.
+ */
+static void
+child_exec(char *const exec_array[],
+           char *const argv[],
+           char *const envp[],
+           const char *cwd,
+           int p2cread, int p2cwrite,
+           int c2pread, int c2pwrite,
+           int errread, int errwrite,
+           int errpipe_read, int errpipe_write,
+           int close_fds, int restore_signals,
+           int call_setsid,
+           PyObject *py_fds_to_keep,
+           PyObject *preexec_fn,
+           PyObject *preexec_fn_args_tuple)
+{
+    /* exec_array: NULL-terminated list of candidate executable paths tried
+     *     in order (the PATH search is done by the Python caller).
+     * p2c*/c2p*/err*: pipe ends for the child's stdin/stdout/stderr;
+     *     -1 means "not redirected".
+     * errpipe_write: where exec failures are reported to the parent
+     *     (wire format: see the `error:` label below).
+     * POSIX_CALL is defined earlier in this file; from the usage below it
+     * jumps to `error:` when the wrapped call fails. */
+    int i, saved_errno, unused, reached_preexec = 0;
+    PyObject *result;
+    const char* err_msg = "";
+    /* Buffer large enough to hold a hex integer.  We can't malloc. */
+    char hex_errno[sizeof(saved_errno)*2+1];
+
+    /* Close parent's pipe ends. */
+    if (p2cwrite != -1) {
+        POSIX_CALL(close(p2cwrite));
+    }
+    if (c2pread != -1) {
+        POSIX_CALL(close(c2pread));
+    }
+    if (errread != -1) {
+        POSIX_CALL(close(errread));
+    }
+    POSIX_CALL(close(errpipe_read));
+
+    /* When duping fds, if there arises a situation where one of the fds is
+       either 0, 1 or 2, it is possible that it is overwritten (#12607). */
+    if (c2pwrite == 0)
+        POSIX_CALL(c2pwrite = dup(c2pwrite));
+    if (errwrite == 0 || errwrite == 1)
+        POSIX_CALL(errwrite = dup(errwrite));
+
+    /* Dup fds for child.
+       dup2() removes the CLOEXEC flag but we must do it ourselves if dup2()
+       would be a no-op (issue #10806). */
+    if (p2cread == 0) {
+        int old = fcntl(p2cread, F_GETFD);
+        if (old != -1)
+            fcntl(p2cread, F_SETFD, old & ~FD_CLOEXEC);
+    } else if (p2cread != -1) {
+        POSIX_CALL(dup2(p2cread, 0)); /* stdin */
+    }
+    if (c2pwrite == 1) {
+        int old = fcntl(c2pwrite, F_GETFD);
+        if (old != -1)
+            fcntl(c2pwrite, F_SETFD, old & ~FD_CLOEXEC);
+    } else if (c2pwrite != -1) {
+        POSIX_CALL(dup2(c2pwrite, 1)); /* stdout */
+    }
+    if (errwrite == 2) {
+        int old = fcntl(errwrite, F_GETFD);
+        if (old != -1)
+            fcntl(errwrite, F_SETFD, old & ~FD_CLOEXEC);
+    } else if (errwrite != -1) {
+        POSIX_CALL(dup2(errwrite, 2)); /* stderr */
+    }
+
+    /* Close pipe fds.  Make sure we don't close the same fd more than */
+    /* once, or standard fds. */
+    if (p2cread > 2) {
+        POSIX_CALL(close(p2cread));
+    }
+    if (c2pwrite > 2 && c2pwrite != p2cread) {
+        POSIX_CALL(close(c2pwrite));
+    }
+    if (errwrite != c2pwrite && errwrite != p2cread && errwrite > 2) {
+        POSIX_CALL(close(errwrite));
+    }
+
+    if (cwd)
+        POSIX_CALL(chdir(cwd));
+
+    if (restore_signals)
+        _Py_RestoreSignals();
+
+#ifdef HAVE_SETSID
+    if (call_setsid)
+        POSIX_CALL(setsid());
+#endif
+
+    reached_preexec = 1;
+    if (preexec_fn != Py_None && preexec_fn_args_tuple) {
+        /* This is where the user has asked us to deadlock their program. */
+        result = PyObject_Call(preexec_fn, preexec_fn_args_tuple, NULL);
+        if (result == NULL) {
+            /* Stringifying the exception or traceback would involve
+             * memory allocation and thus potential for deadlock.
+             * We've already faced potential deadlock by calling back
+             * into Python in the first place, so it probably doesn't
+             * matter but we avoid it to minimize the possibility. */
+            err_msg = "Exception occurred in preexec_fn.";
+            errno = 0; /* We don't want to report an OSError. */
+            goto error;
+        }
+        /* Py_DECREF(result); - We're about to exec so why bother? */
+    }
+
+    _unset_cloexec_on_fds(py_fds_to_keep, errpipe_write);
+    if (close_fds) {
+        /* TODO HP-UX could use pstat_getproc() if anyone cares about it. */
+        _close_open_fds(3, py_fds_to_keep);
+    }
+
+    /* This loop matches the Lib/os.py _execvpe()'s PATH search when */
+    /* given the executable_list generated by Lib/subprocess.py. */
+    saved_errno = 0;
+    for (i = 0; exec_array[i] != NULL; ++i) {
+        const char *executable = exec_array[i];
+        if (envp) {
+            execve(executable, argv, envp);
+        } else {
+            execv(executable, argv);
+        }
+        if (errno != ENOENT && errno != ENOTDIR && saved_errno == 0) {
+            saved_errno = errno;
+        }
+    }
+    /* Report the first exec error, not the last. */
+    if (saved_errno)
+        errno = saved_errno;
+
+error:
+    /* Wire format written to errpipe_write, parsed by the parent:
+     *   "OSError:<uppercase hex errno>:"           -- exec itself failed
+     *   "OSError:<uppercase hex errno>:noexec"     -- failed before exec()
+     *   "RuntimeError:0:<message>"                 -- preexec_fn raised */
+    saved_errno = errno;
+    /* Report the posix error to our parent process. */
+    /* We ignore all write() return values as the total size of our writes is
+     * less than PIPEBUF and we cannot do anything about an error anyways. */
+    if (saved_errno) {
+        char *cur;
+        unused = write(errpipe_write, "OSError:", 8);
+        /* Render saved_errno as hex digits, filling hex_errno backwards;
+         * we cannot use sprintf() here (not async-signal-safe). */
+        cur = hex_errno + sizeof(hex_errno);
+        while (saved_errno != 0 && cur > hex_errno) {
+            *--cur = "0123456789ABCDEF"[saved_errno % 16];
+            saved_errno /= 16;
+        }
+        unused = write(errpipe_write, cur, hex_errno + sizeof(hex_errno) - cur);
+        unused = write(errpipe_write, ":", 1);
+        if (!reached_preexec) {
+            /* Indicate to the parent that the error happened before exec(). */
+            unused = write(errpipe_write, "noexec", 6);
+        }
+        /* We can't call strerror(saved_errno).  It is not async signal safe.
+         * The parent process will look the error message up. */
+    } else {
+        unused = write(errpipe_write, "RuntimeError:0:", 15);
+        unused = write(errpipe_write, err_msg, strlen(err_msg));
+    }
+    if (unused) return; /* silly? yes! avoids gcc compiler warning. */
+}
+
+
+/* Implementation of _posixsubprocess32.fork_exec().
+ *
+ * Converts all Python arguments to C in the parent (no allocation may
+ * happen in the forked child), optionally disables gc around preexec_fn,
+ * forks, runs child_exec() in the child and returns the child's pid in
+ * the parent.  Errors in the child are reported over errpipe_write.
+ *
+ * BUGFIX: the cwd conversion used to happen AFTER the import lock was
+ * acquired for preexec_fn; its `goto cleanup` error path never released
+ * the lock, leaving the import lock held forever on that error.  The cwd
+ * conversion now runs first and the lock is acquired immediately before
+ * fork(), after the last fallible pre-fork step (upstream CPython uses
+ * the same ordering).  cleanup: now also releases cwd_obj2. */
+static PyObject *
+subprocess_fork_exec(PyObject* self, PyObject *args)
+{
+    PyObject *gc_module = NULL;
+    PyObject *executable_list, *py_close_fds, *py_fds_to_keep;
+    PyObject *env_list, *preexec_fn;
+    PyObject *process_args, *converted_args = NULL, *fast_args = NULL;
+    PyObject *preexec_fn_args_tuple = NULL;
+    int p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite;
+    int errpipe_read, errpipe_write, close_fds, restore_signals;
+    int call_setsid;
+    PyObject *cwd_obj, *cwd_obj2 = NULL;  /* NULL so cleanup: may XDECREF it */
+    const char *cwd;
+    pid_t pid;
+    int need_to_reenable_gc = 0;
+    char *const *exec_array, *const *argv = NULL, *const *envp = NULL;
+    Py_ssize_t arg_num;
+
+    /* 17 arguments: 6 objects, 10 ints, preexec_fn. */
+    if (!PyArg_ParseTuple(
+            args, "OOOOOOiiiiiiiiiiO:fork_exec",
+            &process_args, &executable_list, &py_close_fds, &py_fds_to_keep,
+            &cwd_obj, &env_list,
+            &p2cread, &p2cwrite, &c2pread, &c2pwrite,
+            &errread, &errwrite, &errpipe_read, &errpipe_write,
+            &restore_signals, &call_setsid, &preexec_fn))
+        return NULL;
+
+    close_fds = PyObject_IsTrue(py_close_fds);
+    if (close_fds < 0)
+        return NULL;
+    if (close_fds && errpipe_write < 3) { /* precondition */
+        PyErr_SetString(PyExc_ValueError, "errpipe_write must be >= 3");
+        return NULL;
+    }
+    if (PySequence_Length(py_fds_to_keep) < 0) {
+        PyErr_SetString(PyExc_ValueError, "cannot get length of fds_to_keep");
+        return NULL;
+    }
+    if (_sanity_check_python_fd_sequence(py_fds_to_keep)) {
+        PyErr_SetString(PyExc_ValueError, "bad value(s) in fds_to_keep");
+        return NULL;
+    }
+
+    /* We need to call gc.disable() when we'll be calling preexec_fn: a
+     * collection in the forked child could run arbitrary Python code. */
+    if (preexec_fn != Py_None) {
+        PyObject *result;
+        gc_module = PyImport_ImportModule("gc");
+        if (gc_module == NULL)
+            return NULL;
+        result = PyObject_CallMethod(gc_module, "isenabled", NULL);
+        if (result == NULL) {
+            Py_DECREF(gc_module);
+            return NULL;
+        }
+        need_to_reenable_gc = PyObject_IsTrue(result);
+        Py_DECREF(result);
+        if (need_to_reenable_gc == -1) {
+            Py_DECREF(gc_module);
+            return NULL;
+        }
+        result = PyObject_CallMethod(gc_module, "disable", NULL);
+        if (result == NULL) {
+            Py_DECREF(gc_module);
+            return NULL;
+        }
+        Py_DECREF(result);
+    }
+
+    exec_array = _PySequence_BytesToCharpArray(executable_list);
+    if (!exec_array) {
+        Py_XDECREF(gc_module);
+        return NULL;
+    }
+
+    /* Convert args and env into appropriate arguments for exec() */
+    /* These conversions are done in the parent process to avoid allocating
+       or freeing memory in the child process. */
+    if (process_args != Py_None) {
+        Py_ssize_t num_args;
+        /* Equivalent to: */
+        /* tuple(PyUnicode_FSConverter(arg) for arg in process_args) */
+        fast_args = PySequence_Fast(process_args, "argv must be a tuple");
+        if (fast_args == NULL)
+            goto cleanup;
+        num_args = PySequence_Fast_GET_SIZE(fast_args);
+        converted_args = PyTuple_New(num_args);
+        if (converted_args == NULL)
+            goto cleanup;
+        for (arg_num = 0; arg_num < num_args; ++arg_num) {
+            PyObject *borrowed_arg, *converted_arg;
+            borrowed_arg = PySequence_Fast_GET_ITEM(fast_args, arg_num);
+            if (PyUnicode_FSConverter(borrowed_arg, &converted_arg) == 0)
+                goto cleanup;
+            PyTuple_SET_ITEM(converted_args, arg_num, converted_arg);
+        }
+
+        argv = _PySequence_BytesToCharpArray(converted_args);
+        Py_CLEAR(converted_args);
+        Py_CLEAR(fast_args);
+        if (!argv)
+            goto cleanup;
+    }
+
+    if (env_list != Py_None) {
+        envp = _PySequence_BytesToCharpArray(env_list);
+        if (!envp)
+            goto cleanup;
+    }
+
+    /* Convert cwd BEFORE taking the import lock below: this conversion can
+     * fail, and the cleanup: path does not release the import lock. */
+    if (cwd_obj != Py_None) {
+        if (PyUnicode_FSConverter(cwd_obj, &cwd_obj2) == 0)
+            goto cleanup;
+        cwd = PyString_AsString(cwd_obj2);
+    } else {
+        cwd = NULL;
+        cwd_obj2 = NULL;
+    }
+
+    if (preexec_fn != Py_None) {
+        preexec_fn_args_tuple = PyTuple_New(0);
+        if (!preexec_fn_args_tuple)
+            goto cleanup;  /* lock not yet held: safe to bail */
+        /* Hold the import lock across fork() so the child, which calls
+         * back into Python via preexec_fn, sees a consistent import
+         * state.  Nothing may `goto cleanup` after this point. */
+        _PyImport_AcquireLock();
+    }
+
+    pid = fork();
+    if (pid == 0) {
+        /* Child process */
+        /*
+         * Code from here to _exit() must only use async-signal-safe functions,
+         * listed at `man 7 signal` or
+         * http://www.opengroup.org/onlinepubs/009695399/functions/xsh_chap02_04.html.
+         */
+
+        if (preexec_fn != Py_None) {
+            /* We'll be calling back into Python later so we need to do this.
+             * This call may not be async-signal-safe but neither is calling
+             * back into Python.  The user asked us to use hope as a strategy
+             * to avoid deadlock... */
+            PyOS_AfterFork();
+        }
+
+        child_exec(exec_array, argv, envp, cwd,
+                   p2cread, p2cwrite, c2pread, c2pwrite,
+                   errread, errwrite, errpipe_read, errpipe_write,
+                   close_fds, restore_signals, call_setsid,
+                   py_fds_to_keep, preexec_fn, preexec_fn_args_tuple);
+        /* child_exec() only returns on error; never let the child run the
+         * parent's interpreter code. */
+        _exit(255);
+        return NULL; /* Dead code to avoid a potential compiler warning. */
+    }
+    Py_XDECREF(cwd_obj2);
+
+    if (pid == -1) {
+        /* Capture the errno exception before errno can be clobbered. */
+        PyErr_SetFromErrno(PyExc_OSError);
+    }
+    if (preexec_fn != Py_None &&
+        _PyImport_ReleaseLock() < 0 && !PyErr_Occurred()) {
+        PyErr_SetString(PyExc_RuntimeError,
+                        "not holding the import lock");
+    }
+
+    /* Parent process */
+    if (envp)
+        _Py_FreeCharPArray(envp);
+    if (argv)
+        _Py_FreeCharPArray(argv);
+    _Py_FreeCharPArray(exec_array);
+
+    /* Reenable gc in the parent process (or if fork failed). */
+    if (need_to_reenable_gc && _enable_gc(gc_module)) {
+        Py_XDECREF(gc_module);
+        return NULL;
+    }
+    Py_XDECREF(preexec_fn_args_tuple);
+    Py_XDECREF(gc_module);
+
+    if (pid == -1)
+        return NULL; /* fork() failed.  Exception set earlier. */
+
+    return PyLong_FromPid(pid);
+
+cleanup:
+    /* Pre-fork error exit.  The import lock is never held here (see the
+     * ordering above). */
+    Py_XDECREF(cwd_obj2);
+    if (envp)
+        _Py_FreeCharPArray(envp);
+    if (argv)
+        _Py_FreeCharPArray(argv);
+    _Py_FreeCharPArray(exec_array);
+    Py_XDECREF(converted_args);
+    Py_XDECREF(fast_args);
+    Py_XDECREF(preexec_fn_args_tuple);
+
+    /* Reenable gc if it was disabled. */
+    if (need_to_reenable_gc)
+        _enable_gc(gc_module);
+    Py_XDECREF(gc_module);
+    return NULL;
+}
+
+
+/* BUGFIX: the signature line omitted fds_to_keep, the 4th argument --
+ * PyArg_ParseTuple above uses "OOOOOOiiiiiiiiiiO" (17 arguments). */
+PyDoc_STRVAR(subprocess_fork_exec_doc,
+"fork_exec(args, executable_list, close_fds, fds_to_keep, cwd, env,\n\
+          p2cread, p2cwrite, c2pread, c2pwrite,\n\
+          errread, errwrite, errpipe_read, errpipe_write,\n\
+          restore_signals, call_setsid, preexec_fn)\n\
+\n\
+Forks a child process, closes parent file descriptors as appropriate in the\n\
+child and dups the few that are needed before calling exec() in the child\n\
+process.\n\
+\n\
+The preexec_fn, if supplied, will be called immediately before exec.\n\
+WARNING: preexec_fn is NOT SAFE if your application uses threads.\n\
+         It may trigger infrequent, difficult to debug deadlocks.\n\
+\n\
+If an error occurs in the child process before the exec, it is\n\
+serialized and written to the errpipe_write fd per subprocess.py.\n\
+\n\
+Returns: the child process's PID.\n\
+\n\
+Raises: Only on an error in the parent process.\n\
+");
+
+PyDoc_STRVAR(subprocess_cloexec_pipe_doc,
+"cloexec_pipe() -> (read_end, write_end)\n\n\
+Create a pipe whose ends have the cloexec flag set; write_end will be >= 3.");
+
+static PyObject *
+subprocess_cloexec_pipe(PyObject *self, PyObject *noargs)
+{
+    /* Create a pipe with FD_CLOEXEC set on both ends and the write end
+     * guaranteed >= 3 (fds 0-2 may be claimed for the child's stdio).
+     * Returns a (read_fd, write_fd) tuple or raises OSError. */
+    int fds[2];
+    int res, saved_errno;
+    long oldflags;
+#if (defined(HAVE_PIPE2) && defined(O_CLOEXEC))
+    /* Fast path: create the pipe with CLOEXEC set atomically. */
+    Py_BEGIN_ALLOW_THREADS
+    res = pipe2(fds, O_CLOEXEC);
+    Py_END_ALLOW_THREADS
+    if (res != 0 && errno == ENOSYS)
+    {
+        /* Built against a libc with pipe2() but running on a kernel
+         * without it: fall through to the pipe()+fcntl() path below. */
+#endif
+    /* We hold the GIL which offers some protection from other code calling
+     * fork() before the CLOEXEC flags have been set but we can't guarantee
+     * anything without pipe2(). */
+    res = pipe(fds);
+
+    if (res == 0) {
+        oldflags = fcntl(fds[0], F_GETFD, 0);
+        if (oldflags < 0) res = oldflags;
+    }
+    if (res == 0)
+        res = fcntl(fds[0], F_SETFD, oldflags | FD_CLOEXEC);
+
+    if (res == 0) {
+        oldflags = fcntl(fds[1], F_GETFD, 0);
+        if (oldflags < 0) res = oldflags;
+    }
+    if (res == 0)
+        res = fcntl(fds[1], F_SETFD, oldflags | FD_CLOEXEC);
+#if (defined(HAVE_PIPE2) && defined(O_CLOEXEC))
+    }
+#endif
+    if (res == 0 && fds[1] < 3) {
+        /* We always want the write end of the pipe to avoid fds 0, 1 and 2
+         * as our child may claim those for stdio connections. */
+        int write_fd = fds[1];
+        int fds_to_close[3] = {-1, -1, -1};  /* low fds to close afterwards */
+        int fds_to_close_idx = 0;
+#ifdef F_DUPFD_CLOEXEC
+        fds_to_close[fds_to_close_idx++] = write_fd;
+        write_fd = fcntl(write_fd, F_DUPFD_CLOEXEC, 3);
+        if (write_fd < 0) /* We don't support F_DUPFD_CLOEXEC / other error */
+#endif
+        {
+            /* Use dup a few times until we get a desirable fd. */
+            for (; fds_to_close_idx < 3; ++fds_to_close_idx) {
+                fds_to_close[fds_to_close_idx] = write_fd;
+                write_fd = dup(write_fd);
+                if (write_fd >= 3)
+                    break;
+                /* We may dup a few extra times if it returns an error but
+                 * that is okay.  Repeat calls should return the same error. */
+            }
+            if (write_fd < 0) res = write_fd;
+            if (res == 0) {
+                /* Plain dup() does not copy FD_CLOEXEC: set it here. */
+                oldflags = fcntl(write_fd, F_GETFD, 0);
+                if (oldflags < 0) res = oldflags;
+                if (res == 0)
+                    res = fcntl(write_fd, F_SETFD, oldflags | FD_CLOEXEC);
+            }
+        }
+        saved_errno = errno;
+        /* Close fds we tried for the write end that were too low. */
+        for (fds_to_close_idx=0; fds_to_close_idx < 3; ++fds_to_close_idx) {
+            int temp_fd = fds_to_close[fds_to_close_idx];
+            while (temp_fd >= 0 && close(temp_fd) < 0 && errno == EINTR);
+        }
+        errno = saved_errno; /* report dup or fcntl errors, not close. */
+        fds[1] = write_fd;
+    } /* end if write fd was too small */
+
+    if (res != 0)
+        return PyErr_SetFromErrno(PyExc_OSError);
+    return Py_BuildValue("(ii)", fds[0], fds[1]);
+}
+
+/* module level code ********************************************************/
+
+#define MIN_PY_VERSION_WITH_PYIMPORT_ACQUIRELOCK 0x02060300
+#if (PY_VERSION_HEX < MIN_PY_VERSION_WITH_PYIMPORT_ACQUIRELOCK)
+static PyObject* imp_module;
+
+static void
+_PyImport_AcquireLock(void)
+{
+    /* Shim for Python < 2.6.3 (guarded by the surrounding #if): delegate
+     * to imp.acquire_lock().  imp_module is loaded during module init
+     * (init_posixsubprocess32).  A failure cannot be propagated from this
+     * void API, so it is reported on stderr and otherwise ignored. */
+    PyObject *result;
+    result = PyObject_CallMethod(imp_module, "acquire_lock", NULL);
+    if (result == NULL) {
+        fprintf(stderr, "imp.acquire_lock() failed.\n");
+        return;
+    }
+    Py_DECREF(result);
+}
+
+static int
+_PyImport_ReleaseLock(void)
+{
+    /* Shim for Python < 2.6.3: delegate to imp.release_lock().
+     * Returns 0 on success, -1 on failure (matching the sign convention
+     * the caller in subprocess_fork_exec checks with `< 0`). */
+    PyObject *result;
+    result = PyObject_CallMethod(imp_module, "release_lock", NULL);
+    if (result == NULL) {
+        fprintf(stderr, "imp.release_lock() failed.\n");
+        return -1;
+    }
+    Py_DECREF(result);
+    return 0;
+}
+#endif /* Python <= 2.5 */
+
+
+PyDoc_STRVAR(module_doc,
+"A POSIX helper for the subprocess module.");
+
+
+static PyMethodDef module_methods[] = {
+ {"fork_exec", subprocess_fork_exec, METH_VARARGS, subprocess_fork_exec_doc},
+ {"cloexec_pipe", subprocess_cloexec_pipe, METH_NOARGS, subprocess_cloexec_pipe_doc},
+ {NULL, NULL} /* sentinel */
+};
+
+
+/* Python 2 extension-module entry point for _posixsubprocess32. */
+PyMODINIT_FUNC
+init_posixsubprocess32(void)
+{
+    PyObject *m;
+
+#if (PY_VERSION_HEX < MIN_PY_VERSION_WITH_PYIMPORT_ACQUIRELOCK)
+    /* Python < 2.6.3 has no C-level import-lock API; cache the imp module
+     * for the _PyImport_AcquireLock/_PyImport_ReleaseLock shims above. */
+    imp_module = PyImport_ImportModule("imp");
+    if (imp_module == NULL)
+        return;
+#endif
+
+    m = Py_InitModule3("_posixsubprocess32", module_methods, module_doc);
+    if (m == NULL)
+        return;
+}
diff --git a/contrib/deprecated/python/subprocess32/_posixsubprocess_config.h b/contrib/deprecated/python/subprocess32/_posixsubprocess_config.h
new file mode 100644
index 0000000000..0e13698f99
--- /dev/null
+++ b/contrib/deprecated/python/subprocess32/_posixsubprocess_config.h
@@ -0,0 +1,115 @@
+/* _posixsubprocess_config.h. Generated from _posixsubprocess_config.h.in by configure. */
+/* _posixsubprocess_config.h.in. Generated from configure.ac by autoheader. */
+
+/* Define to 1 if you have the <dirent.h> header file, and it defines `DIR'.
+ */
+#define HAVE_DIRENT_H 1
+
+/* Define if you have the 'dirfd' function or macro. */
+#define HAVE_DIRFD 1
+
+/* Define to 1 if you have the <fcntl.h> header file. */
+#define HAVE_FCNTL_H 1
+
+/* Define to 1 if you have the <inttypes.h> header file. */
+#define HAVE_INTTYPES_H 1
+
+/* Define to 1 if you have the <memory.h> header file. */
+#define HAVE_MEMORY_H 1
+
+/* Define to 1 if you have the <ndir.h> header file, and it defines `DIR'. */
+/* #undef HAVE_NDIR_H */
+
+/* Define to 1 if you have the `pipe2' function. */
+#if defined(__linux__)
+#define HAVE_PIPE2 1
+#endif
+
+/* Define to 1 if you have the `setsid' function. */
+#define HAVE_SETSID 1
+
+/* Define to 1 if you have the <signal.h> header file. */
+#define HAVE_SIGNAL_H 1
+
+/* Define to 1 if you have the <stdint.h> header file. */
+#define HAVE_STDINT_H 1
+
+/* Define to 1 if you have the <stdlib.h> header file. */
+#define HAVE_STDLIB_H 1
+
+/* Define to 1 if you have the <strings.h> header file. */
+#define HAVE_STRINGS_H 1
+
+/* Define to 1 if you have the <string.h> header file. */
+#define HAVE_STRING_H 1
+
+/* Define to 1 if you have the <sys/cdefs.h> header file. */
+#define HAVE_SYS_CDEFS_H 1
+
+/* Define to 1 if you have the <sys/dir.h> header file, and it defines `DIR'.
+ */
+/* #undef HAVE_SYS_DIR_H */
+
+/* Define to 1 if you have the <sys/ndir.h> header file, and it defines `DIR'.
+ */
+/* #undef HAVE_SYS_NDIR_H */
+
+/* Define to 1 if you have the <sys/stat.h> header file. */
+#define HAVE_SYS_STAT_H 1
+
+/* Define to 1 if you have the <sys/syscall.h> header file. */
+#define HAVE_SYS_SYSCALL_H 1
+
+/* Define to 1 if you have the <sys/types.h> header file. */
+#define HAVE_SYS_TYPES_H 1
+
+/* Define to 1 if you have the <unistd.h> header file. */
+#define HAVE_UNISTD_H 1
+
+/* Define to the address where bug reports for this package should be sent. */
+#define PACKAGE_BUGREPORT "https://github.com/google/python-subprocess32/"
+
+/* Define to the full name of this package. */
+#define PACKAGE_NAME "_posixsubprocess32"
+
+/* Define to the full name and version of this package. */
+#define PACKAGE_STRING "_posixsubprocess32 3.5"
+
+/* Define to the one symbol short name of this package. */
+#define PACKAGE_TARNAME "_posixsubprocess32"
+
+/* Define to the home page for this package. */
+#define PACKAGE_URL ""
+
+/* Define to the version of this package. */
+#define PACKAGE_VERSION "3.5"
+
+/* Define to 1 if you have the ANSI C header files. */
+#define STDC_HEADERS 1
+
+/* Define on OpenBSD to activate all library features */
+/* #undef _BSD_SOURCE */
+
+/* Define on Irix to enable u_int */
+#define _BSD_TYPES 1
+
+/* Define on Darwin to activate all library features */
+#define _DARWIN_C_SOURCE 1
+
+/* Define on Linux to activate all library features */
+#define _GNU_SOURCE 1
+
+/* Define on NetBSD to activate all library features */
+#define _NETBSD_SOURCE 1
+
+/* Define to activate features from IEEE Stds 1003.1-2008 */
+#define _POSIX_C_SOURCE 200809L
+
+/* Define to the level of X/Open that your system supports */
+#define _XOPEN_SOURCE 700
+
+/* Define to activate Unix95-and-earlier features */
+#define _XOPEN_SOURCE_EXTENDED 1
+
+/* Define on FreeBSD to activate all library features */
+#define __BSD_VISIBLE 1
diff --git a/contrib/deprecated/python/subprocess32/_posixsubprocess_helpers.c b/contrib/deprecated/python/subprocess32/_posixsubprocess_helpers.c
new file mode 100644
index 0000000000..73f6b6c05a
--- /dev/null
+++ b/contrib/deprecated/python/subprocess32/_posixsubprocess_helpers.c
@@ -0,0 +1,174 @@
+/* Functions and macros from Python 3.2 not found in 2.x.
+ This file is #included by _posixsubprocess.c and the functions
+ are declared static to avoid exposing them outside this module. */
+
+/* _posixsubprocess_config.h was already included by _posixsubprocess.c
+ * which is #include'ing us despite the .c name. HAVE_SIGNAL_H comes
+ * from there. Yes, confusing! */
+#ifdef HAVE_SIGNAL_H
+#include <signal.h>
+#endif
+#include "unicodeobject.h"
+
+#if (PY_VERSION_HEX < 0x02050000)
+#define Py_ssize_t int
+#endif
+
+#define Py_CLEANUP_SUPPORTED 0x20000
+
+/* Issue #1983: pid_t can be longer than a C long on some systems */
+#if !defined(SIZEOF_PID_T) || SIZEOF_PID_T == SIZEOF_INT
+#define PyLong_FromPid PyLong_FromLong
+#elif SIZEOF_PID_T == SIZEOF_LONG
+#define PyLong_FromPid PyLong_FromLong
+#elif defined(SIZEOF_LONG_LONG) && SIZEOF_PID_T == SIZEOF_LONG_LONG
+#define PyLong_FromPid PyLong_FromLongLong
+#else
+#error "sizeof(pid_t) is neither sizeof(int), sizeof(long) or sizeof(long long)"
+#endif /* SIZEOF_PID_T */
+
+
+/* Backport of the Python 3 API for Python 2: encode a unicode object with
+ * the filesystem default encoding when one is set, else strict UTF-8.
+ * Returns a new reference to a str/bytes object, or NULL with an
+ * exception set. */
+static PyObject *PyUnicode_EncodeFSDefault(PyObject *unicode)
+{
+    if (Py_FileSystemDefaultEncoding)
+        return PyUnicode_AsEncodedString(unicode,
+                                         Py_FileSystemDefaultEncoding,
+                                         "strict");
+    else
+        return PyUnicode_EncodeUTF8(PyUnicode_AS_UNICODE(unicode),
+                                    PyUnicode_GET_SIZE(unicode),
+                                    "strict");
+}
+
+
+/* Convert the argument to a bytes object, according to the file
+ system encoding. The addr param must be a PyObject**.
+ This is designed to be used with "O&" in PyArg_Parse APIs. */
+
+static int
+PyUnicode_FSConverter(PyObject* arg, void* addr)
+{
+    PyObject *output = NULL;
+    Py_ssize_t size;
+    void *data;
+    /* Py_CLEANUP_SUPPORTED protocol: a second call with arg == NULL asks
+     * us to release the object previously stored through addr. */
+    if (arg == NULL) {
+        Py_DECREF(*(PyObject**)addr);
+        return 1;
+    }
+    if (PyString_Check(arg)) {
+        /* Already bytes: pass it through with a new reference. */
+        output = arg;
+        Py_INCREF(output);
+    }
+    else {
+        /* Coerce to unicode, then encode with the fs default encoding. */
+        arg = PyUnicode_FromObject(arg);
+        if (!arg)
+            return 0;
+        output = PyUnicode_EncodeFSDefault(arg);
+        Py_DECREF(arg);
+        if (!output)
+            return 0;
+        if (!PyString_Check(output)) {
+            Py_DECREF(output);
+            PyErr_SetString(PyExc_TypeError, "encoder failed to return bytes");
+            return 0;
+        }
+    }
+    /* Reject embedded NUL bytes: the result will be used as a C string. */
+    size = PyString_GET_SIZE(output);
+    data = PyString_AS_STRING(output);
+    if (size != strlen(data)) {
+        PyErr_SetString(PyExc_TypeError, "embedded NUL character");
+        Py_DECREF(output);
+        return 0;
+    }
+    /* Success: store the new reference for the caller to release (either
+     * directly or via the arg == NULL cleanup call above). */
+    *(PyObject**)addr = output;
+    return Py_CLEANUP_SUPPORTED;
+}
+
+
+/* Free a NULL terminated char** array of C strings (such as one built by
+ * _PySequence_BytesToCharpArray): every element, then the array itself. */
+static void
+_Py_FreeCharPArray(char *const array[])
+{
+    Py_ssize_t idx = 0;
+    while (array[idx] != NULL) {
+        free(array[idx]);
+        ++idx;
+    }
+    free((void*)array);
+}
+
+
+/*
+ * Flatten a sequence of bytes() objects into a C array of
+ * NULL terminated string pointers with a NULL char* terminating the array.
+ * (ie: an argv or env list)
+ *
+ * Memory allocated for the returned list is allocated using malloc() and MUST
+ * be freed by the caller using a free() loop or _Py_FreeCharPArray().
+ */
+static char *const *
+_PySequence_BytesToCharpArray(PyObject* self)
+{
+    char **array;
+    Py_ssize_t i, argc;
+    PyObject *item = NULL;
+
+    argc = PySequence_Size(self);
+    if (argc == -1)
+        return NULL;
+    /* Avoid 32-bit overflows to malloc() from unreasonable values. */
+    if (argc > 0x10000000) {
+        PyErr_NoMemory();
+        return NULL;
+    }
+
+    array = malloc((argc + 1) * sizeof(char *));
+    if (array == NULL) {
+        PyErr_NoMemory();
+        return NULL;
+    }
+    for (i = 0; i < argc; ++i) {
+        char *data;
+        item = PySequence_GetItem(self, i);
+        if (item == NULL) {
+            /* BUGFIX: a failed GetItem (NULL, exception set) used to be
+             * passed straight to PyString_AsString(), which dereferences
+             * its argument -> crash.  NULL terminate before freeing. */
+            array[i] = NULL;
+            goto fail;
+        }
+        data = PyString_AsString(item);
+        if (data == NULL) {
+            /* NULL terminate before freeing. */
+            array[i] = NULL;
+            goto fail;
+        }
+        array[i] = strdup(data);
+        if (!array[i]) {
+            PyErr_NoMemory();
+            goto fail;
+        }
+        Py_DECREF(item);
+        item = NULL;  /* guard against a stale pointer reaching fail: */
+    }
+    array[argc] = NULL;
+
+    return array;
+
+fail:
+    Py_XDECREF(item);
+    _Py_FreeCharPArray(array);
+    return NULL;
+}
+
+
+/* Restore signals that the interpreter has called SIG_IGN on to SIG_DFL.
+ *
+ * All of the code in this function must only use async-signal-safe functions,
+ * listed at `man 7 signal` or
+ * http://www.opengroup.org/onlinepubs/009695399/functions/xsh_chap02_04.html.
+ */
+static void
+_Py_RestoreSignals(void)
+{
+    /* Undo the SIG_IGN dispositions the interpreter installs so the
+     * exec'd program starts with default signal behavior.  Only signals
+     * the platform defines are touched. */
+#ifdef SIGPIPE
+    PyOS_setsig(SIGPIPE, SIG_DFL);
+#endif
+#ifdef SIGXFZ
+    PyOS_setsig(SIGXFZ, SIG_DFL);
+#endif
+#ifdef SIGXFSZ
+    PyOS_setsig(SIGXFSZ, SIG_DFL);
+#endif
+}
diff --git a/contrib/deprecated/python/subprocess32/subprocess32.py b/contrib/deprecated/python/subprocess32/subprocess32.py
new file mode 100644
index 0000000000..8ab9b14fd0
--- /dev/null
+++ b/contrib/deprecated/python/subprocess32/subprocess32.py
@@ -0,0 +1,1752 @@
+# subprocess - Subprocesses with accessible I/O streams
+#
+# For more information about this module, see PEP 324.
+#
+# Copyright (c) 2003-2005 by Peter Astrand <astrand@lysator.liu.se>
+#
+# Licensed to PSF under a Contributor Agreement.
+# See http://www.python.org/3.3/license for licensing details.
+
+r"""Subprocesses with accessible I/O streams
+
+This module allows you to spawn processes, connect to their
+input/output/error pipes, and obtain their return codes.
+
+For a complete description of this module see the Python documentation.
+
+Main API
+========
+run(...): Runs a command, waits for it to complete, then returns a
+ CompletedProcess instance.
+Popen(...): A class for flexibly executing a command in a new process
+
+Constants
+---------
+DEVNULL: Special value that indicates that os.devnull should be used
+PIPE: Special value that indicates a pipe should be created
+STDOUT: Special value that indicates that stderr should go to stdout
+
+
+Older API
+=========
+call(...): Runs a command, waits for it to complete, then returns
+ the return code.
+check_call(...): Same as call() but raises CalledProcessError()
+ if return code is not 0
+check_output(...): Same as check_call() but returns the contents of
+ stdout instead of a return code
+"""
+
+import sys
+mswindows = (sys.platform == "win32")
+
+import os
+import errno
+import exceptions
+import types
+import time
+import traceback
+import gc
+import signal
+
+# Exception classes used by this module.
# Root of this module's exception hierarchy; CalledProcessError and
# TimeoutExpired below both derive from it.
class SubprocessError(Exception): pass
+
+
class CalledProcessError(SubprocessError):
    """Raised when run() is called with check=True and the process
    returns a non-zero exit status.

    Attributes:
      cmd, returncode, stdout, stderr, output
    """
    def __init__(self, returncode, cmd, output=None, stderr=None):
        self.returncode = returncode
        self.cmd = cmd
        self.output = output
        self.stderr = stderr
        # Also forward the values to Exception.__init__ so that args,
        # str() and pickling reflect the full failure details.
        super(CalledProcessError, self).__init__(returncode, cmd,
                                                 output, stderr)

    def __str__(self):
        # A negative returncode means the child died from that signal
        # (POSIX waitpid() convention).
        if self.returncode and self.returncode < 0:
            return "Command '%s' died with signal %d." % (
                self.cmd, -self.returncode)
        else:
            return "Command '%s' returned non-zero exit status %d." % (
                self.cmd, self.returncode)

    # The getter/setter pair below emulates a read/write @property on
    # Python versions (2.4) that predate decorator-based properties.
    #@property
    def __stdout_getter(self):
        """Alias for output attribute, to match stderr"""
        return self.output

    #@stdout.setter  # Required Python 2.6
    def __stdout_setter(self, value):
        # There's no obvious reason to set this, but allow it anyway so
        # .stdout is a transparent alias for .output
        self.output = value

    stdout = property(__stdout_getter, __stdout_setter)  # Python 2.4
+
+
class TimeoutExpired(SubprocessError):
    """This exception is raised when the timeout expires while waiting for a
    child process.

    Attributes:
      cmd, output, stdout, stderr, timeout
    """
    def __init__(self, cmd, timeout, output=None, stderr=None):
        self.cmd = cmd
        self.timeout = timeout
        self.output = output
        self.stderr = stderr
        # Forward to Exception.__init__ so args and pickling carry the
        # details, mirroring CalledProcessError above.
        super(TimeoutExpired, self).__init__(cmd, timeout, output, stderr)

    def __str__(self):
        return ("Command '%s' timed out after %s seconds" %
                (self.cmd, self.timeout))

    # Pre-2.6 compatible read/write property: .stdout aliases .output so
    # the attribute names match CalledProcessError.
    #@property
    def __stdout_getter(self):
        return self.output

    #@stdout.setter  # Required Python 2.6
    def __stdout_setter(self, value):
        # There's no obvious reason to set this, but allow it anyway so
        # .stdout is a transparent alias for .output
        self.output = value

    stdout = property(__stdout_getter, __stdout_setter)  # Python 2.4
+
+
if mswindows:
    import threading
    import msvcrt
    import _subprocess
    # Minimal stand-ins for the pywin32 types referenced below; the real
    # win32all package is not required.
    class STARTUPINFO:
        dwFlags = 0
        hStdInput = None
        hStdOutput = None
        hStdError = None
        wShowWindow = 0
    class pywintypes:
        error = IOError
else:
    import select
    _has_poll = hasattr(select, 'poll')
    import fcntl
    import pickle

    try:
        # C extension that performs fork()+exec() without running Python
        # code in the child, which is safe in the presence of threads.
        import _posixsubprocess32 as _posixsubprocess
    except ImportError:
        _posixsubprocess = None
        import warnings
        warnings.warn("The _posixsubprocess module is not being used. "
                      "Child process reliability may suffer if your "
                      "program uses threads.", RuntimeWarning)
    try:
        import threading
    except ImportError:
        # Interpreter built without threads: fall back to the API-compatible
        # single-threaded stub.
        import dummy_threading as threading

    # When select or poll has indicated that the file is writable,
    # we can write up to _PIPE_BUF bytes without risk of blocking.
    # POSIX defines PIPE_BUF as >= 512.
    _PIPE_BUF = getattr(select, 'PIPE_BUF', 512)

    _FD_CLOEXEC = getattr(fcntl, 'FD_CLOEXEC', 1)

    def _set_cloexec(fd, cloexec):
        """Set or clear the close-on-exec flag on a file descriptor."""
        old = fcntl.fcntl(fd, fcntl.F_GETFD)
        if cloexec:
            fcntl.fcntl(fd, fcntl.F_SETFD, old | _FD_CLOEXEC)
        else:
            fcntl.fcntl(fd, fcntl.F_SETFD, old & ~_FD_CLOEXEC)

    if _posixsubprocess:
        # The C version creates both ends close-on-exec atomically.
        _create_pipe = _posixsubprocess.cloexec_pipe
    else:
        def _create_pipe():
            """os.pipe() with both ends marked close-on-exec."""
            fds = os.pipe()
            _set_cloexec(fds[0], True)
            _set_cloexec(fds[1], True)
            return fds
+
__all__ = ["Popen", "PIPE", "STDOUT", "call", "check_call",
           "check_output", "CalledProcessError"]

if mswindows:
    from _subprocess import (CREATE_NEW_CONSOLE, CREATE_NEW_PROCESS_GROUP,
                             STD_INPUT_HANDLE, STD_OUTPUT_HANDLE,
                             STD_ERROR_HANDLE, SW_HIDE,
                             STARTF_USESTDHANDLES, STARTF_USESHOWWINDOW)
    # https://msdn.microsoft.com/en-us/library/windows/desktop/ms687032(v=vs.85).aspx
    # Note: In Python 3.3 this constant is found in the _winapi module.
    _WAIT_TIMEOUT = 0x102

    # Re-export the Windows-only constants as well.
    __all__.extend(["CREATE_NEW_CONSOLE", "CREATE_NEW_PROCESS_GROUP",
                    "STD_INPUT_HANDLE", "STD_OUTPUT_HANDLE",
                    "STD_ERROR_HANDLE", "SW_HIDE",
                    "STARTF_USESTDHANDLES", "STARTF_USESHOWWINDOW"])
# Highest file descriptor number the fd-closing fallback code will consider.
try:
    MAXFD = os.sysconf("SC_OPEN_MAX")
except (AttributeError, ValueError, OSError):
    # os.sysconf may be missing entirely (non-POSIX), may reject the name
    # (ValueError), or may fail at runtime (OSError); fall back to the
    # historical conservative default.  The previous bare "except:" also
    # swallowed KeyboardInterrupt/SystemExit, which should propagate.
    MAXFD = 256
+
# This list holds Popen instances for which the underlying process had not
# exited at the time its __del__ method got called: those processes are wait()ed
# for synchronously from _cleanup() when a new Popen object is created, to avoid
# zombie processes.
_active = []
+
def _cleanup():
    """Reap abandoned children recorded in _active (module private).

    Called at the start of every Popen() so that children whose Popen
    object was garbage collected before they exited do not linger as
    zombies.
    """
    for inst in _active[:]:
        res = inst._internal_poll(_deadstate=sys.maxint)
        if res is not None:
            try:
                _active.remove(inst)
            except ValueError:
                # This can happen if two threads create a new Popen instance.
                # It's harmless that it was already removed, so ignore.
                pass
+
# Special sentinel values accepted for the stdin/stdout/stderr arguments.
PIPE = -1      # create a new pipe to/from the child
STDOUT = -2    # (stderr only) merge stderr into stdout
DEVNULL = -3   # attach the stream to os.devnull
+
# This function is only used by multiprocessing, it is here so that people
# can drop subprocess32 in as a replacement for the stdlib subprocess module.

def _args_from_interpreter_flags():
    """Return a list of command-line arguments reproducing the current
    settings in sys.flags and sys.warnoptions."""
    # Map of sys.flags attribute name -> interpreter option letter.
    flag_opt_map = {
        'debug': 'd',
        # 'inspect': 'i',
        # 'interactive': 'i',
        'optimize': 'O',
        'dont_write_bytecode': 'B',
        'no_user_site': 's',
        'no_site': 'S',
        'ignore_environment': 'E',
        'verbose': 'v',
        'bytes_warning': 'b',
        'py3k_warning': '3',
    }
    args = []
    for flag, opt in flag_opt_map.items():
        v = getattr(sys.flags, flag)
        if v > 0:
            # Repeat the letter for flags given multiple times (e.g. -vv).
            args.append('-' + opt * v)
    if getattr(sys.flags, 'hash_randomization') != 0:
        args.append('-R')
    for opt in sys.warnoptions:
        args.append('-W' + opt)
    return args
+
+
def _eintr_retry_call(func, *args):
    """Call func(*args), retrying as long as it fails with EINTR.

    Any other OSError/IOError is re-raised unchanged.
    """
    while True:
        try:
            return func(*args)
        except (OSError, IOError), e:
            if e.errno == errno.EINTR:
                continue
            raise
+
+
+def _get_exec_path(env=None):
+ """Returns the sequence of directories that will be searched for the
+ named executable (similar to a shell) when launching a process.
+
+ *env* must be an environment variable dict or None. If *env* is None,
+ os.environ will be used.
+ """
+ if env is None:
+ env = os.environ
+ return env.get('PATH', os.defpath).split(os.pathsep)
+
+
+if hasattr(os, 'get_exec_path'):
+ _get_exec_path = os.get_exec_path
+
+
def call(*popenargs, **kwargs):
    """Run command with arguments.  Wait for command to complete or
    timeout, then return the returncode attribute.

    The arguments are the same as for the Popen constructor, plus an
    optional *timeout* keyword in seconds.  Example:

    retcode = call(["ls", "-l"])
    """
    timeout = kwargs.pop('timeout', None)
    p = Popen(*popenargs, **kwargs)
    try:
        return p.wait(timeout=timeout)
    except TimeoutExpired:
        # Kill the child before propagating the timeout so no orphan is
        # left running, then reap it to avoid a zombie.
        p.kill()
        p.wait()
        raise
+
+
def check_call(*popenargs, **kwargs):
    """Run command with arguments.  Wait for command to complete.  If
    the exit code was zero then return, otherwise raise
    CalledProcessError.  The CalledProcessError object will have the
    return code in the returncode attribute.

    The arguments are the same as for the call function.  Example:

    check_call(["ls", "-l"])
    """
    retcode = call(*popenargs, **kwargs)
    if retcode:
        # Report the command: prefer an explicit args= keyword, otherwise
        # the first positional argument is the command.
        cmd = kwargs.get("args")
        if cmd is None:
            cmd = popenargs[0]
        raise CalledProcessError(retcode, cmd)
    return 0
+
+
def check_output(*popenargs, **kwargs):
    r"""Run command with arguments and return its output as a byte string.

    If the exit code was non-zero it raises a CalledProcessError.  The
    CalledProcessError object will have the return code in the returncode
    attribute and output in the output attribute.

    The arguments are the same as for the Popen constructor.  Example:

    >>> check_output(["ls", "-l", "/dev/null"])
    'crw-rw-rw- 1 root root 1, 3 Oct 18  2007 /dev/null\n'

    The stdout argument is not allowed as it is used internally.
    To capture standard error in the result, use stderr=STDOUT.

    >>> check_output(["/bin/sh", "-c",
    ...               "ls -l non_existent_file ; exit 0"],
    ...              stderr=STDOUT)
    'ls: non_existent_file: No such file or directory\n'
    """
    timeout = kwargs.pop('timeout', None)
    if 'stdout' in kwargs:
        raise ValueError('stdout argument not allowed, it will be overridden.')
    process = Popen(stdout=PIPE, *popenargs, **kwargs)
    try:
        output, unused_err = process.communicate(timeout=timeout)
    except TimeoutExpired:
        # Kill the child, collect whatever it produced so far, and
        # re-raise with that partial output attached.
        process.kill()
        output, unused_err = process.communicate()
        raise TimeoutExpired(process.args, timeout, output=output)
    retcode = process.poll()
    if retcode:
        raise CalledProcessError(retcode, process.args, output=output)
    return output
+
+
class CompletedProcess(object):
    """A process that has finished running.

    This is returned by run().

    Attributes:
      args: The list or str args passed to run().
      returncode: The exit code of the process, negative for signals.
      stdout: The standard output (None if not captured).
      stderr: The standard error (None if not captured).
    """
    def __init__(self, args, returncode, stdout=None, stderr=None):
        self.args = args
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr

    def __repr__(self):
        # Only mention streams that were actually captured.
        parts = ['args={!r}'.format(self.args),
                 'returncode={!r}'.format(self.returncode)]
        if self.stdout is not None:
            parts.append('stdout={!r}'.format(self.stdout))
        if self.stderr is not None:
            parts.append('stderr={!r}'.format(self.stderr))
        return "{}({})".format(type(self).__name__, ', '.join(parts))

    def check_returncode(self):
        """Raise CalledProcessError if the exit code is non-zero."""
        if not self.returncode:
            return
        raise CalledProcessError(self.returncode, self.args, self.stdout,
                                 self.stderr)
+
+
def run(*popenargs, **kwargs):
    """Run command with arguments and return a CompletedProcess instance.

    The returned instance will have attributes args, returncode, stdout and
    stderr. By default, stdout and stderr are not captured, and those attributes
    will be None. Pass stdout=PIPE and/or stderr=PIPE in order to capture them.

    If check is True and the exit code was non-zero, it raises a
    CalledProcessError. The CalledProcessError object will have the return code
    in the returncode attribute, and output & stderr attributes if those streams
    were captured.

    If timeout is given, and the process takes too long, a TimeoutExpired
    exception will be raised.

    There is an optional argument "input", allowing you to
    pass a string to the subprocess's stdin.  If you use this argument
    you may not also use the Popen constructor's "stdin" argument, as
    it will be used internally.

    The other arguments are the same as for the Popen constructor.

    If universal_newlines=True is passed, the "input" argument must be a
    string and stdout/stderr in the returned object will be strings rather than
    bytes.
    """
    # Keyword-only-style options extracted by hand (Python 2 syntax).
    input = kwargs.pop('input', None)
    timeout = kwargs.pop('timeout', None)
    check = kwargs.pop('check', False)
    if input is not None:
        if 'stdin' in kwargs:
            raise ValueError('stdin and input arguments may not both be used.')
        kwargs['stdin'] = PIPE

    process = Popen(*popenargs, **kwargs)
    try:
        process.__enter__()  # No-Op really... illustrate "with in 2.4"
        try:
            stdout, stderr = process.communicate(input, timeout=timeout)
        except TimeoutExpired:
            # Kill the child, collect its remaining output, and re-raise
            # with that output attached.
            process.kill()
            stdout, stderr = process.communicate()
            raise TimeoutExpired(process.args, timeout, output=stdout,
                                 stderr=stderr)
        except:
            # Any other error: make sure the child does not outlive us.
            process.kill()
            process.wait()
            raise
        retcode = process.poll()
        if check and retcode:
            raise CalledProcessError(retcode, process.args,
                                     output=stdout, stderr=stderr)
    finally:
        # None because our context manager __exit__ does not use them.
        process.__exit__(None, None, None)
    return CompletedProcess(process.args, retcode, stdout, stderr)
+
+
def list2cmdline(seq):
    """
    Translate a sequence of arguments into a command line
    string, using the same rules as the MS C runtime:

    1) Arguments are delimited by white space, which is either a
       space or a tab.

    2) A string surrounded by double quotation marks is
       interpreted as a single argument, regardless of white space
       contained within.  A quoted string can be embedded in an
       argument.

    3) A double quotation mark preceded by a backslash is
       interpreted as a literal double quotation mark.

    4) Backslashes are interpreted literally, unless they
       immediately precede a double quotation mark.

    5) If backslashes immediately precede a double quotation mark,
       every pair of backslashes is interpreted as a literal
       backslash.  If the number of backslashes is odd, the last
       backslash escapes the next double quotation mark as
       described in rule 3.
    """

    # See
    # http://msdn.microsoft.com/en-us/library/17w5ykft.aspx
    # or search http://msdn.microsoft.com for
    # "Parsing C++ Command-Line Arguments"
    pieces = []
    for arg in seq:
        pending_backslashes = []

        # Separate this argument from the previous one.
        if pieces:
            pieces.append(' ')

        # Empty arguments and arguments containing whitespace must be
        # surrounded by double quotes.
        quote_needed = (" " in arg) or ("\t" in arg) or not arg
        if quote_needed:
            pieces.append('"')

        for ch in arg:
            if ch == '\\':
                # Defer: whether backslashes double depends on what follows.
                pending_backslashes.append(ch)
            elif ch == '"':
                # Backslashes preceding a quote must be doubled, and the
                # quote itself escaped.
                pieces.append('\\' * len(pending_backslashes) * 2)
                pending_backslashes = []
                pieces.append('\\"')
            else:
                # Ordinary character: deferred backslashes are literal.
                if pending_backslashes:
                    pieces.extend(pending_backslashes)
                    pending_backslashes = []
                pieces.append(ch)

        # Trailing backslashes are literal...
        if pending_backslashes:
            pieces.extend(pending_backslashes)

        if quote_needed:
            # ...but must be doubled when a closing quote follows (rule 5),
            # hence this second extend before closing the quotes.
            pieces.extend(pending_backslashes)
            pieces.append('"')

    return ''.join(pieces)
+
+
# Sentinel that lets Popen distinguish "close_fds not passed" from an
# explicit True/False; the effective default depends on the platform and
# on whether stdio is redirected.
_PLATFORM_DEFAULT_CLOSE_FDS = object()
+
+
+class Popen(object):
    def __init__(self, args, bufsize=0, executable=None,
                 stdin=None, stdout=None, stderr=None,
                 preexec_fn=None, close_fds=_PLATFORM_DEFAULT_CLOSE_FDS,
                 shell=False, cwd=None, env=None, universal_newlines=False,
                 startupinfo=None, creationflags=0,
                 restore_signals=True, start_new_session=False,
                 pass_fds=()):
        """Create new Popen instance.

        Starts the child immediately.  If starting it fails, every pipe
        fd opened here is closed again before the exception propagates.
        """
        _cleanup()
        # Held while anything is calling waitpid before returncode has been
        # updated to prevent clobbering returncode if wait() or poll() are
        # called from multiple threads at once.  After acquiring the lock,
        # code must re-check self.returncode to see if another thread just
        # finished a waitpid() call.
        self._waitpid_lock = threading.Lock()

        self._child_created = False
        self._input = None
        self._communication_started = False
        if not isinstance(bufsize, (int, long)):
            raise TypeError("bufsize must be an integer")

        # Validate platform-specific options and resolve the close_fds
        # default for this platform.
        if mswindows:
            if preexec_fn is not None:
                raise ValueError("preexec_fn is not supported on Windows "
                                 "platforms")
            any_stdio_set = (stdin is not None or stdout is not None or
                             stderr is not None)
            if close_fds is _PLATFORM_DEFAULT_CLOSE_FDS:
                if any_stdio_set:
                    close_fds = False
                else:
                    close_fds = True
            elif close_fds and any_stdio_set:
                raise ValueError(
                        "close_fds is not supported on Windows platforms"
                        " if you redirect stdin/stdout/stderr")
        else:
            # POSIX
            if close_fds is _PLATFORM_DEFAULT_CLOSE_FDS:
                close_fds = True
            if pass_fds and not close_fds:
                import warnings
                warnings.warn("pass_fds overriding close_fds.", RuntimeWarning)
                close_fds = True
            if startupinfo is not None:
                raise ValueError("startupinfo is only supported on Windows "
                                 "platforms")
            if creationflags != 0:
                raise ValueError("creationflags is only supported on Windows "
                                 "platforms")

        self.args = args
        self.stdin = None
        self.stdout = None
        self.stderr = None
        self.pid = None
        self.returncode = None
        self.universal_newlines = universal_newlines

        # Input and output objects. The general principle is like
        # this:
        #
        # Parent                   Child
        # ------                   -----
        # p2cwrite   ---stdin--->  p2cread
        # c2pread    <--stdout---  c2pwrite
        # errread    <--stderr---  errwrite
        #
        # On POSIX, the child objects are file descriptors.  On
        # Windows, these are Windows file handles.  The parent objects
        # are file descriptors on both platforms.  The parent objects
        # are -1 when not using PIPEs. The child objects are -1
        # when not redirecting.

        (p2cread, p2cwrite,
         c2pread, c2pwrite,
         errread, errwrite) = self._get_handles(stdin, stdout, stderr)

        if mswindows:
            # Convert the parent's ends from Windows handles into
            # C-runtime file descriptors.
            if p2cwrite != -1:
                p2cwrite = msvcrt.open_osfhandle(p2cwrite.Detach(), 0)
            if c2pread != -1:
                c2pread = msvcrt.open_osfhandle(c2pread.Detach(), 0)
            if errread != -1:
                errread = msvcrt.open_osfhandle(errread.Detach(), 0)

        # Wrap the parent's ends in file objects.
        if p2cwrite != -1:
            self.stdin = os.fdopen(p2cwrite, 'wb', bufsize)
        if c2pread != -1:
            if universal_newlines:
                self.stdout = os.fdopen(c2pread, 'rU', bufsize)
            else:
                self.stdout = os.fdopen(c2pread, 'rb', bufsize)
        if errread != -1:
            if universal_newlines:
                self.stderr = os.fdopen(errread, 'rU', bufsize)
            else:
                self.stderr = os.fdopen(errread, 'rb', bufsize)

        self._closed_child_pipe_fds = False
        exception_cleanup_needed = False
        try:
            try:
                self._execute_child(args, executable, preexec_fn, close_fds,
                                    pass_fds, cwd, env, universal_newlines,
                                    startupinfo, creationflags, shell,
                                    p2cread, p2cwrite,
                                    c2pread, c2pwrite,
                                    errread, errwrite,
                                    restore_signals, start_new_session)
            except:
                # The cleanup is performed within the finally block rather
                # than simply within this except block before the raise so
                # that any exceptions raised and handled within it do not
                # clobber the exception context we want to propagate upwards.
                # This is only necessary in Python 2.
                exception_cleanup_needed = True
                raise
        finally:
            if exception_cleanup_needed:
                for f in filter(None, (self.stdin, self.stdout, self.stderr)):
                    try:
                        f.close()
                    except EnvironmentError:
                        pass  # Ignore EBADF or other errors

                if not self._closed_child_pipe_fds:
                    # The child's pipe ends (and any devnull fd) are
                    # normally closed by _execute_child; close them here
                    # if it never got that far.
                    to_close = []
                    if stdin == PIPE:
                        to_close.append(p2cread)
                    if stdout == PIPE:
                        to_close.append(c2pwrite)
                    if stderr == PIPE:
                        to_close.append(errwrite)
                    if hasattr(self, '_devnull'):
                        to_close.append(self._devnull)
                    for fd in to_close:
                        try:
                            os.close(fd)
                        except EnvironmentError:
                            pass
+
    def __enter__(self):
        """Context manager entry: return self unchanged."""
        return self
+
    def __exit__(self, type, value, traceback):
        """Context manager exit: close our ends of the pipes and reap
        the child."""
        if self.stdout:
            self.stdout.close()
        if self.stderr:
            self.stderr.close()
        if self.stdin:
            self.stdin.close()
        # Wait for the process to terminate, to avoid zombies.
        self.wait()
+
    def _translate_newlines(self, data):
        """Normalize CRLF and bare CR line endings in data to LF."""
        data = data.replace("\r\n", "\n")
        data = data.replace("\r", "\n")
        return data
+
+
    def __del__(self, _maxint=sys.maxint, _active=_active):
        # The default arguments capture module globals because globals may
        # already have been cleared by the time __del__ runs during
        # interpreter shutdown.
        #
        # If __init__ hasn't had a chance to execute (e.g. if it
        # was passed an undeclared keyword argument), we don't
        # have a _child_created attribute at all.
        if not getattr(self, '_child_created', False):
            # We didn't get to successfully create a child process.
            return
        # In case the child hasn't been waited on, check if it's done.
        self._internal_poll(_deadstate=_maxint)
        if self.returncode is None and _active is not None:
            # Child is still running, keep us alive until we can wait on it.
            _active.append(self)
+
+
    def _get_devnull(self):
        """Return a read/write fd on os.devnull, opening it lazily and
        caching it on the instance."""
        if not hasattr(self, '_devnull'):
            self._devnull = os.open(os.devnull, os.O_RDWR)
        return self._devnull
+
    def _stdin_write(self, input):
        """Write input (if any) to the child's stdin, then close it.

        Broken-pipe style errors are deliberately ignored: they mean the
        child exited or closed its end, which communicate() must tolerate.
        """
        if input:
            try:
                self.stdin.write(input)
            except EnvironmentError as e:
                if e.errno == errno.EPIPE:
                    # communicate() must ignore broken pipe error
                    pass
                elif e.errno == errno.EINVAL:
                    # bpo-19612, bpo-30418: On Windows, stdin.write() fails
                    # with EINVAL if the child process exited or if the child
                    # process is still running but closed the pipe.
                    pass
                else:
                    raise

        try:
            self.stdin.close()
        except EnvironmentError as e:
            if e.errno in (errno.EPIPE, errno.EINVAL):
                pass
            else:
                raise
+
    def communicate(self, input=None, timeout=None):
        """Interact with process: Send data to stdin.  Read data from
        stdout and stderr, until end-of-file is reached.  Wait for
        process to terminate.  The optional input argument should be a
        string to be sent to the child process, or None, if no data
        should be sent to the child.

        communicate() returns a tuple (stdout, stderr).
        """

        if self._communication_started and input:
            raise ValueError("Cannot send input after starting communication")

        # Convert a relative timeout into an absolute deadline once,
        # up front.
        if timeout is not None:
            endtime = time.time() + timeout
        else:
            endtime = None

        # Optimization: If we are not worried about timeouts, we haven't
        # started communicating, and we have one or zero pipes, using select()
        # or threads is unnecessary.
        if (endtime is None and not self._communication_started and
            [self.stdin, self.stdout, self.stderr].count(None) >= 2):
            stdout = None
            stderr = None
            if self.stdin:
                self._stdin_write(input)
            elif self.stdout:
                stdout = _eintr_retry_call(self.stdout.read)
                self.stdout.close()
            elif self.stderr:
                stderr = _eintr_retry_call(self.stderr.read)
                self.stderr.close()
            self.wait()
            return (stdout, stderr)

        try:
            stdout, stderr = self._communicate(input, endtime, timeout)
        finally:
            # A later communicate() call must not resend input.
            self._communication_started = True

        # Reap the child, honoring whatever time remains of the deadline.
        sts = self.wait(timeout=self._remaining_time(endtime))

        return (stdout, stderr)
+
+
    def poll(self):
        """Check whether the child has terminated without blocking; set
        and return the returncode attribute (None if still running)."""
        return self._internal_poll()
+
+
    def _remaining_time(self, endtime):
        """Convenience for _communicate when computing timeouts.

        Returns None when there is no deadline, otherwise the seconds
        remaining (which may be negative once the deadline has passed).
        """
        if endtime is None:
            return None
        else:
            return endtime - time.time()
+
+
    def _check_timeout(self, endtime, orig_timeout):
        """Convenience for checking if a timeout has expired.

        Raises TimeoutExpired (reporting the caller's original relative
        timeout) once the absolute deadline *endtime* has passed.
        """
        if endtime is None:
            return
        if time.time() > endtime:
            raise TimeoutExpired(self.args, orig_timeout)
+
+
+ if mswindows:
+ #
+ # Windows methods
+ #
        def _get_handles(self, stdin, stdout, stderr):
            """Construct and return tuple with IO objects:
            p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite

            Child-side values are inheritable Windows handles (or -1 when
            not redirected); parent-side pipe ends stay non-inheritable.
            """
            if stdin is None and stdout is None and stderr is None:
                # Nothing redirected: the child simply inherits our stdio.
                return (-1, -1, -1, -1, -1, -1)

            p2cread, p2cwrite = -1, -1
            c2pread, c2pwrite = -1, -1
            errread, errwrite = -1, -1

            if stdin is None:
                p2cread = _subprocess.GetStdHandle(_subprocess.STD_INPUT_HANDLE)
                if p2cread is None:
                    # No parent stdin handle (e.g. GUI app): give the child
                    # a dummy pipe end instead.
                    p2cread, _ = _subprocess.CreatePipe(None, 0)
            elif stdin == PIPE:
                p2cread, p2cwrite = _subprocess.CreatePipe(None, 0)
            elif stdin == DEVNULL:
                p2cread = msvcrt.get_osfhandle(self._get_devnull())
            elif isinstance(stdin, int):
                p2cread = msvcrt.get_osfhandle(stdin)
            else:
                # Assuming file-like object
                p2cread = msvcrt.get_osfhandle(stdin.fileno())
            p2cread = self._make_inheritable(p2cread)

            if stdout is None:
                c2pwrite = _subprocess.GetStdHandle(_subprocess.STD_OUTPUT_HANDLE)
                if c2pwrite is None:
                    _, c2pwrite = _subprocess.CreatePipe(None, 0)
            elif stdout == PIPE:
                c2pread, c2pwrite = _subprocess.CreatePipe(None, 0)
            elif stdout == DEVNULL:
                c2pwrite = msvcrt.get_osfhandle(self._get_devnull())
            elif isinstance(stdout, int):
                c2pwrite = msvcrt.get_osfhandle(stdout)
            else:
                # Assuming file-like object
                c2pwrite = msvcrt.get_osfhandle(stdout.fileno())
            c2pwrite = self._make_inheritable(c2pwrite)

            if stderr is None:
                errwrite = _subprocess.GetStdHandle(_subprocess.STD_ERROR_HANDLE)
                if errwrite is None:
                    _, errwrite = _subprocess.CreatePipe(None, 0)
            elif stderr == PIPE:
                errread, errwrite = _subprocess.CreatePipe(None, 0)
            elif stderr == STDOUT:
                # Reuse the handle chosen for the child's stdout above.
                errwrite = c2pwrite
            elif stderr == DEVNULL:
                errwrite = msvcrt.get_osfhandle(self._get_devnull())
            elif isinstance(stderr, int):
                errwrite = msvcrt.get_osfhandle(stderr)
            else:
                # Assuming file-like object
                errwrite = msvcrt.get_osfhandle(stderr.fileno())
            errwrite = self._make_inheritable(errwrite)

            return (p2cread, p2cwrite,
                    c2pread, c2pwrite,
                    errread, errwrite)
+
+
        def _make_inheritable(self, handle):
            """Return a duplicate of handle, which is inheritable"""
            # DuplicateHandle with bInheritHandle=1; the original handle
            # itself is left untouched.
            return _subprocess.DuplicateHandle(_subprocess.GetCurrentProcess(),
                                handle, _subprocess.GetCurrentProcess(), 0, 1,
                                _subprocess.DUPLICATE_SAME_ACCESS)
+
+
        def _find_w9xpopen(self):
            """Find and return absolute path to w9xpopen.exe"""
            w9xpopen = os.path.join(
                            os.path.dirname(_subprocess.GetModuleFileName(0)),
                            "w9xpopen.exe")
            if not os.path.exists(w9xpopen):
                # Eeek - file-not-found - possibly an embedding
                # situation - see if we can locate it in sys.exec_prefix
                w9xpopen = os.path.join(os.path.dirname(sys.exec_prefix),
                                        "w9xpopen.exe")
                if not os.path.exists(w9xpopen):
                    raise RuntimeError("Cannot locate w9xpopen.exe, which is "
                                       "needed for Popen to work with your "
                                       "shell or platform.")
            return w9xpopen
+
+
        def _execute_child(self, args, executable, preexec_fn, close_fds,
                           pass_fds, cwd, env, universal_newlines,
                           startupinfo, creationflags, shell,
                           p2cread, p2cwrite,
                           c2pread, c2pwrite,
                           errread, errwrite,
                           unused_restore_signals, unused_start_new_session):
            """Execute program (MS Windows version)"""

            assert not pass_fds, "pass_fds not supported on Windows."

            # CreateProcess takes a single command-line string.
            if not isinstance(args, types.StringTypes):
                args = list2cmdline(args)

            # Process startup details
            if startupinfo is None:
                startupinfo = STARTUPINFO()
            if -1 not in (p2cread, c2pwrite, errwrite):
                # All three streams are redirected: hand the handles to the
                # child through the STARTUPINFO structure.
                startupinfo.dwFlags |= _subprocess.STARTF_USESTDHANDLES
                startupinfo.hStdInput = p2cread
                startupinfo.hStdOutput = c2pwrite
                startupinfo.hStdError = errwrite

            if shell:
                startupinfo.dwFlags |= _subprocess.STARTF_USESHOWWINDOW
                startupinfo.wShowWindow = _subprocess.SW_HIDE
                comspec = os.environ.get("COMSPEC", "cmd.exe")
                args = comspec + " /c " + '"%s"' % args
                if (_subprocess.GetVersion() >= 0x80000000L or
                        os.path.basename(comspec).lower() == "command.com"):
                    # Win9x, or using command.com on NT. We need to
                    # use the w9xpopen intermediate program. For more
                    # information, see KB Q150956
                    # (http://web.archive.org/web/20011105084002/http://support.microsoft.com/support/kb/articles/Q150/9/56.asp)
                    w9xpopen = self._find_w9xpopen()
                    args = '"%s" %s' % (w9xpopen, args)
                    # Not passing CREATE_NEW_CONSOLE has been known to
                    # cause random failures on win9x.  Specifically a
                    # dialog: "Your program accessed mem currently in
                    # use at xxx" and a hopeful warning about the
                    # stability of your system.  Cost is Ctrl+C won't
                    # kill children.
                    creationflags |= _subprocess.CREATE_NEW_CONSOLE

            # Start the process
            try:
                try:
                    hp, ht, pid, tid = _subprocess.CreateProcess(executable, args,
                                             # no special security
                                             None, None,
                                             int(not close_fds),
                                             creationflags,
                                             env,
                                             cwd,
                                             startupinfo)
                except pywintypes.error, e:
                    # Translate pywintypes.error to WindowsError, which is
                    # a subclass of OSError.  FIXME: We should really
                    # translate errno using _sys_errlist (or similar), but
                    # how can this be done from Python?
                    raise WindowsError(*e.args)
            finally:
                # Child is launched. Close the parent's copy of those pipe
                # handles that only the child should have open.  You need
                # to make sure that no handles to the write end of the
                # output pipe are maintained in this process or else the
                # pipe will not close when the child process exits and the
                # ReadFile will hang.
                if p2cread != -1:
                    p2cread.Close()
                if c2pwrite != -1:
                    c2pwrite.Close()
                if errwrite != -1:
                    errwrite.Close()
                if hasattr(self, '_devnull'):
                    os.close(self._devnull)

            # Retain the process handle, but close the thread handle
            self._child_created = True
            self._handle = hp
            self.pid = pid
            ht.Close()
+
        def _internal_poll(self, _deadstate=None,
                _WaitForSingleObject=_subprocess.WaitForSingleObject,
                _WAIT_OBJECT_0=_subprocess.WAIT_OBJECT_0,
                _GetExitCodeProcess=_subprocess.GetExitCodeProcess):
            """Check if child process has terminated.  Returns returncode
            attribute.

            This method is called by __del__, so it can only refer to objects
            in its local scope -- hence the API functions bound as default
            arguments: module globals may already be gone at interpreter
            shutdown.
            """
            if self.returncode is None:
                # A zero-millisecond wait is a non-blocking poll.
                if _WaitForSingleObject(self._handle, 0) == _WAIT_OBJECT_0:
                    self.returncode = _GetExitCodeProcess(self._handle)
            return self.returncode
+
+
+ def wait(self, timeout=None, endtime=None):
+ """Wait for child process to terminate. Returns returncode
+ attribute."""
+ if endtime is not None:
+ timeout_millis = self._remaining_time(endtime)
+ if timeout is None:
+ timeout_millis = _subprocess.INFINITE
+ else:
+ timeout_millis = int(timeout * 1000)
+ if self.returncode is None:
+ result = _subprocess.WaitForSingleObject(self._handle,
+ timeout_millis)
+ if result == _WAIT_TIMEOUT:
+ raise TimeoutExpired(self.args, timeout)
+ self.returncode = _subprocess.GetExitCodeProcess(self._handle)
+ return self.returncode
+
+
        def _readerthread(self, fh, buffer):
            """Worker: read fh to EOF into buffer (a one-slot list), then
            close fh."""
            buffer.append(fh.read())
            fh.close()
+
+
        def _communicate(self, input, endtime, orig_timeout):
            """Windows communicate() backend: one reader thread per output
            pipe, honoring the optional absolute deadline *endtime*."""
            # Start reader threads feeding into a list hanging off of this
            # object, unless they've already been started.
            if self.stdout and not hasattr(self, "_stdout_buff"):
                self._stdout_buff = []
                self.stdout_thread = \
                        threading.Thread(target=self._readerthread,
                                         args=(self.stdout, self._stdout_buff))
                self.stdout_thread.daemon = True
                self.stdout_thread.start()
            if self.stderr and not hasattr(self, "_stderr_buff"):
                self._stderr_buff = []
                self.stderr_thread = \
                        threading.Thread(target=self._readerthread,
                                         args=(self.stderr, self._stderr_buff))
                self.stderr_thread.daemon = True
                self.stderr_thread.start()

            if self.stdin:
                self._stdin_write(input)

            # Wait for the reader threads, or time out.  If we time out, the
            # threads remain reading and the fds left open in case the user
            # calls communicate again.
            if self.stdout is not None:
                self.stdout_thread.join(self._remaining_time(endtime))
                if self.stdout_thread.isAlive():
                    raise TimeoutExpired(self.args, orig_timeout)
            if self.stderr is not None:
                self.stderr_thread.join(self._remaining_time(endtime))
                if self.stderr_thread.isAlive():
                    raise TimeoutExpired(self.args, orig_timeout)

            # Collect the output from and close both pipes, now that we know
            # both have been read successfully.
            stdout = None
            stderr = None
            if self.stdout:
                stdout = self._stdout_buff
                self.stdout.close()
            if self.stderr:
                stderr = self._stderr_buff
                self.stderr.close()

            # All data exchanged.  Translate lists into strings.
            if stdout is not None:
                stdout = stdout[0]
            if stderr is not None:
                stderr = stderr[0]

            # Translate newlines, if requested.  We cannot let the file
            # object do the translation: It is based on stdio, which is
            # impossible to combine with select (unless forcing no
            # buffering).
            if self.universal_newlines and hasattr(file, 'newlines'):
                if stdout:
                    stdout = self._translate_newlines(stdout)
                if stderr:
                    stderr = self._translate_newlines(stderr)

            return (stdout, stderr)
+
        def send_signal(self, sig):
            """Send a signal to the process."""
            # Don't signal a process that we know has already died.
            if self.returncode is not None:
                return
            # Windows has no general kill(2); only these signals are mapped.
            if sig == signal.SIGTERM:
                self.terminate()
            elif sig == signal.CTRL_C_EVENT:
                os.kill(self.pid, signal.CTRL_C_EVENT)
            elif sig == signal.CTRL_BREAK_EVENT:
                os.kill(self.pid, signal.CTRL_BREAK_EVENT)
            else:
                raise ValueError("Unsupported signal: %s" % sig)
+
+ def terminate(self):
+ """Terminates the process."""
+ # Don't terminate a process that we know has already died.
+ if self.returncode is not None:
+ return
+ _subprocess.TerminateProcess(self._handle, 1)
+
+ kill = terminate
+
+ else:
+ #
+ # POSIX methods
+ #
+ def _get_handles(self, stdin, stdout, stderr):
+ """Construct and return tuple with IO objects:
+ p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite
+ """
+ p2cread, p2cwrite = -1, -1
+ c2pread, c2pwrite = -1, -1
+ errread, errwrite = -1, -1
+
+ if stdin is None:
+ pass
+ elif stdin == PIPE:
+ p2cread, p2cwrite = _create_pipe()
+ elif stdin == DEVNULL:
+ p2cread = self._get_devnull()
+ elif isinstance(stdin, int):
+ p2cread = stdin
+ else:
+ # Assuming file-like object
+ p2cread = stdin.fileno()
+
+ if stdout is None:
+ pass
+ elif stdout == PIPE:
+ c2pread, c2pwrite = _create_pipe()
+ elif stdout == DEVNULL:
+ c2pwrite = self._get_devnull()
+ elif isinstance(stdout, int):
+ c2pwrite = stdout
+ else:
+ # Assuming file-like object
+ c2pwrite = stdout.fileno()
+
+ if stderr is None:
+ pass
+ elif stderr == PIPE:
+ errread, errwrite = _create_pipe()
+ elif stderr == STDOUT:
+ if c2pwrite != -1:
+ errwrite = c2pwrite
+ else: # child's stdout is not set, use parent's stdout
+ errwrite = sys.__stdout__.fileno()
+ elif stderr == DEVNULL:
+ errwrite = self._get_devnull()
+ elif isinstance(stderr, int):
+ errwrite = stderr
+ else:
+ # Assuming file-like object
+ errwrite = stderr.fileno()
+
+ return (p2cread, p2cwrite,
+ c2pread, c2pwrite,
+ errread, errwrite)
+
+
+ if hasattr(os, 'closerange'): # Introduced in 2.6
+ @staticmethod
+ def _closerange(fd_low, fd_high):
+ os.closerange(fd_low, fd_high)
+ else:
+ @staticmethod
+ def _closerange(fd_low, fd_high):
+ for fd in xrange(fd_low, fd_high):
+ while True:
+ try:
+ os.close(fd)
+ except (OSError, IOError), e:
+ if e.errno == errno.EINTR:
+ continue
+ break
+
+
+ def _close_fds(self, but):
+ self._closerange(3, but)
+ self._closerange(but + 1, MAXFD)
+
+
+ def _close_all_but_a_sorted_few_fds(self, fds_to_keep):
+ # precondition: fds_to_keep must be sorted and unique
+ start_fd = 3
+ for fd in fds_to_keep:
+ if fd >= start_fd:
+ self._closerange(start_fd, fd)
+ start_fd = fd + 1
+ if start_fd <= MAXFD:
+ self._closerange(start_fd, MAXFD)
+
+
+ def _execute_child(self, args, executable, preexec_fn, close_fds,
+ pass_fds, cwd, env, universal_newlines,
+ startupinfo, creationflags, shell,
+ p2cread, p2cwrite,
+ c2pread, c2pwrite,
+ errread, errwrite,
+ restore_signals, start_new_session):
+ """Execute program (POSIX version)"""
+
+ if isinstance(args, types.StringTypes):
+ args = [args]
+ else:
+ args = list(args)
+
+ if shell:
+ args = ["/bin/sh", "-c"] + args
+ if executable:
+ args[0] = executable
+
+ if executable is None:
+ executable = args[0]
+ orig_executable = executable
+
+ # For transferring possible exec failure from child to parent.
+ # Data format: "exception name:hex errno:description"
+ # Pickle is not used; it is complex and involves memory allocation.
+ errpipe_read, errpipe_write = _create_pipe()
+ try:
+ try:
+
+ if _posixsubprocess:
+ fs_encoding = sys.getfilesystemencoding()
+ def fs_encode(s):
+ """Encode s for use in the env, fs or cmdline."""
+ if isinstance(s, str):
+ return s
+ else:
+ return s.encode(fs_encoding, 'strict')
+
+ # We must avoid complex work that could involve
+ # malloc or free in the child process to avoid
+ # potential deadlocks, thus we do all this here.
+ # and pass it to fork_exec()
+
+ if env is not None:
+ env_list = [fs_encode(k) + '=' + fs_encode(v)
+ for k, v in env.items()]
+ else:
+ env_list = None # Use execv instead of execve.
+ if os.path.dirname(executable):
+ executable_list = (fs_encode(executable),)
+ else:
+ # This matches the behavior of os._execvpe().
+ path_list = _get_exec_path(env)
+ executable_list = (os.path.join(dir, executable)
+ for dir in path_list)
+ executable_list = tuple(fs_encode(exe)
+ for exe in executable_list)
+ fds_to_keep = set(pass_fds)
+ fds_to_keep.add(errpipe_write)
+ self.pid = _posixsubprocess.fork_exec(
+ args, executable_list,
+ close_fds, sorted(fds_to_keep), cwd, env_list,
+ p2cread, p2cwrite, c2pread, c2pwrite,
+ errread, errwrite,
+ errpipe_read, errpipe_write,
+ restore_signals, start_new_session, preexec_fn)
+ self._child_created = True
+ else:
+ # Pure Python implementation: It is not thread safe.
+ # This implementation may deadlock in the child if your
+ # parent process has any other threads running.
+
+ gc_was_enabled = gc.isenabled()
+ # Disable gc to avoid bug where gc -> file_dealloc ->
+ # write to stderr -> hang. See issue1336
+ gc.disable()
+ try:
+ self.pid = os.fork()
+ except:
+ if gc_was_enabled:
+ gc.enable()
+ raise
+ self._child_created = True
+ if self.pid == 0:
+ # Child
+ reached_preexec = False
+ try:
+ # Close parent's pipe ends
+ if p2cwrite != -1:
+ os.close(p2cwrite)
+ if c2pread != -1:
+ os.close(c2pread)
+ if errread != -1:
+ os.close(errread)
+ os.close(errpipe_read)
+
+ # When duping fds, if there arises a situation
+ # where one of the fds is either 0, 1 or 2, it
+ # is possible that it is overwritten (#12607).
+ if c2pwrite == 0:
+ c2pwrite = os.dup(c2pwrite)
+ if errwrite == 0 or errwrite == 1:
+ errwrite = os.dup(errwrite)
+
+ # Dup fds for child
+ def _dup2(a, b):
+ # dup2() removes the CLOEXEC flag but
+ # we must do it ourselves if dup2()
+ # would be a no-op (issue #10806).
+ if a == b:
+ _set_cloexec(a, False)
+ elif a != -1:
+ os.dup2(a, b)
+ _dup2(p2cread, 0)
+ _dup2(c2pwrite, 1)
+ _dup2(errwrite, 2)
+
+ # Close pipe fds. Make sure we don't close the
+ # same fd more than once, or standard fds.
+ closed = set()
+ for fd in [p2cread, c2pwrite, errwrite]:
+ if fd > 2 and fd not in closed:
+ os.close(fd)
+ closed.add(fd)
+
+ if cwd is not None:
+ os.chdir(cwd)
+
+ # This is a copy of Python/pythonrun.c
+ # _Py_RestoreSignals(). If that were exposed
+ # as a sys._py_restoresignals func it would be
+ # better.. but this pure python implementation
+ # isn't likely to be used much anymore.
+ if restore_signals:
+ signals = ('SIGPIPE', 'SIGXFZ', 'SIGXFSZ')
+ for sig in signals:
+ if hasattr(signal, sig):
+ signal.signal(getattr(signal, sig),
+ signal.SIG_DFL)
+
+ if start_new_session and hasattr(os, 'setsid'):
+ os.setsid()
+
+ reached_preexec = True
+ if preexec_fn:
+ preexec_fn()
+
+ # Close all other fds, if asked for - after
+ # preexec_fn(), which may open FDs.
+ if close_fds:
+ if pass_fds:
+ fds_to_keep = set(pass_fds)
+ fds_to_keep.add(errpipe_write)
+ self._close_all_but_a_sorted_few_fds(
+ sorted(fds_to_keep))
+ else:
+ self._close_fds(but=errpipe_write)
+
+ if env is None:
+ os.execvp(executable, args)
+ else:
+ os.execvpe(executable, args, env)
+
+ except:
+ try:
+ exc_type, exc_value = sys.exc_info()[:2]
+ if isinstance(exc_value, OSError):
+ errno_num = exc_value.errno
+ else:
+ errno_num = 0
+ if not reached_preexec:
+ exc_value = "noexec"
+ message = '%s:%x:%s' % (exc_type.__name__,
+ errno_num, exc_value)
+ os.write(errpipe_write, message)
+ except Exception:
+ # We MUST not allow anything odd happening
+ # above to prevent us from exiting below.
+ pass
+
+ # This exitcode won't be reported to applications
+ # so it really doesn't matter what we return.
+ os._exit(255)
+
+ # Parent
+ if gc_was_enabled:
+ gc.enable()
+ finally:
+ # be sure the FD is closed no matter what
+ os.close(errpipe_write)
+
+ # A pair of non -1s means we created both fds and are
+ # responsible for closing them.
+ # self._devnull is not always defined.
+ devnull_fd = getattr(self, '_devnull', None)
+ if p2cread != -1 and p2cwrite != -1 and p2cread != devnull_fd:
+ os.close(p2cread)
+ if c2pwrite != -1 and c2pread != -1 and c2pwrite != devnull_fd:
+ os.close(c2pwrite)
+ if errwrite != -1 and errread != -1 and errwrite != devnull_fd:
+ os.close(errwrite)
+ if devnull_fd is not None:
+ os.close(devnull_fd)
+ # Prevent a double close of these fds from __init__ on error.
+ self._closed_child_pipe_fds = True
+
+                # Wait for exec to fail or succeed; possibly raising an
+                # exception (limited in size)
+ errpipe_data = ''
+ while True:
+ part = _eintr_retry_call(os.read, errpipe_read, 50000)
+ errpipe_data += part
+ if not part or len(errpipe_data) > 50000:
+ break
+ finally:
+ # be sure the FD is closed no matter what
+ os.close(errpipe_read)
+
+ if errpipe_data != "":
+ try:
+ _eintr_retry_call(os.waitpid, self.pid, 0)
+ except OSError, e:
+ if e.errno != errno.ECHILD:
+ raise
+ try:
+ exception_name, hex_errno, err_msg = (
+ errpipe_data.split(':', 2))
+ except ValueError:
+ exception_name = 'RuntimeError'
+ hex_errno = '0'
+ err_msg = ('Bad exception data from child: ' +
+ repr(errpipe_data))
+ child_exception_type = getattr(
+ exceptions, exception_name, RuntimeError)
+ if issubclass(child_exception_type, OSError) and hex_errno:
+ errno_num = int(hex_errno, 16)
+ child_exec_never_called = (err_msg == "noexec")
+ if child_exec_never_called:
+ err_msg = ""
+ if errno_num != 0:
+ err_msg = os.strerror(errno_num)
+ if errno_num == errno.ENOENT:
+ if child_exec_never_called:
+ # The error must be from chdir(cwd).
+ err_msg += ': ' + repr(cwd)
+ else:
+ err_msg += ': ' + repr(orig_executable)
+ raise child_exception_type(errno_num, err_msg)
+ try:
+ exception = child_exception_type(err_msg)
+ except Exception:
+ exception = RuntimeError(
+ 'Could not re-raise %r exception from the'
+ ' child with error message %r' %
+ (child_exception_type, err_msg))
+ raise exception
+
+
+ def _handle_exitstatus(self, sts, _WIFSIGNALED=os.WIFSIGNALED,
+ _WTERMSIG=os.WTERMSIG, _WIFEXITED=os.WIFEXITED,
+ _WEXITSTATUS=os.WEXITSTATUS, _WIFSTOPPED=os.WIFSTOPPED,
+ _WSTOPSIG=os.WSTOPSIG):
+ """All callers to this function MUST hold self._waitpid_lock."""
+ # This method is called (indirectly) by __del__, so it cannot
+            # refer to anything outside of its local scope.
+ if _WIFSIGNALED(sts):
+ self.returncode = -_WTERMSIG(sts)
+ elif _WIFEXITED(sts):
+ self.returncode = _WEXITSTATUS(sts)
+ elif _WIFSTOPPED(sts):
+ self.returncode = -_WSTOPSIG(sts)
+ else:
+ # Should never happen
+ raise RuntimeError("Unknown child exit status!")
+
+
+ def _internal_poll(self, _deadstate=None, _waitpid=os.waitpid,
+ _WNOHANG=os.WNOHANG, _os_error=os.error, _ECHILD=errno.ECHILD):
+ """Check if child process has terminated. Returns returncode
+ attribute.
+
+ This method is called by __del__, so it cannot reference anything
+ outside of the local scope (nor can any methods it calls).
+
+ """
+ if self.returncode is None:
+ if not self._waitpid_lock.acquire(False):
+ # Something else is busy calling waitpid. Don't allow two
+ # at once. We know nothing yet.
+ return None
+ try:
+ try:
+ if self.returncode is not None:
+ return self.returncode # Another thread waited.
+ pid, sts = _waitpid(self.pid, _WNOHANG)
+ if pid == self.pid:
+ self._handle_exitstatus(sts)
+ except _os_error, e:
+ if _deadstate is not None:
+ self.returncode = _deadstate
+ elif e.errno == _ECHILD:
+ # This happens if SIGCLD is set to be ignored or
+ # waiting for child processes has otherwise been
+ # disabled for our process. This child is dead, we
+ # can't get the status.
+ # http://bugs.python.org/issue15756
+ self.returncode = 0
+ finally:
+ self._waitpid_lock.release()
+ return self.returncode
+
+
+ def _try_wait(self, wait_flags):
+ """All callers to this function MUST hold self._waitpid_lock."""
+ try:
+ (pid, sts) = _eintr_retry_call(os.waitpid, self.pid, wait_flags)
+ except OSError, e:
+ if e.errno != errno.ECHILD:
+ raise
+ # This happens if SIGCLD is set to be ignored or waiting
+ # for child processes has otherwise been disabled for our
+ # process. This child is dead, we can't get the status.
+ pid = self.pid
+ sts = 0
+ return (pid, sts)
+
+
+ def wait(self, timeout=None, endtime=None):
+ """Wait for child process to terminate. Returns returncode
+ attribute."""
+ if self.returncode is not None:
+ return self.returncode
+
+ # endtime is preferred to timeout. timeout is only used for
+ # printing.
+ if endtime is not None or timeout is not None:
+ if endtime is None:
+ endtime = time.time() + timeout
+ elif timeout is None:
+ timeout = self._remaining_time(endtime)
+
+ if endtime is not None:
+ # Enter a busy loop if we have a timeout. This busy loop was
+ # cribbed from Lib/threading.py in Thread.wait() at r71065.
+ delay = 0.0005 # 500 us -> initial delay of 1 ms
+ while True:
+ if self._waitpid_lock.acquire(False):
+ try:
+ if self.returncode is not None:
+ break # Another thread waited.
+ (pid, sts) = self._try_wait(os.WNOHANG)
+ assert pid == self.pid or pid == 0
+ if pid == self.pid:
+ self._handle_exitstatus(sts)
+ break
+ finally:
+ self._waitpid_lock.release()
+ remaining = self._remaining_time(endtime)
+ if remaining <= 0:
+ raise TimeoutExpired(self.args, timeout)
+ delay = min(delay * 2, remaining, .05)
+ time.sleep(delay)
+ else:
+ while self.returncode is None:
+ self._waitpid_lock.acquire()
+ try:
+ if self.returncode is not None:
+ break # Another thread waited.
+ (pid, sts) = self._try_wait(0)
+ # Check the pid and loop as waitpid has been known to
+ # return 0 even without WNOHANG in odd situations.
+ # http://bugs.python.org/issue14396.
+ if pid == self.pid:
+ self._handle_exitstatus(sts)
+ finally:
+ self._waitpid_lock.release()
+ return self.returncode
+
+
+ def _communicate(self, input, endtime, orig_timeout):
+ if self.stdin and not self._communication_started:
+ # Flush stdio buffer. This might block, if the user has
+ # been writing to .stdin in an uncontrolled fashion.
+ self.stdin.flush()
+ if not input:
+ self.stdin.close()
+
+ if _has_poll:
+ stdout, stderr = self._communicate_with_poll(input, endtime,
+ orig_timeout)
+ else:
+ stdout, stderr = self._communicate_with_select(input, endtime,
+ orig_timeout)
+
+ self.wait(timeout=self._remaining_time(endtime))
+
+ # All data exchanged. Translate lists into strings.
+ if stdout is not None:
+ stdout = ''.join(stdout)
+ if stderr is not None:
+ stderr = ''.join(stderr)
+
+ # Translate newlines, if requested. We cannot let the file
+ # object do the translation: It is based on stdio, which is
+ # impossible to combine with select (unless forcing no
+ # buffering).
+ if self.universal_newlines and hasattr(file, 'newlines'):
+ if stdout:
+ stdout = self._translate_newlines(stdout)
+ if stderr:
+ stderr = self._translate_newlines(stderr)
+
+ return (stdout, stderr)
+
+
+ def _communicate_with_poll(self, input, endtime, orig_timeout):
+ stdout = None # Return
+ stderr = None # Return
+
+ if not self._communication_started:
+ self._fd2file = {}
+
+ poller = select.poll()
+ def register_and_append(file_obj, eventmask):
+ poller.register(file_obj.fileno(), eventmask)
+ self._fd2file[file_obj.fileno()] = file_obj
+
+ def close_unregister_and_remove(fd):
+ poller.unregister(fd)
+ self._fd2file[fd].close()
+ self._fd2file.pop(fd)
+
+ if self.stdin and input:
+ register_and_append(self.stdin, select.POLLOUT)
+
+ # Only create this mapping if we haven't already.
+ if not self._communication_started:
+ self._fd2output = {}
+ if self.stdout:
+ self._fd2output[self.stdout.fileno()] = []
+ if self.stderr:
+ self._fd2output[self.stderr.fileno()] = []
+
+ select_POLLIN_POLLPRI = select.POLLIN | select.POLLPRI
+ if self.stdout:
+ register_and_append(self.stdout, select_POLLIN_POLLPRI)
+ stdout = self._fd2output[self.stdout.fileno()]
+ if self.stderr:
+ register_and_append(self.stderr, select_POLLIN_POLLPRI)
+ stderr = self._fd2output[self.stderr.fileno()]
+
+ # Save the input here so that if we time out while communicating,
+ # we can continue sending input if we retry.
+ if self.stdin and self._input is None:
+ self._input_offset = 0
+ self._input = input
+ if self.universal_newlines and isinstance(self._input, unicode):
+ self._input = self._input.encode(
+ self.stdin.encoding or sys.getdefaultencoding())
+
+ while self._fd2file:
+ try:
+ ready = poller.poll(self._remaining_time(endtime))
+ except select.error, e:
+ if e.args[0] == errno.EINTR:
+ continue
+ raise
+ self._check_timeout(endtime, orig_timeout)
+
+ for fd, mode in ready:
+ if mode & select.POLLOUT:
+ chunk = self._input[self._input_offset :
+ self._input_offset + _PIPE_BUF]
+ self._input_offset += os.write(fd, chunk)
+ if self._input_offset >= len(self._input):
+ close_unregister_and_remove(fd)
+ elif mode & select_POLLIN_POLLPRI:
+ data = os.read(fd, 4096)
+ if not data:
+ close_unregister_and_remove(fd)
+ self._fd2output[fd].append(data)
+ else:
+ # Ignore hang up or errors.
+ close_unregister_and_remove(fd)
+
+ return (stdout, stderr)
+
+
+ def _communicate_with_select(self, input, endtime, orig_timeout):
+ if not self._communication_started:
+ self._read_set = []
+ self._write_set = []
+ if self.stdin and input:
+ self._write_set.append(self.stdin)
+ if self.stdout:
+ self._read_set.append(self.stdout)
+ if self.stderr:
+ self._read_set.append(self.stderr)
+
+ if self.stdin and self._input is None:
+ self._input_offset = 0
+ self._input = input
+ if self.universal_newlines and isinstance(self._input, unicode):
+ self._input = self._input.encode(
+ self.stdin.encoding or sys.getdefaultencoding())
+
+ stdout = None # Return
+ stderr = None # Return
+
+ if self.stdout:
+ if not self._communication_started:
+ self._stdout_buff = []
+ stdout = self._stdout_buff
+ if self.stderr:
+ if not self._communication_started:
+ self._stderr_buff = []
+ stderr = self._stderr_buff
+
+ while self._read_set or self._write_set:
+ try:
+ (rlist, wlist, xlist) = \
+ select.select(self._read_set, self._write_set, [],
+ self._remaining_time(endtime))
+ except select.error, e:
+ if e.args[0] == errno.EINTR:
+ continue
+ raise
+
+ # According to the docs, returning three empty lists indicates
+ # that the timeout expired.
+ if not (rlist or wlist or xlist):
+ raise TimeoutExpired(self.args, orig_timeout)
+ # We also check what time it is ourselves for good measure.
+ self._check_timeout(endtime, orig_timeout)
+
+ if self.stdin in wlist:
+ chunk = self._input[self._input_offset :
+ self._input_offset + _PIPE_BUF]
+ try:
+ bytes_written = os.write(self.stdin.fileno(), chunk)
+ except EnvironmentError as e:
+ if e.errno == errno.EPIPE:
+ self._write_set.remove(self.stdin)
+ self.stdin.close()
+ else:
+ raise
+ else:
+ self._input_offset += bytes_written
+ if self._input_offset >= len(self._input):
+ self.stdin.close()
+ self._write_set.remove(self.stdin)
+
+ if self.stdout in rlist:
+ data = os.read(self.stdout.fileno(), 1024)
+ if data == "":
+ self.stdout.close()
+ self._read_set.remove(self.stdout)
+ stdout.append(data)
+
+ if self.stderr in rlist:
+ data = os.read(self.stderr.fileno(), 1024)
+ if data == "":
+ self.stderr.close()
+ self._read_set.remove(self.stderr)
+ stderr.append(data)
+
+ return (stdout, stderr)
+
+
+ def send_signal(self, sig):
+ """Send a signal to the process
+ """
+ # Skip signalling a process that we know has already died.
+ if self.returncode is None:
+ os.kill(self.pid, sig)
+
+ def terminate(self):
+ """Terminate the process with SIGTERM
+ """
+ self.send_signal(signal.SIGTERM)
+
+ def kill(self):
+ """Kill the process with SIGKILL
+ """
+ self.send_signal(signal.SIGKILL)
diff --git a/contrib/deprecated/python/subprocess32/test_subprocess32.py b/contrib/deprecated/python/subprocess32/test_subprocess32.py
new file mode 100644
index 0000000000..bd4276a936
--- /dev/null
+++ b/contrib/deprecated/python/subprocess32/test_subprocess32.py
@@ -0,0 +1,2485 @@
+import unittest
+from test import test_support
+import subprocess32
+subprocess = subprocess32
+import sys
+try:
+ import ctypes
+except ImportError:
+ ctypes = None
+else:
+ import ctypes.util
+import signal
+import os
+import errno
+import tempfile
+import textwrap
+import time
+try:
+ import threading
+except ImportError:
+ threading = None
+import re
+#import sysconfig
+import select
+import shutil
+try:
+ import gc
+except ImportError:
+ gc = None
+import pickle
+
+mswindows = (sys.platform == "win32")
+yenv = '''
+import os
+os.environ['Y_PYTHON_ENTRY_POINT'] = ':main'
+'''
+
+#
+# Depends on the following external programs: Python
+#
+
+if mswindows:
+ SETBINARY = ('import msvcrt; msvcrt.setmode(sys.stdout.fileno(), '
+ 'os.O_BINARY);')
+else:
+ SETBINARY = ''
+
+
+try:
+ mkstemp = tempfile.mkstemp
+except AttributeError:
+ # tempfile.mkstemp is not available
+ def mkstemp():
+ """Replacement for mkstemp, calling mktemp."""
+ fname = tempfile.mktemp()
+ return os.open(fname, os.O_RDWR|os.O_CREAT), fname
+
+try:
+ strip_python_stderr = test_support.strip_python_stderr
+except AttributeError:
+ # Copied from the test.test_support module in 2.7.
+ def strip_python_stderr(stderr):
+ """Strip the stderr of a Python process from potential debug output
+ emitted by the interpreter.
+
+ This will typically be run on the result of the communicate() method
+ of a subprocess.Popen object.
+ """
+ stderr = re.sub(r"\[\d+ refs\]\r?\n?$", "", stderr).strip()
+ return stderr
+
+class BaseTestCase(unittest.TestCase):
+ def setUp(self):
+ os.environ['Y_PYTHON_ENTRY_POINT'] = ':main'
+ # Try to minimize the number of children we have so this test
+ # doesn't crash on some buildbots (Alphas in particular).
+ reap_children()
+ if not hasattr(unittest.TestCase, 'addCleanup'):
+ self._cleanups = []
+
+ def tearDown(self):
+ try:
+ for inst in subprocess._active:
+ inst.wait()
+ subprocess._cleanup()
+ self.assertFalse(subprocess._active, "subprocess._active not empty")
+ finally:
+ if self._use_our_own_cleanup_implementation:
+ self._doCleanups()
+
+ if not hasattr(unittest.TestCase, 'assertIn'):
+ def assertIn(self, a, b, msg=None):
+ self.assert_((a in b), msg or ('%r not in %r' % (a, b)))
+ def assertNotIn(self, a, b, msg=None):
+ self.assert_((a not in b), msg or ('%r in %r' % (a, b)))
+
+ if not hasattr(unittest.TestCase, 'skipTest'):
+ def skipTest(self, message):
+ """These will still fail but it'll be clear that it is okay."""
+ self.fail('SKIPPED - %s\n' % (message,))
+
+ def _addCleanup(self, function, *args, **kwargs):
+ """Add a function, with arguments, to be called when the test is
+ completed. Functions added are called on a LIFO basis and are
+ called after tearDown on test failure or success.
+
+        Unlike unittest2 or python 2.7, cleanups are not run if setUp fails.
+ That is easier to implement in this subclass and is all we need.
+ """
+ self._cleanups.append((function, args, kwargs))
+
+ def _doCleanups(self):
+ """Execute all cleanup functions. Normally called for you after
+ tearDown."""
+ while self._cleanups:
+ function, args, kwargs = self._cleanups.pop()
+ try:
+ function(*args, **kwargs)
+ except KeyboardInterrupt:
+ raise
+ except:
+ pass
+
+ _use_our_own_cleanup_implementation = False
+ if not hasattr(unittest.TestCase, 'addCleanup'):
+ _use_our_own_cleanup_implementation = True
+ addCleanup = _addCleanup
+
+ def assertStderrEqual(self, stderr, expected, msg=None):
+ # In a debug build, stuff like "[6580 refs]" is printed to stderr at
+ # shutdown time. That frustrates tests trying to check stderr produced
+ # from a spawned Python process.
+ actual = strip_python_stderr(stderr)
+ # strip_python_stderr also strips whitespace, so we do too.
+ expected = expected.strip()
+ self.assertEqual(actual, expected, msg)
+
+
+class PopenTestException(Exception):
+ pass
+
+
+class PopenExecuteChildRaises(subprocess32.Popen):
+ """Popen subclass for testing cleanup of subprocess.PIPE filehandles when
+ _execute_child fails.
+ """
+ def _execute_child(self, *args, **kwargs):
+ raise PopenTestException("Forced Exception for Test")
+
+
+class ProcessTestCase(BaseTestCase):
+
+ def test_call_seq(self):
+ # call() function with sequence argument
+ rc = subprocess.call([sys.executable, "-c", yenv +
+ "import sys; sys.exit(47)"])
+ self.assertEqual(rc, 47)
+
+ def test_call_timeout(self):
+ # call() function with timeout argument; we want to test that the child
+ # process gets killed when the timeout expires. If the child isn't
+ # killed, this call will deadlock since subprocess.call waits for the
+ # child.
+ self.assertRaises(subprocess.TimeoutExpired, subprocess.call,
+ [sys.executable, "-c", yenv + "while True: pass"],
+ timeout=0.1)
+
+ def test_check_call_zero(self):
+ # check_call() function with zero return code
+ rc = subprocess.check_call([sys.executable, "-c", yenv +
+ "import sys; sys.exit(0)"])
+ self.assertEqual(rc, 0)
+
+ def test_check_call_nonzero(self):
+ # check_call() function with non-zero return code
+ try:
+ subprocess.check_call([sys.executable, "-c", yenv +
+ "import sys; sys.exit(47)"])
+ except subprocess.CalledProcessError, c:
+ self.assertEqual(c.returncode, 47)
+
+ def test_check_output(self):
+ # check_output() function with zero return code
+ output = subprocess.check_output(
+ [sys.executable, "-c", yenv + "print 'BDFL'"])
+ self.assertIn('BDFL', output)
+
+ def test_check_output_nonzero(self):
+        # check_output() function with non-zero return code
+ try:
+ subprocess.check_output(
+ [sys.executable, "-c", yenv + "import sys; sys.exit(5)"])
+ except subprocess.CalledProcessError, c:
+ self.assertEqual(c.returncode, 5)
+
+ def test_check_output_stderr(self):
+ # check_output() function stderr redirected to stdout
+ output = subprocess.check_output(
+ [sys.executable, "-c", yenv + "import sys; sys.stderr.write('BDFL')"],
+ stderr=subprocess.STDOUT)
+ self.assertIn('BDFL', output)
+
+ def test_check_output_stdout_arg(self):
+        # check_output() raises ValueError when a stdout argument is supplied
+ try:
+ output = subprocess.check_output(
+ [sys.executable, "-c", yenv + "print 'will not be run'"],
+ stdout=sys.stdout)
+ self.fail("Expected ValueError when stdout arg supplied.")
+ except ValueError, c:
+ self.assertIn('stdout', c.args[0])
+
+ def test_check_output_timeout(self):
+ # check_output() function with timeout arg
+ try:
+ output = subprocess.check_output(
+ [sys.executable, "-c", yenv +
+ "import sys; sys.stdout.write('BDFL')\n"
+ "sys.stdout.flush()\n"
+ "while True: pass"],
+ timeout=0.5)
+ except subprocess.TimeoutExpired, exception:
+ self.assertEqual(exception.output, 'BDFL')
+ else:
+ self.fail("Expected TimeoutExpired.")
+
+ def test_call_kwargs(self):
+ # call() function with keyword args
+ newenv = os.environ.copy()
+ newenv["FRUIT"] = "banana"
+ rc = subprocess.call([sys.executable, "-c", yenv +
+ 'import sys, os;'
+ 'sys.exit(os.getenv("FRUIT")=="banana")'],
+ env=newenv)
+ self.assertEqual(rc, 1)
+
+ def test_stdin_none(self):
+ # .stdin is None when not redirected
+ p = subprocess.Popen([sys.executable, "-c", yenv + 'print "banana"'],
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ p.wait()
+ self.assertEqual(p.stdin, None)
+
+ def test_stdout_none(self):
+ # .stdout is None when not redirected, and the child's stdout will
+ # be inherited from the parent. In order to test this we run a
+ # subprocess in a subprocess:
+ # this_test
+ # \-- subprocess created by this test (parent)
+ # \-- subprocess created by the parent subprocess (child)
+ # The parent doesn't specify stdout, so the child will use the
+ # parent's stdout. This test checks that the message printed by the
+ # child goes to the parent stdout. The parent also checks that the
+ # child's stdout is None. See #11963.
+ code = ('import sys; from subprocess32 import Popen, PIPE;'
+ 'p = Popen([sys.executable, "-c", "print \'test_stdout_none\'"],'
+ ' stdin=PIPE, stderr=PIPE);'
+ 'p.wait(); assert p.stdout is None;')
+ p = subprocess.Popen([sys.executable, "-c", yenv + code],
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ self.addCleanup(p.stdout.close)
+ self.addCleanup(p.stderr.close)
+ out, err = p.communicate()
+ self.assertEqual(p.returncode, 0, err)
+ self.assertEqual(out.rstrip(), 'test_stdout_none')
+
+ def test_stderr_none(self):
+ # .stderr is None when not redirected
+ p = subprocess.Popen([sys.executable, "-c", yenv + 'print "banana"'],
+ stdin=subprocess.PIPE, stdout=subprocess.PIPE)
+ p.wait()
+ self.assertEqual(p.stderr, None)
+
+ # For use in the test_cwd* tests below.
+ def _normalize_cwd(self, cwd):
+ # Normalize an expected cwd (for Tru64 support).
+ # We can't use os.path.realpath since it doesn't expand Tru64 {memb}
+ # strings. See bug #1063571.
+ original_cwd = os.getcwd()
+ os.chdir(cwd)
+ cwd = os.getcwd()
+ os.chdir(original_cwd)
+ return cwd
+
+ # For use in the test_cwd* tests below.
+ def _split_python_path(self):
+ # Return normalized (python_dir, python_base).
+ python_path = os.path.realpath(sys.executable)
+ return os.path.split(python_path)
+
+ # For use in the test_cwd* tests below.
+ def _assert_cwd(self, expected_cwd, python_arg, **kwargs):
+ # Invoke Python via Popen, and assert that (1) the call succeeds,
+ # and that (2) the current working directory of the child process
+ # matches *expected_cwd*.
+ p = subprocess.Popen([python_arg, "-c", yenv +
+ "import os, sys; "
+ "sys.stdout.write(os.getcwd()); "
+ "sys.exit(47)"],
+ stdout=subprocess.PIPE,
+ **kwargs)
+ self.addCleanup(p.stdout.close)
+ p.wait()
+ self.assertEqual(47, p.returncode)
+ normcase = os.path.normcase
+ self.assertEqual(normcase(expected_cwd),
+ normcase(p.stdout.read().decode("utf-8")))
+
+ def test_cwd(self):
+ # Check that cwd changes the cwd for the child process.
+ temp_dir = tempfile.gettempdir()
+ temp_dir = self._normalize_cwd(temp_dir)
+ self._assert_cwd(temp_dir, sys.executable, cwd=temp_dir)
+
+ if not mswindows: # pending resolution of issue #15533
+ def test_cwd_with_relative_arg(self):
+ # Check that Popen looks for args[0] relative to cwd if args[0]
+ # is relative.
+ python_dir, python_base = self._split_python_path()
+ rel_python = os.path.join(os.curdir, python_base)
+
+ path = 'tempcwd'
+ saved_dir = os.getcwd()
+ os.mkdir(path)
+ try:
+ os.chdir(path)
+ wrong_dir = os.getcwd()
+ # Before calling with the correct cwd, confirm that the call fails
+ # without cwd and with the wrong cwd.
+ self.assertRaises(OSError, subprocess.Popen,
+ [rel_python])
+ self.assertRaises(OSError, subprocess.Popen,
+ [rel_python], cwd=wrong_dir)
+ python_dir = self._normalize_cwd(python_dir)
+ self._assert_cwd(python_dir, rel_python, cwd=python_dir)
+ finally:
+ os.chdir(saved_dir)
+ shutil.rmtree(path)
+
+ def test_cwd_with_relative_executable(self):
+ # Check that Popen looks for executable relative to cwd if executable
+ # is relative (and that executable takes precedence over args[0]).
+ python_dir, python_base = self._split_python_path()
+ rel_python = os.path.join(os.curdir, python_base)
+ doesntexist = "somethingyoudonthave"
+
+ path = 'tempcwd'
+ saved_dir = os.getcwd()
+ os.mkdir(path)
+ try:
+ os.chdir(path)
+ wrong_dir = os.getcwd()
+ # Before calling with the correct cwd, confirm that the call fails
+ # without cwd and with the wrong cwd.
+ self.assertRaises(OSError, subprocess.Popen,
+ [doesntexist], executable=rel_python)
+ self.assertRaises(OSError, subprocess.Popen,
+ [doesntexist], executable=rel_python,
+ cwd=wrong_dir)
+ python_dir = self._normalize_cwd(python_dir)
+ self._assert_cwd(python_dir, doesntexist, executable=rel_python,
+ cwd=python_dir)
+ finally:
+ os.chdir(saved_dir)
+ shutil.rmtree(path)
+
+ def test_cwd_with_absolute_arg(self):
+ # Check that Popen can find the executable when the cwd is wrong
+ # if args[0] is an absolute path.
+ python_dir, python_base = self._split_python_path()
+ abs_python = os.path.join(python_dir, python_base)
+ rel_python = os.path.join(os.curdir, python_base)
+ wrong_dir = tempfile.mkdtemp()
+ wrong_dir = os.path.realpath(wrong_dir)
+ try:
+ # Before calling with an absolute path, confirm that using a
+ # relative path fails.
+ self.assertRaises(OSError, subprocess.Popen,
+ [rel_python], cwd=wrong_dir)
+ wrong_dir = self._normalize_cwd(wrong_dir)
+ self._assert_cwd(wrong_dir, abs_python, cwd=wrong_dir)
+ finally:
+ shutil.rmtree(wrong_dir)
+
+ def test_executable_with_cwd(self):
+ python_dir, python_base = self._split_python_path()
+ python_dir = self._normalize_cwd(python_dir)
+ self._assert_cwd(python_dir, "somethingyoudonthave",
+ executable=sys.executable, cwd=python_dir)
+
+ #@unittest.skipIf(sysconfig.is_python_build(),
+ # "need an installed Python. See #7774")
+ #def test_executable_without_cwd(self):
+ # # For a normal installation, it should work without 'cwd'
+ # # argument. For test runs in the build directory, see #7774.
+ # self._assert_cwd('', "somethingyoudonthave", executable=sys.executable)
+
    def test_stdin_pipe(self):
        # stdin redirection
        p = subprocess.Popen([sys.executable, "-c", yenv +
                              'import sys; sys.exit(sys.stdin.read() == "pear")'],
                             stdin=subprocess.PIPE)
        p.stdin.write("pear")
        p.stdin.close()
        p.wait()
        # sys.exit(True) maps to exit status 1, proving the child really
        # received "pear" on its stdin.
        self.assertEqual(p.returncode, 1)
+
    def test_stdin_filedes(self):
        # stdin is set to open file descriptor
        tf = tempfile.TemporaryFile()
        d = tf.fileno()
        os.write(d, "pear")
        # Rewind so the child reads from the start of the file.
        os.lseek(d, 0, 0)
        p = subprocess.Popen([sys.executable, "-c", yenv +
                              'import sys; sys.exit(sys.stdin.read() == "pear")'],
                             stdin=d)
        p.wait()
        # Exit status 1 == sys.exit(True): the child saw the data.
        self.assertEqual(p.returncode, 1)
+
    def test_stdin_fileobj(self):
        # stdin is set to open file object
        tf = tempfile.TemporaryFile()
        tf.write("pear")
        # Rewind so the child reads from the start of the file.
        tf.seek(0)
        p = subprocess.Popen([sys.executable, "-c", yenv +
                              'import sys; sys.exit(sys.stdin.read() == "pear")'],
                             stdin=tf)
        p.wait()
        # Exit status 1 == sys.exit(True): the child saw the data.
        self.assertEqual(p.returncode, 1)
+
+ def test_stdout_pipe(self):
+ # stdout redirection
+ p = subprocess.Popen([sys.executable, "-c", yenv +
+ 'import sys; sys.stdout.write("orange")'],
+ stdout=subprocess.PIPE)
+ self.assertEqual(p.stdout.read(), "orange")
+
    def test_stdout_filedes(self):
        # stdout is set to open file descriptor
        tf = tempfile.TemporaryFile()
        d = tf.fileno()
        p = subprocess.Popen([sys.executable, "-c", yenv +
                              'import sys; sys.stdout.write("orange")'],
                             stdout=d)
        p.wait()
        # Rewind to read back what the child wrote through the shared fd.
        os.lseek(d, 0, 0)
        self.assertEqual(os.read(d, 1024), "orange")
+
+ def test_stdout_fileobj(self):
+ # stdout is set to open file object
+ tf = tempfile.TemporaryFile()
+ p = subprocess.Popen([sys.executable, "-c", yenv +
+ 'import sys; sys.stdout.write("orange")'],
+ stdout=tf)
+ p.wait()
+ tf.seek(0)
+ self.assertEqual(tf.read(), "orange")
+
+ def test_stderr_pipe(self):
+ # stderr redirection
+ p = subprocess.Popen([sys.executable, "-c", yenv +
+ 'import sys; sys.stderr.write("strawberry")'],
+ stderr=subprocess.PIPE)
+ self.assertStderrEqual(p.stderr.read(), "strawberry")
+
    def test_stderr_filedes(self):
        # stderr is set to open file descriptor
        tf = tempfile.TemporaryFile()
        d = tf.fileno()
        p = subprocess.Popen([sys.executable, "-c", yenv +
                              'import sys; sys.stderr.write("strawberry")'],
                             stderr=d)
        p.wait()
        # Rewind to read back what the child wrote through the shared fd.
        os.lseek(d, 0, 0)
        self.assertStderrEqual(os.read(d, 1024), "strawberry")
+
    def test_stderr_fileobj(self):
        # stderr is set to open file object
        tf = tempfile.TemporaryFile()
        p = subprocess.Popen([sys.executable, "-c", yenv +
                              'import sys; sys.stderr.write("strawberry")'],
                             stderr=tf)
        p.wait()
        # Rewind and read back what the child wrote.
        tf.seek(0)
        self.assertStderrEqual(tf.read(), "strawberry")
+
    def test_stderr_redirect_with_no_stdout_redirect(self):
        """stderr=STDOUT must reuse the real stdout when stdout is not set."""
        # test stderr=STDOUT while stdout=None (not set)

        # - grandchild prints to stderr
        # - child redirects grandchild's stderr to its stdout
        # - the parent should get grandchild's stderr in child's stdout
        p = subprocess.Popen([sys.executable, "-c", yenv +
                              'import sys, subprocess32 as subprocess;'
                              'rc = subprocess.call([sys.executable, "-c",'
                              '    "import sys;"'
                              '    "sys.stderr.write(\'42\')"],'
                              '    stderr=subprocess.STDOUT);'
                              'sys.exit(rc)'],
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        stdout, stderr = p.communicate()
        #NOTE: stdout should get stderr from grandchild
        self.assertStderrEqual(stdout, '42')
        self.assertStderrEqual(stderr, '') # should be empty
        self.assertEqual(p.returncode, 0)
+
    def test_stdout_stderr_pipe(self):
        # capture stdout and stderr to the same pipe
        p = subprocess.Popen([sys.executable, "-c", yenv +
                              'import sys;'
                              'sys.stdout.write("apple");'
                              'sys.stdout.flush();'
                              'sys.stderr.write("orange")'],
                             stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT)
        # The explicit flush guarantees "apple" reaches the pipe before
        # "orange", making the combined ordering deterministic.
        self.assertStderrEqual(p.stdout.read(), "appleorange")
+
    def test_stdout_stderr_file(self):
        # capture stdout and stderr to the same open file
        tf = tempfile.TemporaryFile()
        p = subprocess.Popen([sys.executable, "-c", yenv +
                              'import sys;'
                              'sys.stdout.write("apple");'
                              'sys.stdout.flush();'
                              'sys.stderr.write("orange")'],
                             stdout=tf,
                             stderr=tf)
        p.wait()
        tf.seek(0)
        # The flush in the child makes the interleaving deterministic.
        self.assertStderrEqual(tf.read(), "appleorange")
+
    def test_stdout_filedes_of_stdout(self):
        # stdout is set to 1 (#1531862).
        # To avoid printing the text on stdout, we do something similar to
        # test_stdout_none (see above).  The parent subprocess calls the child
        # subprocess passing stdout=1, and this test uses stdout=PIPE in
        # order to capture and check the output of the parent. See #11963.
        # The inner assert checks os.write's return value: 18 bytes, the
        # length of 'test with stdout=1'.
        code = ('import sys, subprocess32; '
                'rc = subprocess32.call([sys.executable, "-c", '
                '    "import os, sys; sys.exit(os.write(sys.stdout.fileno(), '
                '\'test with stdout=1\'))"], stdout=1); '
                'assert rc == 18')
        p = subprocess.Popen([sys.executable, "-c", yenv + code],
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        self.addCleanup(p.stdout.close)
        self.addCleanup(p.stderr.close)
        out, err = p.communicate()
        self.assertEqual(p.returncode, 0, err)
        self.assertEqual(out.rstrip(), 'test with stdout=1')
+
    def test_stdout_devnull(self):
        """stdout=DEVNULL discards output and leaves p.stdout unset."""
        p = subprocess.Popen([sys.executable, "-c", yenv +
                              'for i in range(10240):'
                              'print("x" * 1024)'],
                             stdout=subprocess.DEVNULL)
        p.wait()
        self.assertEqual(p.stdout, None)
+
    def test_stderr_devnull(self):
        """stderr=DEVNULL discards output and leaves p.stderr unset."""
        p = subprocess.Popen([sys.executable, "-c", yenv +
                              'import sys\n'
                              'for i in range(10240):'
                              'sys.stderr.write("x" * 1024)'],
                             stderr=subprocess.DEVNULL)
        p.wait()
        self.assertEqual(p.stderr, None)
+
+ def test_stdin_devnull(self):
+ p = subprocess.Popen([sys.executable, "-c", yenv +
+ 'import sys;'
+ 'sys.stdin.read(1)'],
+ stdin=subprocess.DEVNULL)
+ p.wait()
+ self.assertEqual(p.stdin, None)
+
    def test_env(self):
        """A child launched with env= sees the mapping we pass in."""
        newenv = os.environ.copy()
        newenv["FRUIT"] = "orange"
        p = subprocess.Popen([sys.executable, "-c", yenv +
                              'import sys,os;'
                              'sys.stdout.write(os.getenv("FRUIT"))'],
                             stdout=subprocess.PIPE,
                             env=newenv)
        try:
            stdout, stderr = p.communicate()
            self.assertEqual(stdout, "orange")
        finally:
            # Explicit __exit__ instead of a with-statement keeps this code
            # runnable on interpreters without Popen context-manager support.
            p.__exit__(None, None, None)
+
    def test_empty_env(self):
        """test_empty_env() - verify that env={} is as empty as possible."""

        def is_env_var_to_ignore(n):
            """Determine if an environment variable is under our control."""
            # This excludes some __CF_* and VERSIONER_* keys MacOS insists
            # on adding even when the environment in exec is empty.
            # Gentoo sandboxes also force LD_PRELOAD and SANDBOX_* to exist.
            return ('VERSIONER' in n or '__CF' in n or  # MacOS
                    n == 'LD_PRELOAD' or n.startswith('SANDBOX'))  # Gentoo

        # NOTE(review): Y_PYTHON_ENTRY_POINT appears to be required by this
        # repo's bundled interpreter launcher, hence the single allowed key
        # instead of a truly empty dict -- confirm against the ya.make setup.
        p = subprocess.Popen(
            [sys.executable, '-c',
             'import os; print(list(os.environ.keys()))'],
            stdout=subprocess.PIPE, env={'Y_PYTHON_ENTRY_POINT': ':main'})
        try:
            stdout, stderr = p.communicate()
            child_env_names = eval(stdout.strip())
            self.assertTrue(isinstance(child_env_names, list),
                            msg=repr(child_env_names))
            child_env_names = [k for k in child_env_names
                               if not is_env_var_to_ignore(k)]
            self.assertEqual(child_env_names, [])
        finally:
            p.__exit__(None, None, None)
+
    def test_communicate_stdin(self):
        # communicate() feeds the string to the child's stdin and closes it.
        p = subprocess.Popen([sys.executable, "-c", yenv +
                              'import sys;'
                              'sys.exit(sys.stdin.read() == "pear")'],
                             stdin=subprocess.PIPE)
        p.communicate("pear")
        # sys.exit(True) -> status 1, so 1 means the child saw "pear".
        self.assertEqual(p.returncode, 1)
+
+ def test_communicate_stdout(self):
+ p = subprocess.Popen([sys.executable, "-c", yenv +
+ 'import sys; sys.stdout.write("pineapple")'],
+ stdout=subprocess.PIPE)
+ (stdout, stderr) = p.communicate()
+ self.assertEqual(stdout, "pineapple")
+ self.assertEqual(stderr, None)
+
+ def test_communicate_stderr(self):
+ p = subprocess.Popen([sys.executable, "-c", yenv +
+ 'import sys; sys.stderr.write("pineapple")'],
+ stderr=subprocess.PIPE)
+ (stdout, stderr) = p.communicate()
+ self.assertEqual(stdout, None)
+ self.assertStderrEqual(stderr, "pineapple")
+
    def test_communicate(self):
        # Full round trip: write stdin, read back both stdout and stderr.
        p = subprocess.Popen([sys.executable, "-c", yenv +
                              'import sys,os;'
                              'sys.stderr.write("pineapple");'
                              'sys.stdout.write(sys.stdin.read())'],
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        (stdout, stderr) = p.communicate("banana")
        self.assertEqual(stdout, "banana")
        self.assertStderrEqual(stderr, "pineapple")
+
    def test_communicate_timeout(self):
        # The child sleeps for 1 second mid-output, so a 0.3s timeout must
        # expire; a later timeout-less communicate() must still return all
        # of the output produced both before and after the expiry.
        p = subprocess.Popen([sys.executable, "-c", yenv +
                              'import sys,os,time;'
                              'sys.stderr.write("pineapple\\n");'
                              'time.sleep(1);'
                              'sys.stderr.write("pear\\n");'
                              'sys.stdout.write(sys.stdin.read())'],
                             universal_newlines=True,
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        self.assertRaises(subprocess.TimeoutExpired, p.communicate, u"banana",
                          timeout=0.3)
        # Make sure we can keep waiting for it, and that we get the whole output
        # after it completes.
        (stdout, stderr) = p.communicate()
        self.assertEqual(stdout, "banana")
        self.assertStderrEqual(stderr.encode(), "pineapple\npear\n")
+
    # NOTE: "ouput" in the method name is a historical typo, kept so test
    # selection by name stays stable.
    def test_communicate_timeout_large_ouput(self):
        # Test an expiring timeout while the child is outputting lots of data.
        # The accumulated partial output (4 x 64 KiB) must survive the
        # TimeoutExpired and be returned in full by the second communicate().
        p = subprocess.Popen([sys.executable, "-c", yenv +
                              'import sys,os,time;'
                              'sys.stdout.write("a" * (64 * 1024));'
                              'time.sleep(0.2);'
                              'sys.stdout.write("a" * (64 * 1024));'
                              'time.sleep(0.2);'
                              'sys.stdout.write("a" * (64 * 1024));'
                              'time.sleep(0.2);'
                              'sys.stdout.write("a" * (64 * 1024));'],
                             stdout=subprocess.PIPE)
        self.assertRaises(subprocess.TimeoutExpired, p.communicate, timeout=0.4)
        (stdout, _) = p.communicate()
        self.assertEqual(len(stdout), 4 * 64 * 1024)
+
    # Test for the fd leak reported in http://bugs.python.org/issue2791.
    def test_communicate_pipe_fd_leak(self):
        """communicate() must close every pipe it created, in all 7 PIPE combos."""
        for stdin_pipe in (False, True):
            for stdout_pipe in (False, True):
                for stderr_pipe in (False, True):
                    options = {}
                    if stdin_pipe:
                        options['stdin'] = subprocess.PIPE
                    if stdout_pipe:
                        options['stdout'] = subprocess.PIPE
                    if stderr_pipe:
                        options['stderr'] = subprocess.PIPE
                    if not options:
                        # No pipes at all: nothing for communicate() to close.
                        continue
                    p = subprocess.Popen((sys.executable, "-c", yenv + "pass"), **options)
                    p.communicate()
                    if p.stdin is not None:
                        self.assertTrue(p.stdin.closed)
                    if p.stdout is not None:
                        self.assertTrue(p.stdout.closed)
                    if p.stderr is not None:
                        self.assertTrue(p.stderr.closed)
+
+ def test_communicate_returns(self):
+ # communicate() should return None if no redirection is active
+ p = subprocess.Popen([sys.executable, "-c", yenv +
+ "import sys; sys.exit(47)"])
+ (stdout, stderr) = p.communicate()
+ self.assertEqual(stdout, None)
+ self.assertEqual(stderr, None)
+
    def test_communicate_pipe_buf(self):
        # communicate() with writes larger than pipe_buf
        # This test will probably deadlock rather than fail, if
        # communicate() does not work properly.
        # A throwaway pipe is created just to query PC_PIPE_BUF, the atomic
        # pipe write size, so the payloads below are guaranteed to exceed it.
        x, y = os.pipe()
        if mswindows:
            pipe_buf = 512
        else:
            pipe_buf = os.fpathconf(x, "PC_PIPE_BUF")
        os.close(x)
        os.close(y)
        p = subprocess.Popen([sys.executable, "-c", yenv +
                              'import sys,os;'
                              'sys.stdout.write(sys.stdin.read(47));'
                              'sys.stderr.write("xyz"*%d);'
                              'sys.stdout.write(sys.stdin.read())' % pipe_buf],
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        string_to_write = "abc"*pipe_buf
        (stdout, stderr) = p.communicate(string_to_write)
        self.assertEqual(stdout, string_to_write)
+
    def test_writes_before_communicate(self):
        # stdin.write before communicate()
        p = subprocess.Popen([sys.executable, "-c", yenv +
                              'import sys,os;'
                              'sys.stdout.write(sys.stdin.read())'],
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        # Data written directly to the pipe must precede communicate()'s
        # own input in what the child reads.
        p.stdin.write("banana")
        (stdout, stderr) = p.communicate("split")
        self.assertEqual(stdout, "bananasplit")
        self.assertStderrEqual(stderr, "")
+
    def test_universal_newlines(self):
        """universal_newlines folds \\r and \\r\\n to \\n when reading p.stdout."""
        p = subprocess.Popen([sys.executable, "-c", yenv +
                              'import sys,os;' + SETBINARY +
                              'sys.stdout.write("line1\\n");'
                              'sys.stdout.flush();'
                              'sys.stdout.write("line2\\r");'
                              'sys.stdout.flush();'
                              'sys.stdout.write("line3\\r\\n");'
                              'sys.stdout.flush();'
                              'sys.stdout.write("line4\\r");'
                              'sys.stdout.flush();'
                              'sys.stdout.write("\\nline5");'
                              'sys.stdout.flush();'
                              'sys.stdout.write("\\nline6");'],
                             stdout=subprocess.PIPE,
                             universal_newlines=1)
        stdout = p.stdout.read()
        # Python 2's builtin `file` grows a .newlines attribute only when the
        # interpreter was built with universal-newline support.
        if hasattr(file, 'newlines'):
            # Interpreter with universal newline support
            self.assertEqual(stdout,
                             "line1\nline2\nline3\nline4\nline5\nline6")
        else:
            # Interpreter without universal newline support
            self.assertEqual(stdout,
                             "line1\nline2\rline3\r\nline4\r\nline5\nline6")
+
    def test_universal_newlines_communicate(self):
        # universal newlines through communicate()
        p = subprocess.Popen([sys.executable, "-c", yenv +
                              'import sys,os;' + SETBINARY +
                              'sys.stdout.write("line1\\n");'
                              'sys.stdout.flush();'
                              'sys.stdout.write("line2\\r");'
                              'sys.stdout.flush();'
                              'sys.stdout.write("line3\\r\\n");'
                              'sys.stdout.flush();'
                              'sys.stdout.write("line4\\r");'
                              'sys.stdout.flush();'
                              'sys.stdout.write("\\nline5");'
                              'sys.stdout.flush();'
                              'sys.stdout.write("\\nline6");'],
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                             universal_newlines=1)
        (stdout, stderr) = p.communicate()
        # Python 2's builtin `file` grows a .newlines attribute only when the
        # interpreter was built with universal-newline support.
        if hasattr(file, 'newlines'):
            # Interpreter with universal newline support
            self.assertEqual(stdout,
                             "line1\nline2\nline3\nline4\nline5\nline6")
        else:
            # Interpreter without universal newline support
            self.assertEqual(stdout,
                             "line1\nline2\rline3\r\nline4\r\nline5\nline6")
+
    def test_universal_newlines_communicate_input_none(self):
        # Test communicate(input=None) with universal newlines.
        #
        # We set stdout to PIPE because, as of this writing, a different
        # code path is tested when the number of pipes is zero or one.
        p = subprocess.Popen([sys.executable, "-c", yenv + "pass"],
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             universal_newlines=True)
        # Must not raise even though nothing is written to the child.
        p.communicate()
        self.assertEqual(p.returncode, 0)
+
    def test_no_leaking(self):
        # Make sure we leak no resources
        # Operator precedence note: the large handle count is used either
        # when the harness lacks is_resource_enabled() entirely, or when the
        # "subprocess" resource is enabled on a non-Windows host.
        if not hasattr(test_support, "is_resource_enabled") \
               or test_support.is_resource_enabled("subprocess") and not mswindows:
            max_handles = 1026 # too much for most UNIX systems
        else:
            max_handles = 65
        # If any Popen leaked pipe fds, this loop would exhaust the fd limit.
        for i in range(max_handles):
            p = subprocess.Popen([sys.executable, "-c", yenv +
                                  "import sys;sys.stdout.write(sys.stdin.read())"],
                                 stdin=subprocess.PIPE,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
            data = p.communicate("lime")[0]
            self.assertEqual(data, "lime")
+
    def test_universal_newlines_communicate_stdin_stdout_stderr(self):
        # universal newlines through communicate(), with stdin, stdout, stderr
        # (the embedded program's indentation below is inside a string literal
        # and only needs to be internally consistent after "if True:")
        p = subprocess.Popen([sys.executable, "-c", yenv +
                              'import sys,os;' + SETBINARY + '''\nif True:
                                  s = sys.stdin.readline()
                                  sys.stdout.write(s)
                                  sys.stdout.write("line2\\r")
                                  sys.stderr.write("eline2\\n")
                                  s = sys.stdin.read()
                                  sys.stdout.write(s)
                                  sys.stdout.write("line4\\n")
                                  sys.stdout.write("line5\\r\\n")
                                  sys.stderr.write("eline6\\r")
                                  sys.stderr.write("eline7\\r\\nz")
                              '''],
                             stdin=subprocess.PIPE,
                             stderr=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             universal_newlines=True)
        self.addCleanup(p.stdout.close)
        self.addCleanup(p.stderr.close)
        (stdout, stderr) = p.communicate(u"line1\nline3\n")
        self.assertEqual(p.returncode, 0)
        self.assertEqual(u"line1\nline2\nline3\nline4\nline5\n", stdout)
        # Python debug build push something like "[42442 refs]\n"
        # to stderr at exit of subprocess.
        # Don't use assertStderrEqual because it strips CR and LF from output.
        self.assertTrue(stderr.startswith(u"eline2\neline6\neline7\n"))
+
    def test_list2cmdline(self):
        """Round-trip cases for the MS C runtime command-line quoting rules."""
        self.assertEqual(subprocess.list2cmdline(['a b c', 'd', 'e']),
                         '"a b c" d e')
        self.assertEqual(subprocess.list2cmdline(['ab"c', '\\', 'd']),
                         'ab\\"c \\ d')
        self.assertEqual(subprocess.list2cmdline(['ab"c', ' \\', 'd']),
                         'ab\\"c " \\\\" d')
        self.assertEqual(subprocess.list2cmdline(['a\\\\\\b', 'de fg', 'h']),
                         'a\\\\\\b "de fg" h')
        self.assertEqual(subprocess.list2cmdline(['a\\"b', 'c', 'd']),
                         'a\\\\\\"b c d')
        self.assertEqual(subprocess.list2cmdline(['a\\\\b c', 'd', 'e']),
                         '"a\\\\b c" d e')
        self.assertEqual(subprocess.list2cmdline(['a\\\\b\\ c', 'd', 'e']),
                         '"a\\\\b\\ c" d e')
        # An empty argument must still be represented, as "".
        self.assertEqual(subprocess.list2cmdline(['ab', '']),
                         'ab ""')
+
+
    def test_poll(self):
        """poll() returns None while running, then the (cached) returncode."""
        p = subprocess.Popen([sys.executable,
                              "-c", yenv + "import time; time.sleep(1)"])
        count = 0
        while p.poll() is None:
            time.sleep(0.1)
            count += 1
        # We expect that the poll loop probably went around about 10 times,
        # but, based on system scheduling we can't control, it's possible
        # poll() never returned None.  It "should be" very rare that it
        # didn't go around at least twice.
        self.assert_(count >= 2)
        # Subsequent invocations should just return the returncode
        self.assertEqual(p.poll(), 0)
+
+
+ def test_wait(self):
+ p = subprocess.Popen([sys.executable,
+ "-c", yenv + "import time; time.sleep(2)"])
+ self.assertEqual(p.wait(), 0)
+ # Subsequent invocations should just return the returncode
+ self.assertEqual(p.wait(), 0)
+
+
    def test_wait_timeout(self):
        """wait(timeout=...) raises TimeoutExpired, then a longer wait succeeds."""
        p = subprocess.Popen([sys.executable,
                              "-c", yenv + "import time; time.sleep(0.1)"])
        try:
            p.wait(timeout=0.01)
        except subprocess.TimeoutExpired, e:
            # The timeout value must appear in the message; this also gives
            # coverage of TimeoutExpired.__str__.
            self.assertIn("0.01", str(e))  # For coverage of __str__.
        else:
            self.fail("subprocess.TimeoutExpired expected but not raised.")
        self.assertEqual(p.wait(timeout=2), 0)
+
+
+ def test_invalid_bufsize(self):
+ # an invalid type of the bufsize argument should raise
+ # TypeError.
+ try:
+ subprocess.Popen([sys.executable, "-c", yenv + "pass"], "orange")
+ except TypeError:
+ pass
+
    def test_leaking_fds_on_error(self):
        # see bug #5179: Popen leaks file descriptors to PIPEs if
        # the child fails to execute; this will eventually exhaust
        # the maximum number of open fds. 1024 seems a very common
        # value for that limit, but Windows has 2048, so we loop
        # 1024 times (each call leaked two fds).
        for i in range(1024):
            # Windows raises IOError.  Others raise OSError.
            try:
                subprocess.Popen(['nonexisting_i_hope'],
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
            except EnvironmentError, c:
                # EnvironmentError is the common base of IOError and OSError.
                if c.errno != 2:  # ignore "no such file"
                    raise
+
+ #@unittest.skipIf(threading is None, "threading required")
    def test_threadsafe_wait(self):
        """Issue21291: Popen.wait() needs to be threadsafe for returncode."""
        proc = subprocess.Popen([sys.executable, '-c', yenv +
                                 'import time; time.sleep(12)'])
        self.assertEqual(proc.returncode, None)
        results = []

        def kill_proc_timer_thread():
            # Runs in a timer thread; records what each step observed so the
            # main thread can verify the sequencing afterwards.
            results.append(('thread-start-poll-result', proc.poll()))
            # terminate it from the thread and wait for the result.
            proc.kill()
            proc.wait()
            results.append(('thread-after-kill-and-wait', proc.returncode))
            # this wait should be a no-op given the above.
            proc.wait()
            results.append(('thread-after-second-wait', proc.returncode))

        # This is a timing sensitive test, the failure mode is
        # triggered when both the main thread and this thread are in
        # the wait() call at once.  The delay here is to allow the
        # main thread to most likely be blocked in its wait() call.
        t = threading.Timer(0.2, kill_proc_timer_thread)
        t.start()

        if mswindows:
            expected_errorcode = 1
        else:
            # Should be -9 because of the proc.kill() from the thread.
            expected_errorcode = -9

        # Wait for the process to finish; the thread should kill it
        # long before it finishes on its own.  Supplying a timeout
        # triggers a different code path for better coverage.
        proc.wait(timeout=20)
        self.assertEqual(proc.returncode, expected_errorcode,
                         msg="unexpected result in wait from main thread")

        # This should be a no-op with no change in returncode.
        proc.wait()
        self.assertEqual(proc.returncode, expected_errorcode,
                         msg="unexpected result in second main wait.")

        t.join()
        # Ensure that all of the thread results are as expected.
        # When a race condition occurs in wait(), the returncode could
        # be set by the wrong thread that doesn't actually have it
        # leading to an incorrect value.
        self.assertEqual([('thread-start-poll-result', None),
                          ('thread-after-kill-and-wait', expected_errorcode),
                          ('thread-after-second-wait', expected_errorcode)],
                         results)
+
    def test_issue8780(self):
        # Ensure that stdout is inherited from the parent
        # if stdout=PIPE is not used
        code = ';'.join((
            'import subprocess32, sys',
            'retcode = subprocess32.call('
                "[sys.executable, '-c', 'print(\"Hello World!\")'])",
            'assert retcode == 0'))
        output = subprocess.check_output([sys.executable, '-c', yenv + code])
        # startswith rather than equality: a debug interpreter may append
        # something like "[42442 refs]" on exit.
        self.assert_(output.startswith('Hello World!'), output)
+
    def test_communicate_epipe(self):
        # Issue 10963: communicate() should hide EPIPE
        # The child exits immediately without reading, so writing 1 MiB to
        # its stdin is guaranteed to hit a broken pipe.
        p = subprocess.Popen([sys.executable, "-c", yenv + 'pass'],
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        self.addCleanup(p.stdout.close)
        self.addCleanup(p.stderr.close)
        self.addCleanup(p.stdin.close)
        p.communicate(b"x" * 2**20)
+
    def test_communicate_epipe_only_stdin(self):
        # Issue 10963: communicate() should hide EPIPE
        p = subprocess.Popen([sys.executable, "-c", yenv + 'pass'],
                             stdin=subprocess.PIPE)
        self.addCleanup(p.stdin.close)
        # Wait first so the pipe is certainly broken before we write.
        p.wait()
        p.communicate(b"x" * 2**20)
+
    if not mswindows: # Signal tests are POSIX specific.
        def test_communicate_eintr(self):
            # Issue #12493: communicate() should handle EINTR
            def handler(signum, frame):
                # Intentionally empty: installing any Python-level handler is
                # enough to make blocking syscalls fail with EINTR.
                pass
            old_handler = signal.signal(signal.SIGALRM, handler)
            self.addCleanup(signal.signal, signal.SIGALRM, old_handler)

            # the process is running for 2 seconds
            args = [sys.executable, "-c", yenv + 'import time; time.sleep(2)']
            for stream in ('stdout', 'stderr'):
                kw = {stream: subprocess.PIPE}
                process = subprocess.Popen(args, **kw)
                try:
                    signal.alarm(1)
                    # communicate() will be interrupted by SIGALRM
                    process.communicate()
                finally:
                    process.__exit__(None, None, None)
+
+
+ # This test is Linux-ish specific for simplicity to at least have
+ # some coverage. It is not a platform specific bug.
+ #@unittest.skipUnless(os.path.isdir('/proc/%d/fd' % os.getpid()),
+ # "Linux specific")
    def test_failed_child_execute_fd_leak(self):
        """Test for the fork() failure fd leak reported in issue16327."""
        if not os.path.isdir('/proc/%d/fd' % os.getpid()):
            self.skipTest("Linux specific")
        fd_directory = '/proc/%d/fd' % os.getpid()
        fds_before_popen = os.listdir(fd_directory)
        try:
            # PopenExecuteChildRaises raises from _execute_child, simulating
            # a failure after the pipes have already been created.
            PopenExecuteChildRaises(
                    [sys.executable, '-c', yenv + 'pass'], stdin=subprocess.PIPE,
                    stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        except PopenTestException:
            pass  # Yay!  Because 2.4 doesn't support with statements.
        else:
            self.fail("PopenTestException expected but not raised.")

        # NOTE: This test doesn't verify that the real _execute_child
        # does not close the file descriptors itself on the way out
        # during an exception.  Code inspection has confirmed that.

        fds_after_exception = os.listdir(fd_directory)
        self.assertEqual(fds_before_popen, fds_after_exception)
+
+
class RunFuncTestCase(BaseTestCase):
    """Tests for the run() convenience API of the subprocess32 backport."""

    def run_python(self, code, **kwargs):
        """Run Python code in a subprocess using subprocess.run"""
        argv = [sys.executable, "-c", yenv + code]
        return subprocess.run(argv, **kwargs)

    def test_returncode(self):
        # call() function with sequence argument
        cp = self.run_python("import sys; sys.exit(47)")
        self.assertEqual(cp.returncode, 47)
        # check_returncode() must raise for a non-zero exit.
        try:
            cp.check_returncode()
        except subprocess.CalledProcessError:
            pass
        else:
            self.fail("CalledProcessError not raised")

    def test_check(self):
        # check=True converts a non-zero exit into CalledProcessError.
        try:
            self.run_python("import sys; sys.exit(47)", check=True)
        except subprocess.CalledProcessError, exception:
            self.assertEqual(exception.returncode, 47)
        else:
            self.fail("CalledProcessError not raised")

    def test_check_zero(self):
        # check_returncode shouldn't raise when returncode is zero
        cp = self.run_python("import sys; sys.exit(0)", check=True)
        self.assertEqual(cp.returncode, 0)

    def test_timeout(self):
        # run() function with timeout argument; we want to test that the child
        # process gets killed when the timeout expires.  If the child isn't
        # killed, this call will deadlock since subprocess.run waits for the
        # child.
        try:
            self.run_python("while True: pass", timeout=0.0001)
        except subprocess.TimeoutExpired:
            pass
        else:
            self.fail("TimeoutExpired not raised")

    def test_capture_stdout(self):
        # capture stdout with zero return code
        cp = self.run_python("print('BDFL')", stdout=subprocess.PIPE)
        self.assertIn('BDFL', cp.stdout)

    def test_capture_stderr(self):
        # capture stderr via stderr=PIPE
        cp = self.run_python("import sys; sys.stderr.write('BDFL')",
                             stderr=subprocess.PIPE)
        self.assertIn('BDFL', cp.stderr)

    def test_check_output_stdin_arg(self):
        # run() can be called with stdin set to a file
        tf = tempfile.TemporaryFile()
        self.addCleanup(tf.close)
        tf.write('pear')
        tf.seek(0)
        cp = self.run_python(
                 "import sys; sys.stdout.write(sys.stdin.read().upper())",
                 stdin=tf, stdout=subprocess.PIPE)
        self.assertIn('PEAR', cp.stdout)

    def test_check_output_input_arg(self):
        # check_output() can be called with input set to a string
        cp = self.run_python(
                 "import sys; sys.stdout.write(sys.stdin.read().upper())",
                 input='pear', stdout=subprocess.PIPE)
        self.assertIn('PEAR', cp.stdout)

    def test_check_output_stdin_with_input_arg(self):
        # run() refuses to accept 'stdin' with 'input'
        tf = tempfile.TemporaryFile()
        self.addCleanup(tf.close)
        tf.write('pear')
        tf.seek(0)
        try:
            output = self.run_python("print('will not be run')",
                                     stdin=tf, input='hare')
        except ValueError, exception:
            # The error message must name both conflicting arguments.
            self.assertIn('stdin', exception.args[0])
            self.assertIn('input', exception.args[0])
        else:
            self.fail("Expected ValueError when stdin and input args supplied.")

    def test_check_output_timeout(self):
        # On timeout, the output produced so far must be attached to the
        # TimeoutExpired exception.
        try:
            cp = self.run_python((
                     "import sys, time\n"
                     "sys.stdout.write('BDFL')\n"
                     "sys.stdout.flush()\n"
                     "time.sleep(3600)"),
                    # Some heavily loaded buildbots (sparc Debian 3.x) require
                    # this much time to start and print.
                    timeout=3, stdout=subprocess.PIPE)
        except subprocess.TimeoutExpired, exception:
            self.assertEqual(exception.output, 'BDFL')
            # output is aliased to stdout
            self.assertEqual(exception.stdout, 'BDFL')
        else:
            self.fail("TimeoutExpired not raised")

    def test_run_kwargs(self):
        # Keyword arguments such as env= must be forwarded to Popen.
        newenv = os.environ.copy()
        newenv["FRUIT"] = "banana"
        cp = self.run_python(('import sys, os;'
                              'os.getenv("FRUIT")=="banana" and sys.exit(33) or sys.exit(31)'),
                             env=newenv)
        self.assertEqual(cp.returncode, 33)
+
+
+# context manager
+class _SuppressCoreFiles(object):
+ """Try to prevent core files from being created."""
+ old_limit = None
+
+ def __enter__(self):
+ """Try to save previous ulimit, then set it to (0, 0)."""
+ try:
+ import resource
+ self.old_limit = resource.getrlimit(resource.RLIMIT_CORE)
+ resource.setrlimit(resource.RLIMIT_CORE, (0, 0))
+ except (ImportError, ValueError, resource.error):
+ pass
+
+ def __exit__(self, *args):
+ """Return core file behavior to default."""
+ if self.old_limit is None:
+ return
+ try:
+ import resource
+ resource.setrlimit(resource.RLIMIT_CORE, self.old_limit)
+ except (ImportError, ValueError, resource.error):
+ pass
+
+
+#@unittest.skipIf(mswindows, "POSIX specific tests")
+class POSIXProcessTestCase(BaseTestCase):
+
    def setUp(self):
        BaseTestCase.setUp(self)
        # A path that cannot exist, used to provoke chdir/exec failures.
        self._nonexistent_dir = "/_this/pa.th/does/not/exist"
+
    def _get_chdir_exception(self):
        """Return the OSError raised by chdir into the nonexistent dir."""
        try:
            os.chdir(self._nonexistent_dir)
        except OSError, e:
            # This avoids hard coding the errno value or the OS perror()
            # string and instead capture the exception that we want to see
            # below for comparison.
            desired_exception = e
            desired_exception.strerror += ': ' + repr(self._nonexistent_dir)
        else:
            self.fail("chdir to nonexistant directory %s succeeded." %
                      self._nonexistent_dir)
        return desired_exception
+
    def test_exception_cwd(self):
        """Test error in the child raised in the parent for a bad cwd."""
        desired_exception = self._get_chdir_exception()
        try:
            p = subprocess.Popen([sys.executable, "-c", yenv + ""],
                                 cwd=self._nonexistent_dir)
        except OSError, e:
            # Test that the child process chdir failure actually makes
            # it up to the parent process as the correct exception.
            self.assertEqual(desired_exception.errno, e.errno)
            self.assertEqual(desired_exception.strerror, e.strerror)
        else:
            self.fail("Expected OSError: %s" % desired_exception)
+
    def test_exception_bad_executable(self):
        """Test error in the child raised in the parent for a bad executable."""
        desired_exception = self._get_chdir_exception()
        try:
            p = subprocess.Popen([sys.executable, "-c", yenv + ""],
                                 executable=self._nonexistent_dir)
        except OSError, e:
            # Test that the child process exec failure actually makes
            # it up to the parent process as the correct exception.
            self.assertEqual(desired_exception.errno, e.errno)
            self.assertEqual(desired_exception.strerror, e.strerror)
        else:
            self.fail("Expected OSError: %s" % desired_exception)
+
    def test_exception_bad_args_0(self):
        """Test error in the child raised in the parent for a bad args[0]."""
        desired_exception = self._get_chdir_exception()
        try:
            p = subprocess.Popen([self._nonexistent_dir, "-c", yenv + ""])
        except OSError, e:
            # Test that the child process exec failure actually makes
            # it up to the parent process as the correct exception.
            self.assertEqual(desired_exception.errno, e.errno)
            self.assertEqual(desired_exception.strerror, e.strerror)
        else:
            self.fail("Expected OSError: %s" % desired_exception)
+
+ #@unittest.skipIf(not os.path.exists('/proc/self/status'))
    def test_restore_signals(self):
        """restore_signals=True must unblock signals ignored by the parent."""
        if not os.path.exists('/proc/self/status'):
            print("SKIP - Functional test requires /proc/self/status.")
            return
        # Blindly assume that cat exists on systems with /proc/self/status...
        default_proc_status = subprocess.check_output(
                ['cat', '/proc/self/status'],
                restore_signals=False)
        for line in default_proc_status.splitlines():
            if line.startswith(b'SigIgn'):
                default_sig_ign_mask = line
                break
        else:
            self.skipTest("SigIgn not found in /proc/self/status.")
        restored_proc_status = subprocess.check_output(
                ['cat', '/proc/self/status'],
                restore_signals=True)
        for line in restored_proc_status.splitlines():
            if line.startswith(b'SigIgn'):
                restored_sig_ign_mask = line
                break
        # restore_signals=True should've unblocked SIGPIPE and friends.
        self.assertNotEqual(default_sig_ign_mask, restored_sig_ign_mask)
+
    def test_start_new_session(self):
        # For code coverage of calling setsid().  We don't care if we get an
        # EPERM error from it depending on the test execution environment, that
        # still indicates that it was called.
        try:
            output = subprocess.check_output(
                    [sys.executable, "-c", yenv +
                     "import os; print(os.getpgid(os.getpid()))"],
                    start_new_session=True)
        except OSError, e:
            if e.errno != errno.EPERM:
                raise
        else:
            # setsid() succeeded: the child must be in its own process group.
            parent_pgid = os.getpgid(os.getpid())
            child_pgid = int(output)
            self.assertNotEqual(parent_pgid, child_pgid)
+
    def test_run_abort(self):
        # returncode handles signal termination
        # Suppress core dumps while the child calls os.abort().
        scf = _SuppressCoreFiles()
        scf.__enter__()
        try:
            p = subprocess.Popen([sys.executable, "-c", yenv +
                                  "import os; os.abort()"])
            p.wait()
        finally:
            scf.__exit__()
        # A negative returncode means death by that signal number.
        self.assertEqual(-p.returncode, signal.SIGABRT)
+
+ def test_preexec(self):
+ # DISCLAIMER: Setting environment variables is *not* a good use
+ # of a preexec_fn. This is merely a test.
+ p = subprocess.Popen([sys.executable, "-c", yenv +
+ "import sys, os;"
+ "sys.stdout.write(os.getenv('FRUIT'))"],
+ stdout=subprocess.PIPE,
+ preexec_fn=lambda: os.putenv("FRUIT", "apple"))
+ self.assertEqual(p.stdout.read(), "apple")
+
+    def test_preexec_exception(self):
+        # An exception raised by preexec_fn must surface in the parent.
+        # The C extension path wraps it in a RuntimeError (hence the
+        # _posixsubprocess assertion); the pure python path re-raises the
+        # original ValueError.
+        def raise_it():
+            raise ValueError("What if two swallows carried a coconut?")
+        try:
+            p = subprocess.Popen([sys.executable, "-c", yenv + ""],
+                                 preexec_fn=raise_it)
+        except RuntimeError, e:
+            self.assertTrue(
+                subprocess._posixsubprocess,
+                "Expected a ValueError from the preexec_fn")
+        except ValueError, e:
+            self.assertIn("coconut", e.args[0])
+        else:
+            self.fail("Exception raised by preexec_fn did not make it "
+                      "to the parent process.")
+
+    class _TestExecuteChildPopen(subprocess.Popen):
+        """Used to test behavior at the end of _execute_child."""
+        def __init__(self, testcase, *args, **kwargs):
+            # Keep the running TestCase so the _execute_child hook below
+            # can make assertions on its behalf.
+            self._testcase = testcase
+            subprocess.Popen.__init__(self, *args, **kwargs)
+
+        def _execute_child(self, *args, **kwargs):
+            try:
+                subprocess.Popen._execute_child(self, *args, **kwargs)
+            finally:
+                # Open a bunch of file descriptors and verify that
+                # none of them are the same as the ones the Popen
+                # instance is using for stdin/stdout/stderr.
+                devzero_fds = [os.open("/dev/zero", os.O_RDONLY)
+                               for _ in range(8)]
+                try:
+                    for fd in devzero_fds:
+                        self._testcase.assertNotIn(
+                            fd, (self.stdin.fileno(), self.stdout.fileno(),
+                                 self.stderr.fileno()),
+                            msg="At least one fd was closed early.")
+                finally:
+                    map(os.close, devzero_fds)
+
+    #@unittest.skipIf(not os.path.exists("/dev/zero"), "/dev/zero required.")
+    def test_preexec_errpipe_does_not_double_close_pipes(self):
+        """Issue16140: Don't double close pipes on preexec error."""
+
+        def raise_it():
+            raise RuntimeError("force the _execute_child() errpipe_data path.")
+
+        # _TestExecuteChildPopen's _execute_child hook asserts that the
+        # stdio fds were not closed early even though preexec_fn failed.
+        try:
+            self._TestExecuteChildPopen(
+                    self, [sys.executable, "-c", yenv + "pass"],
+                    stdin=subprocess.PIPE, stdout=subprocess.PIPE,
+                    stderr=subprocess.PIPE, preexec_fn=raise_it)
+        except RuntimeError:
+            pass  # Yay! Because 2.4 doesn't support with statements.
+        else:
+            self.fail("RuntimeError expected but not raised.")
+
+    #@unittest.skipUnless(gc, "Requires a gc module.")
+    def test_preexec_gc_module_failure(self):
+        # This tests the code that disables garbage collection if the child
+        # process will execute any Python.
+        def raise_runtime_error():
+            raise RuntimeError("this shouldn't escape")
+        # Remember the real gc state/functions; they are monkeypatched
+        # below and must be restored in the finally block.
+        enabled = gc.isenabled()
+        orig_gc_disable = gc.disable
+        orig_gc_isenabled = gc.isenabled
+        try:
+            gc.disable()
+            self.assertFalse(gc.isenabled())
+            subprocess.call([sys.executable, '-c', yenv + ''],
+                            preexec_fn=lambda: None)
+            self.assertFalse(gc.isenabled(),
+                             "Popen enabled gc when it shouldn't.")
+
+            gc.enable()
+            self.assertTrue(gc.isenabled())
+            subprocess.call([sys.executable, '-c', yenv + ''],
+                            preexec_fn=lambda: None)
+            self.assertTrue(gc.isenabled(), "Popen left gc disabled.")
+
+            # A failing gc.disable() must propagate out of Popen.
+            gc.disable = raise_runtime_error
+            self.assertRaises(RuntimeError, subprocess.Popen,
+                              [sys.executable, '-c', yenv + ''],
+                              preexec_fn=lambda: None)
+
+            del gc.isenabled  # force an AttributeError
+            self.assertRaises(AttributeError, subprocess.Popen,
+                              [sys.executable, '-c', yenv + ''],
+                              preexec_fn=lambda: None)
+        finally:
+            gc.disable = orig_gc_disable
+            gc.isenabled = orig_gc_isenabled
+            if not enabled:
+                gc.disable()
+
+ def test_args_string(self):
+ # args is a string
+ f, fname = mkstemp()
+ os.write(f, "#!/bin/sh\n")
+ os.write(f, "exec '%s' -c 'import sys; sys.exit(47)'\n" %
+ sys.executable)
+ os.close(f)
+ os.chmod(fname, 0700)
+ p = subprocess.Popen(fname)
+ p.wait()
+ os.remove(fname)
+ self.assertEqual(p.returncode, 47)
+
+ def test_invalid_args(self):
+ # invalid arguments should raise ValueError
+ self.assertRaises(ValueError, subprocess.call,
+ [sys.executable, "-c", yenv +
+ "import sys; sys.exit(47)"],
+ startupinfo=47)
+ self.assertRaises(ValueError, subprocess.call,
+ [sys.executable, "-c", yenv +
+ "import sys; sys.exit(47)"],
+ creationflags=47)
+
+ def test_shell_sequence(self):
+ # Run command through the shell (sequence)
+ newenv = os.environ.copy()
+ newenv["FRUIT"] = "apple"
+ p = subprocess.Popen(["echo $FRUIT"], shell=1,
+ stdout=subprocess.PIPE,
+ env=newenv)
+ self.assertEqual(p.stdout.read().strip(), "apple")
+
+ def test_shell_string(self):
+ # Run command through the shell (string)
+ newenv = os.environ.copy()
+ newenv["FRUIT"] = "apple"
+ p = subprocess.Popen("echo $FRUIT", shell=1,
+ stdout=subprocess.PIPE,
+ env=newenv)
+ self.assertEqual(p.stdout.read().strip(), "apple")
+
+ def test_call_string(self):
+ # call() function with string argument on UNIX
+ f, fname = mkstemp()
+ os.write(f, "#!/bin/sh\n")
+ os.write(f, "exec '%s' -c 'import sys; sys.exit(47)'\n" %
+ sys.executable)
+ os.close(f)
+ os.chmod(fname, 0700)
+ rc = subprocess.call(fname)
+ os.remove(fname)
+ self.assertEqual(rc, 47)
+
+ def test_specific_shell(self):
+ # Issue #9265: Incorrect name passed as arg[0].
+ shells = []
+ for prefix in ['/bin', '/usr/bin/', '/usr/local/bin']:
+ for name in ['bash', 'ksh']:
+ sh = os.path.join(prefix, name)
+ if os.path.isfile(sh):
+ shells.append(sh)
+ if not shells: # Will probably work for any shell but csh.
+ self.skipTest("bash or ksh required for this test")
+ sh = '/bin/sh'
+ if os.path.isfile(sh) and not os.path.islink(sh):
+ # Test will fail if /bin/sh is a symlink to csh.
+ shells.append(sh)
+ for sh in shells:
+ p = subprocess.Popen("echo $0", executable=sh, shell=True,
+ stdout=subprocess.PIPE)
+ self.assertEqual(p.stdout.read().strip(), sh)
+
+    def _kill_process(self, method, *args):
+        # Helper: spawn a sleeping child, wait until its interpreter is
+        # fully up (it prints 'x'), then invoke p.<method>(*args) on it
+        # and return the Popen for the caller to assert on.
+        # Do not inherit file handles from the parent.
+        # It should fix failures on some platforms.
+        p = subprocess.Popen([sys.executable, "-c", yenv + """if 1:
+                             import sys, time
+                             sys.stdout.write('x\\n')
+                             sys.stdout.flush()
+                             time.sleep(30)
+                             """],
+                             close_fds=True,
+                             stdin=subprocess.PIPE,
+                             stdout=subprocess.PIPE,
+                             stderr=subprocess.PIPE)
+        # Wait for the interpreter to be completely initialized before
+        # sending any signal.
+        p.stdout.read(1)
+        getattr(p, method)(*args)
+        return p
+
+ def test_send_signal(self):
+ p = self._kill_process('send_signal', signal.SIGINT)
+ _, stderr = p.communicate()
+ self.assertIn('KeyboardInterrupt', stderr)
+ self.assertNotEqual(p.wait(), 0)
+
+ def test_kill(self):
+ p = self._kill_process('kill')
+ _, stderr = p.communicate()
+ self.assertStderrEqual(stderr, '')
+ self.assertEqual(p.wait(), -signal.SIGKILL)
+
+ def test_terminate(self):
+ p = self._kill_process('terminate')
+ _, stderr = p.communicate()
+ self.assertStderrEqual(stderr, '')
+ self.assertEqual(p.wait(), -signal.SIGTERM)
+
+    def check_close_std_fds(self, fds):
+        # Issue #9905: test that subprocess pipes still work properly with
+        # some standard fds closed
+        stdin = 0
+        # Dup each fd before closing it so it can be restored afterwards;
+        # the dup of fd 0 doubles as the child's stdin.
+        newfds = []
+        for a in fds:
+            b = os.dup(a)
+            newfds.append(b)
+            if a == 0:
+                stdin = b
+        try:
+            for fd in fds:
+                os.close(fd)
+            out, err = subprocess.Popen([sys.executable, "-c", yenv +
+                              'import sys;'
+                              'sys.stdout.write("apple");'
+                              'sys.stdout.flush();'
+                              'sys.stderr.write("orange")'],
+                       stdin=stdin,
+                       stdout=subprocess.PIPE,
+                       stderr=subprocess.PIPE).communicate()
+            err = strip_python_stderr(err)
+            self.assertEqual((out, err), ('apple', 'orange'))
+        finally:
+            # Restore the original standard fds and drop the dups.
+            for b, a in zip(newfds, fds):
+                os.dup2(b, a)
+            for b in newfds:
+                os.close(b)
+
+ def test_close_fd_0(self):
+ self.check_close_std_fds([0])
+
+ def test_close_fd_1(self):
+ self.check_close_std_fds([1])
+
+ def test_close_fd_2(self):
+ self.check_close_std_fds([2])
+
+ def test_close_fds_0_1(self):
+ self.check_close_std_fds([0, 1])
+
+ def test_close_fds_0_2(self):
+ self.check_close_std_fds([0, 2])
+
+ def test_close_fds_1_2(self):
+ self.check_close_std_fds([1, 2])
+
+ def test_close_fds_0_1_2(self):
+ # Issue #10806: test that subprocess pipes still work properly with
+ # all standard fds closed.
+ self.check_close_std_fds([0, 1, 2])
+
+    def check_swap_fds(self, stdin_no, stdout_no, stderr_no):
+        # Helper for test_swap_fds: point the child's stdin/stdout/stderr
+        # at fds 0-2 in the given (shuffled) order and verify the child
+        # still sees the right streams after the dup2 dance.
+        # open up some temporary files
+        temps = [mkstemp() for i in range(3)]
+        temp_fds = [fd for fd, fname in temps]
+        try:
+            # unlink the files -- we won't need to reopen them
+            for fd, fname in temps:
+                os.unlink(fname)
+
+            # save a copy of the standard file descriptors
+            saved_fds = [os.dup(fd) for fd in range(3)]
+            try:
+                # duplicate the temp files over the standard fd's 0, 1, 2
+                for fd, temp_fd in enumerate(temp_fds):
+                    os.dup2(temp_fd, fd)
+
+                # write some data to what will become stdin, and rewind
+                os.write(stdin_no, "STDIN")
+                os.lseek(stdin_no, 0, 0)
+
+                # now use those files in the given order, so that subprocess
+                # has to rearrange them in the child
+                p = subprocess.Popen([sys.executable, "-c", yenv +
+                    'import sys; got = sys.stdin.read();'
+                    'sys.stdout.write("got %s"%got); sys.stderr.write("err")'],
+                    stdin=stdin_no,
+                    stdout=stdout_no,
+                    stderr=stderr_no)
+                p.wait()
+
+                for fd in temp_fds:
+                    os.lseek(fd, 0, 0)
+
+                out = os.read(stdout_no, 1024)
+                err = os.read(stderr_no, 1024)
+            finally:
+                for std, saved in enumerate(saved_fds):
+                    os.dup2(saved, std)
+                    os.close(saved)
+
+            self.assertEqual(out, "got STDIN")
+            self.assertStderrEqual(err, "err")
+
+        finally:
+            for fd in temp_fds:
+                os.close(fd)
+
+ # When duping fds, if there arises a situation where one of the fds is
+ # either 0, 1 or 2, it is possible that it is overwritten (#12607).
+ # This tests all combinations of this.
+ def test_swap_fds(self):
+ self.check_swap_fds(0, 1, 2)
+ self.check_swap_fds(0, 2, 1)
+ self.check_swap_fds(1, 0, 2)
+ self.check_swap_fds(1, 2, 0)
+ self.check_swap_fds(2, 0, 1)
+ self.check_swap_fds(2, 1, 0)
+
+    def test_small_errpipe_write_fd(self):
+        """Issue #15798: Popen should work when stdio fds are available."""
+        # Close fds 0 and 1 so that the pipes Popen creates internally
+        # (including its error pipe) land on low fd numbers.
+        new_stdin = os.dup(0)
+        new_stdout = os.dup(1)
+        try:
+            os.close(0)
+            os.close(1)
+
+            subprocess.Popen([
+                    sys.executable, "-c", yenv + "pass"]).wait()
+        finally:
+            # Restore original stdin and stdout
+            os.dup2(new_stdin, 0)
+            os.dup2(new_stdout, 1)
+            os.close(new_stdin)
+            os.close(new_stdout)
+
+    def test_remapping_std_fds(self):
+        # Map the child's stdin/stdout/stderr onto three unlinked temp
+        # files in a rotated order and check the streams end up where
+        # requested.
+        # open up some temporary files
+        temps = [mkstemp() for i in range(3)]
+        try:
+            temp_fds = [fd for fd, fname in temps]
+
+            # unlink the files -- we won't need to reopen them
+            for fd, fname in temps:
+                os.unlink(fname)
+
+            # write some data to what will become stdin, and rewind
+            os.write(temp_fds[1], "STDIN")
+            os.lseek(temp_fds[1], 0, 0)
+
+            # move the standard file descriptors out of the way
+            saved_fds = [os.dup(fd) for fd in range(3)]
+            try:
+                # duplicate the file objects over the standard fd's
+                for fd, temp_fd in enumerate(temp_fds):
+                    os.dup2(temp_fd, fd)
+
+                # now use those files in the "wrong" order, so that subprocess
+                # has to rearrange them in the child
+                p = subprocess.Popen([sys.executable, "-c", yenv +
+                    'import sys; got = sys.stdin.read();'
+                    'sys.stdout.write("got %s"%got); sys.stderr.write("err")'],
+                    stdin=temp_fds[1],
+                    stdout=temp_fds[2],
+                    stderr=temp_fds[0])
+                p.wait()
+            finally:
+                # restore the original fd's underneath sys.stdin, etc.
+                for std, saved in enumerate(saved_fds):
+                    os.dup2(saved, std)
+                    os.close(saved)
+
+            for fd in temp_fds:
+                os.lseek(fd, 0, 0)
+
+            out = os.read(temp_fds[2], 1024)
+            err = os.read(temp_fds[0], 1024)
+            self.assertEqual(out, "got STDIN")
+            self.assertStderrEqual(err, "err")
+
+        finally:
+            for fd in temp_fds:
+                os.close(fd)
+
+ # NOTE: test_surrogates_error_message makes no sense on python 2.x. omitted.
+ # NOTE: test_undecodable_env makes no sense on python 2.x. omitted.
+ # NOTE: test_bytes_program makes no sense on python 2.x. omitted.
+
+    if sys.version_info[:2] >= (2,7):
+        # Disabling this test on 2.6 and earlier as it fails on Travis CI regardless
+        # of LANG=C being set and is not worth the time to figure out why in such a
+        # legacy environment..
+        # https://travis-ci.org/google/python-subprocess32/jobs/290065729
+        def test_fs_encode_unicode_error(self):
+            # A program name that cannot be encoded with the (restrictive)
+            # filesystem encoding must raise UnicodeEncodeError, either
+            # directly or wrapped in a RuntimeError by the pure python
+            # child-error reporting path.
+            fs_encoding = sys.getfilesystemencoding()
+            if fs_encoding.upper() not in ("ANSI_X3.4-1968", "ASCII"):
+                self.skipTest(
+                        "Requires a restictive sys.filesystemencoding(), "
+                        "not %s.  Run python with LANG=C" % fs_encoding)
+            highbit_executable_name = os.path.join(
+                    test_support.findfile("testdata"), u"Does\\Not\uDCff\\Exist")
+            try:
+                subprocess.call([highbit_executable_name])
+            except UnicodeEncodeError:
+                return
+            except RuntimeError, e:
+                # The ProcessTestCasePOSIXPurePython version ends up here.  It
+                # can't re-construct the unicode error from the child because it
+                # doesn't have all the arguments.  BFD.  One doesn't use
+                # subprocess32 for the old pure python implementation...
+                if "UnicodeEncodeError" not in str(e):
+                    self.fail("Expected a RuntimeError whining about how a "
+                              "UnicodeEncodeError from the child could not "
+                              "be reraised.  Not: %s" % e)
+                return
+            self.fail("Expected a UnicodeEncodeError to be raised.")
+
+    def test_pipe_cloexec(self):
+        # Even with close_fds=False, the pipe fds Popen creates for one
+        # child must not leak into a second, unrelated child.
+        sleeper = test_support.findfile("testdata/input_reader.py")
+        fd_status = test_support.findfile("testdata/fd_status.py")
+
+        p1 = subprocess.Popen([sys.executable, sleeper],
+                              stdin=subprocess.PIPE, stdout=subprocess.PIPE,
+                              stderr=subprocess.PIPE, close_fds=False)
+
+        self.addCleanup(p1.communicate, '')
+
+        p2 = subprocess.Popen([sys.executable, fd_status],
+                              stdout=subprocess.PIPE, close_fds=False)
+
+        output, error = p2.communicate()
+        result_fds = set(map(int, output.split(',')))
+        unwanted_fds = set([p1.stdin.fileno(), p1.stdout.fileno(),
+                            p1.stderr.fileno()])
+
+        self.assertFalse(result_fds & unwanted_fds,
+                         "Expected no fds from %r to be open in child, "
+                         "found %r" %
+                              (unwanted_fds, result_fds & unwanted_fds))
+
+ def test_pipe_cloexec_real_tools(self):
+ qcat = test_support.findfile("testdata/qcat.py")
+ qgrep = test_support.findfile("testdata/qgrep.py")
+
+ subdata = 'zxcvbn'
+ data = subdata * 4 + '\n'
+
+ p1 = subprocess.Popen([sys.executable, qcat],
+ stdin=subprocess.PIPE, stdout=subprocess.PIPE,
+ close_fds=False)
+
+ p2 = subprocess.Popen([sys.executable, qgrep, subdata],
+ stdin=p1.stdout, stdout=subprocess.PIPE,
+ close_fds=False)
+
+ self.addCleanup(p1.wait)
+ self.addCleanup(p2.wait)
+ def kill_p1():
+ try:
+ p1.terminate()
+ except ProcessLookupError:
+ pass
+ def kill_p2():
+ try:
+ p2.terminate()
+ except ProcessLookupError:
+ pass
+ self.addCleanup(kill_p1)
+ self.addCleanup(kill_p2)
+
+ p1.stdin.write(data)
+ p1.stdin.close()
+
+ readfiles, ignored1, ignored2 = select.select([p2.stdout], [], [], 10)
+
+ self.assertTrue(readfiles, "The child hung")
+ self.assertEqual(p2.stdout.read(), data)
+
+ p1.stdout.close()
+ p2.stdout.close()
+
+ def test_close_fds(self):
+ fd_status = test_support.findfile("testdata/fd_status.py")
+
+ fds = os.pipe()
+ self.addCleanup(os.close, fds[0])
+ self.addCleanup(os.close, fds[1])
+
+ open_fds = set(fds)
+ # add a bunch more fds
+ for _ in range(9):
+ fd = os.open("/dev/null", os.O_RDONLY)
+ self.addCleanup(os.close, fd)
+ open_fds.add(fd)
+
+ p = subprocess.Popen([sys.executable, fd_status],
+ stdout=subprocess.PIPE, close_fds=False)
+ output, ignored = p.communicate()
+ remaining_fds = set(map(int, output.split(',')))
+
+ self.assertEqual(remaining_fds & open_fds, open_fds,
+ "Some fds were closed")
+
+ p = subprocess.Popen([sys.executable, fd_status],
+ stdout=subprocess.PIPE, close_fds=True)
+ output, ignored = p.communicate()
+ remaining_fds = set(map(int, output.split(',')))
+
+ self.assertFalse(remaining_fds & open_fds,
+ "Some fds were left open")
+ self.assertIn(1, remaining_fds, "Subprocess failed")
+
+ # Keep some of the fd's we opened open in the subprocess.
+ # This tests _posixsubprocess.c's proper handling of fds_to_keep.
+ fds_to_keep = set(open_fds.pop() for _ in range(8))
+ p = subprocess.Popen([sys.executable, fd_status],
+ stdout=subprocess.PIPE, close_fds=True,
+ pass_fds=())
+ output, ignored = p.communicate()
+ remaining_fds = set(map(int, output.split(',')))
+
+ self.assertFalse(remaining_fds & fds_to_keep & open_fds,
+ "Some fds not in pass_fds were left open")
+ self.assertIn(1, remaining_fds, "Subprocess failed")
+
+
+    def test_close_fds_when_max_fd_is_lowered(self):
+        """Confirm that issue21618 is fixed (may fail under valgrind)."""
+        fd_status = test_support.findfile("testdata/fd_status.py")
+
+        # Open fds that will sit ABOVE the lowered RLIMIT_NOFILE so we can
+        # check close_fds still closes fds beyond the soft limit.
+        open_fds = set()
+        # Add a bunch more fds to pass down.
+        for _ in range(40):
+            fd = os.open("/dev/null", os.O_RDONLY)
+            open_fds.add(fd)
+
+        # Leave a two pairs of low ones available for use by the
+        # internal child error pipe and the stdout pipe.
+        # We also leave 10 more open for use by the Python 2 startup
+        # import machinery which tends to consume several at once.
+        for fd in sorted(open_fds)[:14]:
+            os.close(fd)
+            open_fds.remove(fd)
+
+        for fd in open_fds:
+            self.addCleanup(os.close, fd)
+
+        max_fd_open = max(open_fds)
+
+        import resource
+        rlim_cur, rlim_max = resource.getrlimit(resource.RLIMIT_NOFILE)
+        try:
+            # 29 is lower than the highest fds we are leaving open.
+            resource.setrlimit(resource.RLIMIT_NOFILE, (29, rlim_max))
+            # Launch a new Python interpreter with our low fd rlim_cur that
+            # inherits open fds above that limit.  It then uses subprocess
+            # with close_fds=True to get a report of open fds in the child.
+            # An explicit list of fds to check is passed to fd_status.py as
+            # letting fd_status rely on its default logic would miss the
+            # fds above rlim_cur as it normally only checks up to that limit.
+            p = subprocess.Popen(
+                [sys.executable, '-c', yenv +
+                 textwrap.dedent("""
+                     import subprocess32, sys
+                     subprocess32.Popen([sys.executable, %(fd_status)r] +
+                                        [str(x) for x in range(%(max_fd)d)],
+                                        close_fds=True).wait()
+                     """ % dict(fd_status=fd_status, max_fd=max_fd_open+1))],
+                stdout=subprocess.PIPE, close_fds=False)
+        finally:
+            # Restore the original soft limit for the rest of the suite.
+            resource.setrlimit(resource.RLIMIT_NOFILE, (rlim_cur, rlim_max))
+
+        output, unused_stderr = p.communicate()
+        remaining_fds = set(map(int, output.strip().split(',')))
+
+        self.assertFalse(remaining_fds & open_fds,
+                         msg="Some fds were left open.")
+
+
+    def test_pass_fds(self):
+        # For each open fd in turn, pass only that fd via pass_fds and
+        # verify it alone survives close_fds=True in the child.
+        fd_status = test_support.findfile("testdata/fd_status.py")
+
+        open_fds = set()
+
+        for x in range(5):
+            fds = os.pipe()
+            self.addCleanup(os.close, fds[0])
+            self.addCleanup(os.close, fds[1])
+            open_fds.update(fds)
+
+        for fd in open_fds:
+            p = subprocess.Popen([sys.executable, fd_status],
+                                 stdout=subprocess.PIPE, close_fds=True,
+                                 pass_fds=(fd, ))
+            output, ignored = p.communicate()
+
+            remaining_fds = set(map(int, output.split(',')))
+            to_be_closed = open_fds - set((fd,))
+
+            self.assertIn(fd, remaining_fds, "fd to be passed not passed")
+            self.assertFalse(remaining_fds & to_be_closed,
+                             "fd to be closed passed")
+
+            # Syntax requires Python 2.5, assertWarns requires Python 2.7.
+            #with self.assertWarns(RuntimeWarning) as context:
+            #    self.assertFalse(subprocess.call(
+            #            [sys.executable, "-c", yenv + "import sys; sys.exit(0)"],
+            #            close_fds=False, pass_fds=(fd, )))
+            #self.assertIn('overriding close_fds', str(context.warning))
+
+ def test_stdout_stdin_are_single_inout_fd(self):
+ inout = open(os.devnull, "r+")
+ try:
+ p = subprocess.Popen([sys.executable, "-c", yenv + "import sys; sys.exit(0)"],
+ stdout=inout, stdin=inout)
+ p.wait()
+ finally:
+ inout.close()
+
+ def test_stdout_stderr_are_single_inout_fd(self):
+ inout = open(os.devnull, "r+")
+ try:
+ p = subprocess.Popen([sys.executable, "-c", yenv + "import sys; sys.exit(0)"],
+ stdout=inout, stderr=inout)
+ p.wait()
+ finally:
+ inout.close()
+
+ def test_stderr_stdin_are_single_inout_fd(self):
+ inout = open(os.devnull, "r+")
+ try:
+ p = subprocess.Popen([sys.executable, "-c", yenv + "import sys; sys.exit(0)"],
+ stderr=inout, stdin=inout)
+ p.wait()
+ finally:
+ inout.close()
+
+ def test_wait_when_sigchild_ignored(self):
+ # NOTE: sigchild_ignore.py may not be an effective test on all OSes.
+ sigchild_ignore = test_support.findfile("testdata/sigchild_ignore.py")
+ p = subprocess.Popen([sys.executable, sigchild_ignore],
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ stdout, stderr = p.communicate()
+ self.assertEqual(0, p.returncode, "sigchild_ignore.py exited"
+ " non-zero with this error:\n%s" % stderr)
+
+    def test_select_unbuffered(self):
+        # Issue #11459: bufsize=0 should really set the pipes as
+        # unbuffered (and therefore let select() work properly).
+        p = subprocess.Popen([sys.executable, "-c", yenv +
+                              'import sys;'
+                              'sys.stdout.write("apple")'],
+                             stdout=subprocess.PIPE,
+                             bufsize=0)
+        f = p.stdout
+        self.addCleanup(f.close)
+        try:
+            # After reading 4 of the 5 bytes, the last byte must still be
+            # immediately visible to select() -- no stdio buffering.
+            self.assertEqual(f.read(4), "appl")
+            self.assertIn(f, select.select([f], [], [], 0.0)[0])
+        finally:
+            p.wait()
+
+    def test_zombie_fast_process_del(self):
+        # Issue #12650: on Unix, if Popen.__del__() was called before the
+        # process exited, it wouldn't be added to subprocess._active, and would
+        # remain a zombie.
+        # spawn a Popen, and delete its reference before it exits
+        p = subprocess.Popen([sys.executable, "-c", yenv +
+                              'import sys, time;'
+                              'time.sleep(0.2)'],
+                             stdout=subprocess.PIPE,
+                             stderr=subprocess.PIPE)
+        self.addCleanup(p.stdout.close)
+        self.addCleanup(p.stderr.close)
+        # Remember identity/pid; `del p` drops the last reference while
+        # the child is still sleeping.
+        ident = id(p)
+        pid = p.pid
+        del p
+        # check that p is in the active processes list
+        self.assertIn(ident, [id(o) for o in subprocess._active])
+
+    def test_leak_fast_process_del_killed(self):
+        # Issue #12650: on Unix, if Popen.__del__() was called before the
+        # process exited, and the process got killed by a signal, it would never
+        # be removed from subprocess._active, which triggered a FD and memory
+        # leak.
+        # spawn a Popen, delete its reference and kill it
+        p = subprocess.Popen([sys.executable, "-c", yenv +
+                              'import time;'
+                              'time.sleep(3)'],
+                             stdout=subprocess.PIPE,
+                             stderr=subprocess.PIPE)
+        self.addCleanup(p.stdout.close)
+        self.addCleanup(p.stderr.close)
+        ident = id(p)
+        pid = p.pid
+        del p
+        os.kill(pid, signal.SIGKILL)
+        # check that p is in the active processes list
+        self.assertIn(ident, [id(o) for o in subprocess._active])
+
+        # let some time for the process to exit, and create a new Popen: this
+        # should trigger the wait() of p
+        time.sleep(0.2)
+        try:
+            proc = subprocess.Popen(['nonexisting_i_hope'],
+                                    stdout=subprocess.PIPE,
+                                    stderr=subprocess.PIPE)
+            proc.__exit__(None, None, None)
+        except EnvironmentError:
+            pass
+        else:
+            self.fail("EnvironmentError not raised.")
+        # p should have been wait()ed on, and removed from the _active list
+        self.assertRaises(OSError, os.waitpid, pid, 0)
+        self.assertNotIn(ident, [id(o) for o in subprocess._active])
+
+    def test_close_fds_after_preexec(self):
+        # close_fds must be applied AFTER preexec_fn runs, so an fd the
+        # preexec_fn touches is still closed in the exec'd child.
+        fd_status = test_support.findfile("testdata/fd_status.py")
+
+        # this FD is used as dup2() target by preexec_fn, and should be closed
+        # in the child process
+        fd = os.dup(1)
+        self.addCleanup(os.close, fd)
+
+        p = subprocess.Popen([sys.executable, fd_status],
+                             stdout=subprocess.PIPE, close_fds=True,
+                             preexec_fn=lambda: os.dup2(1, fd))
+        output, ignored = p.communicate()
+
+        remaining_fds = set(map(int, output.split(',')))
+
+        self.assertNotIn(fd, remaining_fds)
+
+ def test_child_terminated_in_stopped_state(self):
+ """Test wait() behavior when waitpid returns WIFSTOPPED; issue29335."""
+ if not ctypes:
+ sys.stderr.write('ctypes module required.\n')
+ return
+ if not sys.executable:
+ self.stderr.write('Test requires sys.executable.\n')
+ return
+ PTRACE_TRACEME = 0 # From glibc and MacOS (PT_TRACE_ME).
+ libc_name = ctypes.util.find_library('c')
+ libc = ctypes.CDLL(libc_name)
+ if not hasattr(libc, 'ptrace'):
+ self.stderr.write('ptrace() required.\n')
+ return
+ test_ptrace = subprocess.Popen(
+ [sys.executable, '-c', yenv + """if True:
+ import ctypes
+ libc = ctypes.CDLL({libc_name!r})
+ libc.ptrace({PTRACE_TRACEME}, 0, 0)
+ """.format(libc_name=libc_name, PTRACE_TRACEME=PTRACE_TRACEME)
+ ])
+ if test_ptrace.wait() != 0:
+ sys.stderr.write('ptrace() failed - unable to test.\n')
+ return
+ child = subprocess.Popen(
+ [sys.executable, '-c', yenv + """if True:
+ import ctypes
+ libc = ctypes.CDLL({libc_name!r})
+ libc.ptrace({PTRACE_TRACEME}, 0, 0)
+ libc.printf(ctypes.c_char_p(0xdeadbeef)) # Crash the process.
+ """.format(libc_name=libc_name, PTRACE_TRACEME=PTRACE_TRACEME)
+ ])
+ try:
+ returncode = child.wait()
+ except Exception, e:
+ child.kill() # Clean up the hung stopped process.
+ raise e
+ self.assertNotEqual(0, returncode)
+ self.assert_(returncode < 0, msg=repr(returncode)) # signal death, likely SIGSEGV.
+
+
+# On Windows, replace the POSIX suite with an empty TestCase so the test
+# loader still finds the name but runs nothing.
+if mswindows:
+    class POSIXProcessTestCase(unittest.TestCase): pass
+
+
+#@unittest.skipUnless(mswindows, "Windows specific tests")
+class Win32ProcessTestCase(BaseTestCase):
+    # Windows-only Popen behavior: startupinfo/creationflags handling,
+    # shell commands via cmd.exe's "set", string args, and termination.
+
+    def test_startupinfo(self):
+        # startupinfo argument
+        # We use hardcoded constants because we do not want to
+        # depend on win32all.
+        STARTF_USESHOWWINDOW = 1
+        SW_MAXIMIZE = 3
+        startupinfo = subprocess.STARTUPINFO()
+        startupinfo.dwFlags = STARTF_USESHOWWINDOW
+        startupinfo.wShowWindow = SW_MAXIMIZE
+        # Since Python is a console process, it won't be affected
+        # by wShowWindow, but the argument should be silently
+        # ignored
+        subprocess.call([sys.executable, "-c", yenv + "import sys; sys.exit(0)"],
+                        startupinfo=startupinfo)
+
+    def test_creationflags(self):
+        # creationflags argument
+        CREATE_NEW_CONSOLE = 16
+        sys.stderr.write("    a DOS box should flash briefly ...\n")
+        subprocess.call(sys.executable +
+                        ' -c "import time; time.sleep(0.25)"',
+                        creationflags=CREATE_NEW_CONSOLE)
+
+    def test_invalid_args(self):
+        # POSIX-only keyword arguments must raise ValueError on Windows.
+        self.assertRaises(ValueError, subprocess.call,
+                          [sys.executable, "-c", yenv +
+                           "import sys; sys.exit(47)"],
+                          preexec_fn=lambda: 1)
+        self.assertRaises(ValueError, subprocess.call,
+                          [sys.executable, "-c", yenv +
+                           "import sys; sys.exit(47)"],
+                          stdout=subprocess.PIPE,
+                          close_fds=True)
+
+    def test_close_fds(self):
+        # close file descriptors
+        rc = subprocess.call([sys.executable, "-c", yenv +
+                              "import sys; sys.exit(47)"],
+                             close_fds=True)
+        self.assertEqual(rc, 47)
+
+    def test_shell_sequence(self):
+        # Run command through the shell (sequence)
+        newenv = os.environ.copy()
+        newenv["FRUIT"] = "physalis"
+        p = subprocess.Popen(["set"], shell=1,
+                             stdout=subprocess.PIPE,
+                             env=newenv)
+        self.assertIn("physalis", p.stdout.read())
+
+    def test_shell_string(self):
+        # Run command through the shell (string)
+        newenv = os.environ.copy()
+        newenv["FRUIT"] = "physalis"
+        p = subprocess.Popen("set", shell=1,
+                             stdout=subprocess.PIPE,
+                             env=newenv)
+        self.assertIn("physalis", p.stdout.read())
+
+    def test_call_string(self):
+        # call() function with string argument on Windows
+        rc = subprocess.call(sys.executable +
+                             ' -c "import sys; sys.exit(47)"')
+        self.assertEqual(rc, 47)
+
+    def _kill_process(self, method, *args):
+        # Helper: spawn a child blocked on input(), then repeatedly apply
+        # p.<method>(*args) until it dies (signals can need retries on
+        # some win32 buildbots).
+        # Some win32 buildbot raises EOFError if stdin is inherited
+        p = subprocess.Popen([sys.executable, "-c", yenv + "input()"],
+                             stdin=subprocess.PIPE, stderr=subprocess.PIPE)
+
+        # Let the process initialize (Issue #3137)
+        time.sleep(0.1)
+        # The process should not terminate prematurely
+        self.assert_(p.poll() is None)
+        # Retry if the process do not receive the signal.
+        count, maxcount = 0, 3
+        while count < maxcount and p.poll() is None:
+            getattr(p, method)(*args)
+            time.sleep(0.1)
+            count += 1
+
+        returncode = p.poll()
+        self.assert_(returncode is not None, "the subprocess did not terminate")
+        if count > 1:
+            print >>sys.stderr, ("p.{}{} succeeded after "
+                                 "{} attempts".format(method, args, count))
+        _, stderr = p.communicate()
+        self.assertStderrEqual(stderr, '')
+        self.assertEqual(p.wait(), returncode)
+        self.assertNotEqual(returncode, 0)
+
+    def test_send_signal(self):
+        self._kill_process('send_signal', signal.SIGTERM)
+
+    def test_kill(self):
+        self._kill_process('kill')
+
+    def test_terminate(self):
+        self._kill_process('terminate')
+
+
+# On POSIX, replace the Windows suite with an empty TestCase so the test
+# loader still finds the name but runs nothing.
+if not mswindows:
+    class Win32ProcessTestCase(unittest.TestCase): pass
+
+
+#@unittest.skipUnless(getattr(subprocess, '_has_poll', False),
+#                     "poll system call not supported")
+class ProcessTestCaseNoPoll(ProcessTestCase):
+    # Re-run the whole ProcessTestCase suite with subprocess's poll()
+    # support forced off, exercising the select()-based code paths.
+    def setUp(self):
+        subprocess._has_poll = False
+        ProcessTestCase.setUp(self)
+
+    def tearDown(self):
+        # Restore the real capability flag for the rest of the suite.
+        subprocess._has_poll = True
+        ProcessTestCase.tearDown(self)
+
+
+# Without poll() support there is nothing extra to test; stub the class.
+if not getattr(subprocess, '_has_poll', False):
+    class ProcessTestCaseNoPoll(unittest.TestCase): pass
+
+
+#@unittest.skipUnless(getattr(subprocess, '_posixsubprocess', False),
+#                     "_posixsubprocess extension module not found.")
+class ProcessTestCasePOSIXPurePython(ProcessTestCase, POSIXProcessTestCase):
+    # Re-run both suites with the C extension disabled, forcing the pure
+    # python fork/exec implementation.
+    def setUp(self):
+        subprocess._posixsubprocess = None
+        ProcessTestCase.setUp(self)
+        POSIXProcessTestCase.setUp(self)
+
+    def tearDown(self):
+        # Restore the C extension for the rest of the suite.
+        subprocess._posixsubprocess = sys.modules['_posixsubprocess32']
+        POSIXProcessTestCase.tearDown(self)
+        ProcessTestCase.tearDown(self)
+
+
+class POSIXSubprocessModuleTestCase(unittest.TestCase):
+    # Direct tests of the _posixsubprocess C extension module.
+
+    def test_fork_exec_sorted_fd_sanity_check(self):
+        # Issue #23564: sanity check the fork_exec() fds_to_keep sanity check.
+        _posixsubprocess = subprocess._posixsubprocess
+        gc_enabled = gc.isenabled()
+        try:
+            # fork_exec toggles gc; run with gc enabled so that path is hit.
+            gc.enable()
+
+            for fds_to_keep in (
+                (-1, 2, 3, 4, 5),  # Negative number.
+                ('str', 4),  # Not an int.
+                (18, 23, 42, 2**63),  # Out of range.
+                (5, 4),  # Not sorted.
+                (6, 7, 7, 8),  # Duplicate.
+            ):
+                try:
+                    # The positional arguments mirror fork_exec's C ABI;
+                    # only fds_to_keep varies here.
+                    _posixsubprocess.fork_exec(
+                        ["false"], ["false"],
+                        True, fds_to_keep, None, ["env"],
+                        -1, -1, -1, -1,
+                        1, 2, 3, 4,
+                        True, True, None)
+                except ValueError, exception:
+                    self.assertTrue('fds_to_keep' in str(exception),
+                                    msg=str(exception))
+                else:
+                    self.fail("ValueError not raised, fds_to_keep=%s" %
+                              (fds_to_keep,))
+        finally:
+            if not gc_enabled:
+                gc.disable()
+
+    def test_cloexec_pass_fds(self):
+        # A pipe fd listed in pass_fds must be readable in the child via
+        # /dev/fd/<n>, i.e. it must not have been marked close-on-exec.
+        if not os.path.exists('/dev/null') or not os.path.isdir('/dev/fd'):
+            print("Skipped - This test requires /dev/null and /dev/fd/*.")
+            return
+        null_reader_proc = subprocess.Popen(
+                ["cat"],
+                stdin=open('/dev/null', 'rb'),
+                stdout=subprocess.PIPE)
+        try:
+            data = null_reader_proc.stdout
+            fd_name = '/dev/fd/%d' % data.fileno()
+            fd_reader_proc = subprocess.Popen(
+                    ["cat", fd_name],
+                    stdout=subprocess.PIPE,
+                    stderr=subprocess.STDOUT,  # Capture any error from cat.
+                    pass_fds=(data.fileno(),))
+            try:
+                fddata = fd_reader_proc.stdout
+                self.assertEqual('', fddata.read())
+            finally:
+                fd_reader_proc.wait()
+        finally:
+            null_reader_proc.wait()
+
+
+# Without the C extension, stub out the suites that require it.
+if not getattr(subprocess, '_posixsubprocess', False):
+    print >>sys.stderr, "_posixsubprocess extension module not found."
+    class ProcessTestCasePOSIXPurePython(unittest.TestCase): pass
+    class POSIXSubprocessModuleTestCase(unittest.TestCase): pass
+
+
+class HelperFunctionTests(unittest.TestCase):
+    # Tests for module-level helper functions and exception pickling.
+
+    #@unittest.skipIf(mswindows, "errno and EINTR make no sense on windows")
+    def test_eintr_retry_call(self):
+        # _eintr_retry_call must transparently retry exactly once per
+        # EINTR and pass arguments through unchanged.
+        record_calls = []
+        def fake_os_func(*args):
+            record_calls.append(args)
+            if len(record_calls) == 2:
+                raise OSError(errno.EINTR, "fake interrupted system call")
+            return tuple(reversed(args))
+
+        self.assertEqual((999, 256),
+                         subprocess._eintr_retry_call(fake_os_func, 256, 999))
+        self.assertEqual([(256, 999)], record_calls)
+        # This time there will be an EINTR so it will loop once.
+        self.assertEqual((666,),
+                         subprocess._eintr_retry_call(fake_os_func, 666))
+        self.assertEqual([(256, 999), (666,), (666,)], record_calls)
+
+    if mswindows:
+        del test_eintr_retry_call
+
+    if not hasattr(unittest.TestCase, 'assertSequenceEqual'):
+        # Minimal backport for very old unittest versions.
+        def assertSequenceEqual(self, seq1, seq2):
+            self.assertEqual(list(seq1), list(seq2))
+
+    def test_get_exec_path(self):
+        # _get_exec_path must honor an explicit env dict, fall back to
+        # os.environ, and default to os.defpath when PATH is absent.
+        defpath_list = os.defpath.split(os.pathsep)
+        test_path = ['/monty', '/python', '', '/flying/circus']
+        test_env = {'PATH': os.pathsep.join(test_path)}
+
+        get_exec_path = subprocess._get_exec_path
+        saved_environ = os.environ
+        try:
+            os.environ = dict(test_env)
+            # Test that defaulting to os.environ works.
+            self.assertSequenceEqual(test_path, get_exec_path())
+            self.assertSequenceEqual(test_path, get_exec_path(env=None))
+        finally:
+            os.environ = saved_environ
+
+        # No PATH environment variable
+        self.assertSequenceEqual(defpath_list, get_exec_path({}))
+        # Empty PATH environment variable
+        self.assertSequenceEqual(('',), get_exec_path({'PATH':''}))
+        # Supplied PATH environment variable
+        self.assertSequenceEqual(test_path, get_exec_path(test_env))
+
+    def test_args_from_interpreter_flags(self):
+        if sys.version_info[:2] < (2,6):
+            print "Skipped - only useful on 2.6 and higher."
+            return
+        # Mostly just to call it for code coverage.
+        args_list = subprocess32._args_from_interpreter_flags()
+        self.assertTrue(isinstance(args_list, list), msg=repr(args_list))
+
+    def test_timeout_expired_unpickling(self):
+        """https://github.com/google/python-subprocess32/issues/57"""
+        # TimeoutExpired must survive a pickle round trip intact.
+        t = subprocess32.TimeoutExpired(['command', 'arg1'], 5,
+                                        output='stdout!', stderr='err')
+        t_pickled = pickle.dumps(t)
+        t2 = pickle.loads(t_pickled)
+        self.assertEqual(t.cmd, t2.cmd)
+        self.assertEqual(t.timeout, t2.timeout)
+        self.assertEqual(t.output, t2.output)
+        self.assertEqual(t.stderr, t2.stderr)
+
+    def test_called_process_error_unpickling(self):
+        """https://github.com/google/python-subprocess32/issues/57"""
+        # CalledProcessError must survive a pickle round trip intact.
+        e = subprocess32.CalledProcessError(
+                2, ['command', 'arg1'], output='stdout!', stderr='err')
+        e_pickled = pickle.dumps(e)
+        e2 = pickle.loads(e_pickled)
+        self.assertEqual(e.returncode, e2.returncode)
+        self.assertEqual(e.cmd, e2.cmd)
+        self.assertEqual(e.output, e2.output)
+        self.assertEqual(e.stderr, e2.stderr)
+
+
+def reap_children():
+ """Use this function at the end of test_main() whenever sub-processes
+ are started. This will help ensure that no extra children (zombies)
+ stick around to hog resources and create problems when looking
+ for refleaks.
+ """
+
+ # Reap all our dead child processes so we don't leave zombies around.
+ # These hog resources and might be causing some of the buildbots to die.
+ if hasattr(os, 'waitpid'):
+ any_process = -1
+ while True:
+ try:
+ # This will raise an exception on Windows. That's ok.
+ pid, status = os.waitpid(any_process, os.WNOHANG)
+ if pid == 0:
+ break
+ except:
+ break
+
+
+
+class ContextManagerTests(BaseTestCase):
+
+ def test_pipe(self):
+ proc = subprocess.Popen([sys.executable, "-c", yenv +
+ "import sys;"
+ "sys.stdout.write('stdout');"
+ "sys.stderr.write('stderr');"],
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ try:
+ self.assertEqual(proc.stdout.read(), "stdout")
+ self.assertStderrEqual(proc.stderr.read(), "stderr")
+ finally:
+ proc.__exit__(None, None, None)
+
+ self.assertTrue(proc.stdout.closed)
+ self.assertTrue(proc.stderr.closed)
+
+ def test_returncode(self):
+ proc = subprocess.Popen([sys.executable, "-c", yenv +
+ "import sys; sys.exit(100)"])
+ proc.__exit__(None, None, None)
+ # __exit__ calls wait(), so the returncode should be set
+ self.assertEqual(proc.returncode, 100)
+
+ def test_communicate_stdin(self):
+ proc = subprocess.Popen([sys.executable, "-c", yenv +
+ "import sys;"
+ "sys.exit(sys.stdin.read() == 'context')"],
+ stdin=subprocess.PIPE)
+ try:
+ proc.communicate("context")
+ self.assertEqual(proc.returncode, 1)
+ finally:
+ proc.__exit__(None, None, None)
+
+ def test_invalid_args(self):
+ try:
+ proc = subprocess.Popen(['nonexisting_i_hope'],
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ proc.__exit__(None, None, None)
+ except EnvironmentError, exception:
+ # ignore errors that indicate the command was not found
+ if exception.errno not in (errno.ENOENT, errno.EACCES):
+ raise
+ else:
+ self.fail("Expected an EnvironmentError exception.")
+
+
+if sys.version_info[:2] <= (2,4):
+ # The test suite hangs during the pure python test on 2.4. No idea why.
+ # That is not the implementation anyone is using this module for anyways.
+ class ProcessTestCasePOSIXPurePython(unittest.TestCase): pass
+
+
+def main():
+ unit_tests = (ProcessTestCase,
+ POSIXProcessTestCase,
+ POSIXSubprocessModuleTestCase,
+ Win32ProcessTestCase,
+ ProcessTestCasePOSIXPurePython,
+ ProcessTestCaseNoPoll,
+ HelperFunctionTests,
+ ContextManagerTests,
+ RunFuncTestCase,
+ )
+
+ test_support.run_unittest(*unit_tests)
+ reap_children()
+
+if __name__ == "__main__":
+ main()
diff --git a/contrib/deprecated/python/subprocess32/testdata/fd_status.py b/contrib/deprecated/python/subprocess32/testdata/fd_status.py
new file mode 100644
index 0000000000..67fb41c0af
--- /dev/null
+++ b/contrib/deprecated/python/subprocess32/testdata/fd_status.py
@@ -0,0 +1,34 @@
+"""When called as a script, print a comma-separated list of the open
+file descriptors on stdout.
+
+Usage:
+fd_status.py: check all file descriptors
+fd_status.py fd1 fd2 ...: check only specified file descriptors
+"""
+
+import errno
+import os
+import stat
+import sys
+
+if __name__ == "__main__":
+ fds = []
+ if len(sys.argv) == 1:
+ try:
+ _MAXFD = os.sysconf("SC_OPEN_MAX")
+ except:
+ _MAXFD = 256
+ test_fds = range(0, _MAXFD)
+ else:
+ test_fds = map(int, sys.argv[1:])
+ for fd in test_fds:
+ try:
+ st = os.fstat(fd)
+ except OSError, e:
+ if e.errno == errno.EBADF:
+ continue
+ raise
+ # Ignore Solaris door files
+ if not hasattr(stat, 'S_ISDOOR') or not stat.S_ISDOOR(st.st_mode):
+ fds.append(fd)
+ print ','.join(map(str, fds))
diff --git a/contrib/deprecated/python/subprocess32/testdata/input_reader.py b/contrib/deprecated/python/subprocess32/testdata/input_reader.py
new file mode 100644
index 0000000000..1dc3191ad1
--- /dev/null
+++ b/contrib/deprecated/python/subprocess32/testdata/input_reader.py
@@ -0,0 +1,7 @@
+"""When called as a script, consumes the input."""
+
+import sys
+
+if __name__ == "__main__":
+ for line in sys.stdin:
+ pass
diff --git a/contrib/deprecated/python/subprocess32/testdata/qcat.py b/contrib/deprecated/python/subprocess32/testdata/qcat.py
new file mode 100644
index 0000000000..fe6f9db25c
--- /dev/null
+++ b/contrib/deprecated/python/subprocess32/testdata/qcat.py
@@ -0,0 +1,7 @@
+"""When run as a script, simulates cat with no arguments."""
+
+import sys
+
+if __name__ == "__main__":
+ for line in sys.stdin:
+ sys.stdout.write(line)
diff --git a/contrib/deprecated/python/subprocess32/testdata/qgrep.py b/contrib/deprecated/python/subprocess32/testdata/qgrep.py
new file mode 100644
index 0000000000..69906379a9
--- /dev/null
+++ b/contrib/deprecated/python/subprocess32/testdata/qgrep.py
@@ -0,0 +1,10 @@
+"""When called with a single argument, simulates fgrep with a single
+argument and no options."""
+
+import sys
+
+if __name__ == "__main__":
+ pattern = sys.argv[1]
+ for line in sys.stdin:
+ if pattern in line:
+ sys.stdout.write(line)
diff --git a/contrib/deprecated/python/subprocess32/testdata/sigchild_ignore.py b/contrib/deprecated/python/subprocess32/testdata/sigchild_ignore.py
new file mode 100644
index 0000000000..ba5ccf2cf0
--- /dev/null
+++ b/contrib/deprecated/python/subprocess32/testdata/sigchild_ignore.py
@@ -0,0 +1,18 @@
+import signal, subprocess32, sys, time
+# On Linux this causes os.waitpid to fail with OSError as the OS has already
+# reaped our child process. We are testing that wait() passes that OSError on
+# to the caller instead of causing us to exit with an error.
+sig_child = getattr(signal, 'SIGCLD', None)
+if sig_child is None:
+ sig_child = getattr(signal, 'SIGCHLD')
+signal.signal(sig_child, signal.SIG_IGN)
+subprocess32.Popen([sys.executable, '-c', 'print("albatross")']).wait()
+# Also ensure poll() handles an errno.ECHILD appropriately.
+p = subprocess32.Popen([sys.executable, '-c', 'print("albatross")'])
+num_polls = 0
+while p.poll() is None:
+ # Waiting for the process to finish.
+ time.sleep(0.01) # Avoid being a CPU busy loop.
+ num_polls += 1
+ if num_polls > 3000:
+ raise RuntimeError('poll should have returned 0 within 30 seconds')
diff --git a/contrib/deprecated/python/subprocess32/testdata/ya.make b/contrib/deprecated/python/subprocess32/testdata/ya.make
new file mode 100644
index 0000000000..4611e078bb
--- /dev/null
+++ b/contrib/deprecated/python/subprocess32/testdata/ya.make
@@ -0,0 +1,19 @@
+PY2TEST()
+
+SIZE(MEDIUM)
+
+NO_LINT()
+
+SRCDIR(
+ contrib/deprecated/python/subprocess32
+)
+
+TEST_SRCS(
+ test_subprocess32.py
+)
+
+TEST_CWD(
+ contrib/deprecated/python/subprocess32
+)
+
+END()
diff --git a/contrib/deprecated/python/subprocess32/ya.make b/contrib/deprecated/python/subprocess32/ya.make
new file mode 100644
index 0000000000..9613acd8c0
--- /dev/null
+++ b/contrib/deprecated/python/subprocess32/ya.make
@@ -0,0 +1,35 @@
+PY2_LIBRARY() # Backport from Python 3.
+
+LICENSE(PSF-2.0)
+
+VERSION(3.5.4)
+
+COPY_FILE(subprocess32.py subprocess.py)
+
+PY_SRCS(
+ TOP_LEVEL
+ subprocess32.py
+)
+
+IF (NOT OS_WINDOWS)
+ NO_COMPILER_WARNINGS()
+
+ SRCS(
+ _posixsubprocess.c
+ )
+
+ PY_REGISTER(_posixsubprocess32)
+
+ PY_SRCS(
+ TOP_LEVEL
+ subprocess.py
+ )
+ENDIF ()
+
+NO_LINT()
+
+END()
+
+RECURSE_FOR_TESTS(
+ testdata
+)