author     shadchin <shadchin@yandex-team.com>  2023-11-16 15:22:08 +0300
committer  shadchin <shadchin@yandex-team.com>  2023-11-16 16:50:19 +0300
commit     6b2446d933e7eaaf3caf630740dd7a1ac3d331de (patch)
tree       47a868648c7c6b56eeec0105304994c3dd4bfb0b /contrib/python/numpy/py3/numpy/lib/tests
parent     358c0fc7925cf5ede4394a784bf8dd74ad964cc1 (diff)
download   ydb-6b2446d933e7eaaf3caf630740dd7a1ac3d331de.tar.gz
Update numpy to 1.25.2
Diffstat (limited to 'contrib/python/numpy/py3/numpy/lib/tests')
-rw-r--r--  contrib/python/numpy/py3/numpy/lib/tests/test_arraypad.py        17
-rw-r--r--  contrib/python/numpy/py3/numpy/lib/tests/test_format.py          10
-rw-r--r--  contrib/python/numpy/py3/numpy/lib/tests/test_function_base.py  250
-rw-r--r--  contrib/python/numpy/py3/numpy/lib/tests/test_histograms.py       7
-rw-r--r--  contrib/python/numpy/py3/numpy/lib/tests/test_io.py              72
-rw-r--r--  contrib/python/numpy/py3/numpy/lib/tests/test_loadtxt.py         11
-rw-r--r--  contrib/python/numpy/py3/numpy/lib/tests/test_nanfunctions.py    38
-rw-r--r--  contrib/python/numpy/py3/numpy/lib/tests/test_recfunctions.py    66
-rw-r--r--  contrib/python/numpy/py3/numpy/lib/tests/test_shape_base.py       4
-rw-r--r--  contrib/python/numpy/py3/numpy/lib/tests/test_twodim_base.py     11
-rw-r--r--  contrib/python/numpy/py3/numpy/lib/tests/test_type_check.py       2
-rw-r--r--  contrib/python/numpy/py3/numpy/lib/tests/test_ufunclike.py        6
-rw-r--r--  contrib/python/numpy/py3/numpy/lib/tests/test_utils.py           50
13 files changed, 467 insertions, 77 deletions
diff --git a/contrib/python/numpy/py3/numpy/lib/tests/test_arraypad.py b/contrib/python/numpy/py3/numpy/lib/tests/test_arraypad.py
index a596815730..0bebe36934 100644
--- a/contrib/python/numpy/py3/numpy/lib/tests/test_arraypad.py
+++ b/contrib/python/numpy/py3/numpy/lib/tests/test_arraypad.py
@@ -1139,6 +1139,23 @@ class TestWrap:
a = np.arange(5)
b = np.pad(a, (0, 12), mode="wrap")
assert_array_equal(np.r_[a, a, a, a][:-3], b)
+
+ def test_repeated_wrapping_multiple_origin(self):
+ """
+ Assert that 'wrap' pads only with multiples of the original area if
+ the pad width is larger than the original array.
+ """
+ a = np.arange(4).reshape(2, 2)
+ a = np.pad(a, [(1, 3), (3, 1)], mode='wrap')
+ b = np.array(
+ [[3, 2, 3, 2, 3, 2],
+ [1, 0, 1, 0, 1, 0],
+ [3, 2, 3, 2, 3, 2],
+ [1, 0, 1, 0, 1, 0],
+ [3, 2, 3, 2, 3, 2],
+ [1, 0, 1, 0, 1, 0]]
+ )
+ assert_array_equal(a, b)
class TestEdge:
diff --git a/contrib/python/numpy/py3/numpy/lib/tests/test_format.py b/contrib/python/numpy/py3/numpy/lib/tests/test_format.py
index abe1d44aa0..766c499c28 100644
--- a/contrib/python/numpy/py3/numpy/lib/tests/test_format.py
+++ b/contrib/python/numpy/py3/numpy/lib/tests/test_format.py
@@ -257,7 +257,8 @@ def test_load_padded_dtype(tmpdir, dt):
def test_python2_python3_interoperability():
fname = 'win64python2.npy'
path = os.path.join(yatest.common.source_path('contrib/python/numpy/py3/numpy/lib/tests'), 'data', fname)
- data = np.load(path)
+ with pytest.warns(UserWarning, match="Reading.*this warning\\."):
+ data = np.load(path)
assert_array_equal(data, np.ones(2))
def test_pickle_python2_python3():
@@ -746,8 +747,7 @@ def test_metadata_dtype(dt, fail):
else:
arr2 = np.load(buf)
# BUG: assert_array_equal does not check metadata
- from numpy.lib.format import _has_metadata
+ from numpy.lib.utils import drop_metadata
assert_array_equal(arr, arr2)
- assert _has_metadata(arr.dtype)
- assert not _has_metadata(arr2.dtype)
-
+ assert drop_metadata(arr.dtype) is not arr.dtype
+ assert drop_metadata(arr2.dtype) is arr2.dtype
diff --git a/contrib/python/numpy/py3/numpy/lib/tests/test_function_base.py b/contrib/python/numpy/py3/numpy/lib/tests/test_function_base.py
index 8017c2cafa..11e44630e7 100644
--- a/contrib/python/numpy/py3/numpy/lib/tests/test_function_base.py
+++ b/contrib/python/numpy/py3/numpy/lib/tests/test_function_base.py
@@ -8,7 +8,7 @@ import pytest
import hypothesis
from hypothesis.extra.numpy import arrays
import hypothesis.strategies as st
-
+from functools import partial
import numpy as np
from numpy import ma
@@ -229,8 +229,8 @@ class TestAny:
def test_nd(self):
y1 = [[0, 0, 0], [0, 1, 0], [1, 1, 0]]
assert_(np.any(y1))
- assert_array_equal(np.sometrue(y1, axis=0), [1, 1, 0])
- assert_array_equal(np.sometrue(y1, axis=1), [0, 1, 1])
+ assert_array_equal(np.any(y1, axis=0), [1, 1, 0])
+ assert_array_equal(np.any(y1, axis=1), [0, 1, 1])
class TestAll:
@@ -247,8 +247,8 @@ class TestAll:
def test_nd(self):
y1 = [[0, 0, 1], [0, 1, 1], [1, 1, 1]]
assert_(not np.all(y1))
- assert_array_equal(np.alltrue(y1, axis=0), [0, 0, 1])
- assert_array_equal(np.alltrue(y1, axis=1), [0, 0, 1])
+ assert_array_equal(np.all(y1, axis=0), [0, 0, 1])
+ assert_array_equal(np.all(y1, axis=1), [0, 0, 1])
class TestCopy:
@@ -1217,6 +1217,13 @@ class TestGradient:
dfdx = gradient(f, x)
assert_array_equal(dfdx, [0.5, 0.5])
+ def test_return_type(self):
+ res = np.gradient(([1, 2], [2, 3]))
+ if np._using_numpy2_behavior():
+ assert type(res) is tuple
+ else:
+ assert type(res) is list
+
class TestAngle:
@@ -1780,6 +1787,70 @@ class TestVectorize:
assert_equal(type(r), subclass)
assert_equal(r, m * v)
+ def test_name(self):
+ #See gh-23021
+ @np.vectorize
+ def f2(a, b):
+ return a + b
+
+ assert f2.__name__ == 'f2'
+
+ def test_decorator(self):
+ @vectorize
+ def addsubtract(a, b):
+ if a > b:
+ return a - b
+ else:
+ return a + b
+
+ r = addsubtract([0, 3, 6, 9], [1, 3, 5, 7])
+ assert_array_equal(r, [1, 6, 1, 2])
+
+ def test_docstring(self):
+ @vectorize
+ def f(x):
+ """Docstring"""
+ return x
+
+ if sys.flags.optimize < 2:
+ assert f.__doc__ == "Docstring"
+
+ def test_partial(self):
+ def foo(x, y):
+ return x + y
+
+ bar = partial(foo, 3)
+ vbar = np.vectorize(bar)
+ assert vbar(1) == 4
+
+ def test_signature_otypes_decorator(self):
+ @vectorize(signature='(n)->(n)', otypes=['float64'])
+ def f(x):
+ return x
+
+ r = f([1, 2, 3])
+ assert_equal(r.dtype, np.dtype('float64'))
+ assert_array_equal(r, [1, 2, 3])
+ assert f.__name__ == 'f'
+
+ def test_bad_input(self):
+ with assert_raises(TypeError):
+ A = np.vectorize(pyfunc = 3)
+
+ def test_no_keywords(self):
+ with assert_raises(TypeError):
+ @np.vectorize("string")
+ def foo():
+ return "bar"
+
+ def test_positional_regression_9477(self):
+ # This supplies the first keyword argument as a positional,
+ # to ensure that they are still properly forwarded after the
+ # enhancement for #9477
+ f = vectorize((lambda x: x), ['float64'])
+ r = f([2])
+ assert_equal(r.dtype, np.dtype('float64'))
+
class TestLeaks:
class A:
@@ -2973,6 +3044,14 @@ class TestPercentile:
o = np.ones((1,))
np.percentile(d, 5, None, o, False, 'linear')
+ def test_complex(self):
+ arr_c = np.array([0.5+3.0j, 2.1+0.5j, 1.6+2.3j], dtype='G')
+ assert_raises(TypeError, np.percentile, arr_c, 0.5)
+ arr_c = np.array([0.5+3.0j, 2.1+0.5j, 1.6+2.3j], dtype='D')
+ assert_raises(TypeError, np.percentile, arr_c, 0.5)
+ arr_c = np.array([0.5+3.0j, 2.1+0.5j, 1.6+2.3j], dtype='F')
+ assert_raises(TypeError, np.percentile, arr_c, 0.5)
+
def test_2D(self):
x = np.array([[1, 1, 1],
[1, 1, 1],
@@ -2981,7 +3060,7 @@ class TestPercentile:
[1, 1, 1]])
assert_array_equal(np.percentile(x, 50, axis=0), [1, 1, 1])
- @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"])
+ @pytest.mark.parametrize("dtype", np.typecodes["Float"])
def test_linear_nan_1D(self, dtype):
# METHOD 1 of H&F
arr = np.asarray([15.0, np.NAN, 35.0, 40.0, 50.0], dtype=dtype)
@@ -2998,9 +3077,6 @@ class TestPercentile:
(np.float32, np.float32),
(np.float64, np.float64),
(np.longdouble, np.longdouble),
- (np.complex64, np.complex64),
- (np.complex128, np.complex128),
- (np.clongdouble, np.clongdouble),
(np.dtype("O"), np.float64)]
@pytest.mark.parametrize(["input_dtype", "expected_dtype"], H_F_TYPE_CODES)
@@ -3040,7 +3116,7 @@ class TestPercentile:
np.testing.assert_equal(np.asarray(actual).dtype,
np.dtype(expected_dtype))
- TYPE_CODES = np.typecodes["AllInteger"] + np.typecodes["AllFloat"] + "O"
+ TYPE_CODES = np.typecodes["AllInteger"] + np.typecodes["Float"] + "O"
@pytest.mark.parametrize("dtype", TYPE_CODES)
def test_lower_higher(self, dtype):
@@ -3461,10 +3537,40 @@ class TestPercentile:
with pytest.raises(ValueError, match="Percentiles must be in"):
np.percentile([1, 2, 3, 4.0], q)
+ @pytest.mark.parametrize("dtype", ["m8[D]", "M8[s]"])
+ @pytest.mark.parametrize("pos", [0, 23, 10])
+ def test_nat_basic(self, dtype, pos):
+ # TODO: Note that times have dubious rounding as of fixing NaTs!
+ # NaT and NaN should behave the same, do basic tests for NaT:
+ a = np.arange(0, 24, dtype=dtype)
+ a[pos] = "NaT"
+ res = np.percentile(a, 30)
+ assert res.dtype == dtype
+ assert np.isnat(res)
+ res = np.percentile(a, [30, 60])
+ assert res.dtype == dtype
+ assert np.isnat(res).all()
+
+ a = np.arange(0, 24*3, dtype=dtype).reshape(-1, 3)
+ a[pos, 1] = "NaT"
+ res = np.percentile(a, 30, axis=0)
+ assert_array_equal(np.isnat(res), [False, True, False])
+
+
+quantile_methods = [
+ 'inverted_cdf', 'averaged_inverted_cdf', 'closest_observation',
+ 'interpolated_inverted_cdf', 'hazen', 'weibull', 'linear',
+ 'median_unbiased', 'normal_unbiased', 'nearest', 'lower', 'higher',
+ 'midpoint']
+
class TestQuantile:
# most of this is already tested by TestPercentile
+ def V(self, x, y, alpha):
+ # Identification function used in several tests.
+ return (x >= y) - alpha
+
def test_max_ulp(self):
x = [0.0, 0.2, 0.4]
a = np.quantile(x, 0.45)
@@ -3479,8 +3585,6 @@ class TestQuantile:
assert_equal(np.quantile(x, 1), 3.5)
assert_equal(np.quantile(x, 0.5), 1.75)
- @pytest.mark.skip
- @pytest.mark.xfail(reason="See gh-19154")
def test_correct_quantile_value(self):
a = np.array([True])
tf_quant = np.quantile(True, False)
@@ -3518,6 +3622,15 @@ class TestQuantile:
x = np.arange(8)
assert_equal(np.quantile(x, Fraction(1, 2)), Fraction(7, 2))
+ def test_complex(self):
+ #See gh-22652
+ arr_c = np.array([0.5+3.0j, 2.1+0.5j, 1.6+2.3j], dtype='G')
+ assert_raises(TypeError, np.quantile, arr_c, 0.5)
+ arr_c = np.array([0.5+3.0j, 2.1+0.5j, 1.6+2.3j], dtype='D')
+ assert_raises(TypeError, np.quantile, arr_c, 0.5)
+ arr_c = np.array([0.5+3.0j, 2.1+0.5j, 1.6+2.3j], dtype='F')
+ assert_raises(TypeError, np.quantile, arr_c, 0.5)
+
def test_no_p_overwrite(self):
# this is worth retesting, because quantile does not make a copy
p0 = np.array([0, 0.75, 0.25, 0.5, 1.0])
@@ -3536,11 +3649,7 @@ class TestQuantile:
method="nearest")
assert res.dtype == dtype
- @pytest.mark.parametrize("method",
- ['inverted_cdf', 'averaged_inverted_cdf', 'closest_observation',
- 'interpolated_inverted_cdf', 'hazen', 'weibull', 'linear',
- 'median_unbiased', 'normal_unbiased',
- 'nearest', 'lower', 'higher', 'midpoint'])
+ @pytest.mark.parametrize("method", quantile_methods)
def test_quantile_monotonic(self, method):
# GH 14685
# test that the return value of quantile is monotonic if p0 is ordered
@@ -3571,6 +3680,94 @@ class TestQuantile:
assert np.isscalar(actual)
assert_equal(np.quantile(a, 0.5), np.nan)
+ @pytest.mark.parametrize("method", quantile_methods)
+ @pytest.mark.parametrize("alpha", [0.2, 0.5, 0.9])
+ def test_quantile_identification_equation(self, method, alpha):
+ # Test that the identification equation holds for the empirical
+ # CDF:
+ # E[V(x, Y)] = 0 <=> x is quantile
+ # with Y the random variable for which we have observed values and
+ # V(x, y) the canonical identification function for the quantile (at
+ # level alpha), see
+ # https://doi.org/10.48550/arXiv.0912.0902
+ rng = np.random.default_rng(4321)
+ # We choose n and alpha such that we cover 3 cases:
+ # - n * alpha is an integer
+ # - n * alpha is a float that gets rounded down
+ # - n * alpha is a float that gets rounded up
+ n = 102 # n * alpha = 20.4, 51. , 91.8
+ y = rng.random(n)
+ x = np.quantile(y, alpha, method=method)
+ if method in ("higher",):
+ # These methods do not fulfill the identification equation.
+ assert np.abs(np.mean(self.V(x, y, alpha))) > 0.1 / n
+ elif int(n * alpha) == n * alpha:
+ # We can expect exact results, up to machine precision.
+ assert_allclose(np.mean(self.V(x, y, alpha)), 0, atol=1e-14)
+ else:
+ # V = (x >= y) - alpha cannot sum to zero exactly but within
+ # "sample precision".
+ assert_allclose(np.mean(self.V(x, y, alpha)), 0,
+ atol=1 / n / np.amin([alpha, 1 - alpha]))
+
+ @pytest.mark.parametrize("method", quantile_methods)
+ @pytest.mark.parametrize("alpha", [0.2, 0.5, 0.9])
+ def test_quantile_add_and_multiply_constant(self, method, alpha):
+ # Test that
+ # 1. quantile(c + x) = c + quantile(x)
+ # 2. quantile(c * x) = c * quantile(x)
+ # 3. quantile(-x) = -quantile(x, 1 - alpha)
+ # On empirical quantiles, this equation does not hold exactly.
+ # Koenker (2005) "Quantile Regression" Chapter 2.2.3 calls these
+ # properties equivariance.
+ rng = np.random.default_rng(4321)
+ # We choose n and alpha such that we have cases for
+ # - n * alpha is an integer
+ # - n * alpha is a float that gets rounded down
+ # - n * alpha is a float that gets rounded up
+ n = 102 # n * alpha = 20.4, 51. , 91.8
+ y = rng.random(n)
+ q = np.quantile(y, alpha, method=method)
+ c = 13.5
+
+ # 1
+ assert_allclose(np.quantile(c + y, alpha, method=method), c + q)
+ # 2
+ assert_allclose(np.quantile(c * y, alpha, method=method), c * q)
+ # 3
+ q = -np.quantile(-y, 1 - alpha, method=method)
+ if method == "inverted_cdf":
+ if (
+ n * alpha == int(n * alpha)
+ or np.round(n * alpha) == int(n * alpha) + 1
+ ):
+ assert_allclose(q, np.quantile(y, alpha, method="higher"))
+ else:
+ assert_allclose(q, np.quantile(y, alpha, method="lower"))
+ elif method == "closest_observation":
+ if n * alpha == int(n * alpha):
+ assert_allclose(q, np.quantile(y, alpha, method="higher"))
+ elif np.round(n * alpha) == int(n * alpha) + 1:
+ assert_allclose(
+ q, np.quantile(y, alpha + 1/n, method="higher"))
+ else:
+ assert_allclose(q, np.quantile(y, alpha, method="lower"))
+ elif method == "interpolated_inverted_cdf":
+ assert_allclose(q, np.quantile(y, alpha + 1/n, method=method))
+ elif method == "nearest":
+ if n * alpha == int(n * alpha):
+ assert_allclose(q, np.quantile(y, alpha + 1/n, method=method))
+ else:
+ assert_allclose(q, np.quantile(y, alpha, method=method))
+ elif method == "lower":
+ assert_allclose(q, np.quantile(y, alpha, method="higher"))
+ elif method == "higher":
+ assert_allclose(q, np.quantile(y, alpha, method="lower"))
+ else:
+ # "averaged_inverted_cdf", "hazen", "weibull", "linear",
+ # "median_unbiased", "normal_unbiased", "midpoint"
+ assert_allclose(q, np.quantile(y, alpha, method=method))
+
class TestLerp:
@hypothesis.given(t0=st.floats(allow_nan=False, allow_infinity=False,
@@ -3894,6 +4091,25 @@ class TestMedian:
assert result is out
assert_equal(result.shape, shape_out)
+ @pytest.mark.parametrize("dtype", ["m8[s]"])
+ @pytest.mark.parametrize("pos", [0, 23, 10])
+ def test_nat_behavior(self, dtype, pos):
+ # TODO: Median does not support Datetime, due to `mean`.
+ # NaT and NaN should behave the same, do basic tests for NaT.
+ a = np.arange(0, 24, dtype=dtype)
+ a[pos] = "NaT"
+ res = np.median(a)
+ assert res.dtype == dtype
+ assert np.isnat(res)
+ res = np.percentile(a, [30, 60])
+ assert res.dtype == dtype
+ assert np.isnat(res).all()
+
+ a = np.arange(0, 24*3, dtype=dtype).reshape(-1, 3)
+ a[pos, 1] = "NaT"
+ res = np.median(a, axis=0)
+ assert_array_equal(np.isnat(res), [False, True, False])
+
class TestAdd_newdoc_ufunc:
diff --git a/contrib/python/numpy/py3/numpy/lib/tests/test_histograms.py b/contrib/python/numpy/py3/numpy/lib/tests/test_histograms.py
index 87e6e1d41c..38b3d3dcbf 100644
--- a/contrib/python/numpy/py3/numpy/lib/tests/test_histograms.py
+++ b/contrib/python/numpy/py3/numpy/lib/tests/test_histograms.py
@@ -408,6 +408,13 @@ class TestHistogram:
hist = np.histogramdd(sample=sample, bins=(xbins, ybins, zbins))
assert_equal(type(hist), type((1, 2)))
+ def test_gh_23110(self):
+ hist, e = np.histogram(np.array([-0.9e-308], dtype='>f8'),
+ bins=2,
+ range=(-1e-308, -2e-313))
+ expected_hist = np.array([1, 0])
+ assert_array_equal(hist, expected_hist)
+
class TestHistogramOptimBinNums:
"""
diff --git a/contrib/python/numpy/py3/numpy/lib/tests/test_io.py b/contrib/python/numpy/py3/numpy/lib/tests/test_io.py
index 3af2e6f42c..c1032df8e1 100644
--- a/contrib/python/numpy/py3/numpy/lib/tests/test_io.py
+++ b/contrib/python/numpy/py3/numpy/lib/tests/test_io.py
@@ -232,15 +232,16 @@ class TestSavezLoad(RoundtripTest):
assert_equal(a, l['file_a'])
assert_equal(b, l['file_b'])
- def test_named_arrays_with_like(self):
- a = np.array([[1, 2], [3, 4]], float)
- b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex)
- c = BytesIO()
- np.savez(c, file_a=a, like=b)
- c.seek(0)
- l = np.load(c)
- assert_equal(a, l['file_a'])
- assert_equal(b, l['like'])
+
+ def test_tuple_getitem_raises(self):
+ # gh-23748
+ a = np.array([1, 2, 3])
+ f = BytesIO()
+ np.savez(f, a=a)
+ f.seek(0)
+ l = np.load(f)
+ with pytest.raises(KeyError, match="(1, 2)"):
+ l[1, 2]
def test_BagObj(self):
a = np.array([[1, 2], [3, 4]], float)
@@ -331,6 +332,21 @@ class TestSavezLoad(RoundtripTest):
data.close()
assert_(fp.closed)
+ @pytest.mark.parametrize("count, expected_repr", [
+ (1, "NpzFile {fname!r} with keys: arr_0"),
+ (5, "NpzFile {fname!r} with keys: arr_0, arr_1, arr_2, arr_3, arr_4"),
+ # _MAX_REPR_ARRAY_COUNT is 5, so files with more than 5 keys are
+ # expected to end in '...'
+ (6, "NpzFile {fname!r} with keys: arr_0, arr_1, arr_2, arr_3, arr_4..."),
+ ])
+ def test_repr_lists_keys(self, count, expected_repr):
+ a = np.array([[1, 2], [3, 4]], float)
+ with temppath(suffix='.npz') as tmp:
+ np.savez(tmp, *[a]*count)
+ l = np.load(tmp)
+ assert repr(l) == expected_repr.format(fname=tmp)
+ l.close()
+
class TestSaveTxt:
def test_array(self):
@@ -532,7 +548,7 @@ class TestSaveTxt:
def test_unicode(self):
utf8 = b'\xcf\x96'.decode('UTF-8')
- a = np.array([utf8], dtype=np.unicode_)
+ a = np.array([utf8], dtype=np.str_)
with tempdir() as tmpdir:
# set encoding as on windows it may not be unicode even on py3
np.savetxt(os.path.join(tmpdir, 'test.csv'), a, fmt=['%s'],
@@ -540,7 +556,7 @@ class TestSaveTxt:
def test_unicode_roundtrip(self):
utf8 = b'\xcf\x96'.decode('UTF-8')
- a = np.array([utf8], dtype=np.unicode_)
+ a = np.array([utf8], dtype=np.str_)
# our gz wrapper support encoding
suffixes = ['', '.gz']
if HAS_BZ2:
@@ -552,12 +568,12 @@ class TestSaveTxt:
np.savetxt(os.path.join(tmpdir, 'test.csv' + suffix), a,
fmt=['%s'], encoding='UTF-16-LE')
b = np.loadtxt(os.path.join(tmpdir, 'test.csv' + suffix),
- encoding='UTF-16-LE', dtype=np.unicode_)
+ encoding='UTF-16-LE', dtype=np.str_)
assert_array_equal(a, b)
def test_unicode_bytestream(self):
utf8 = b'\xcf\x96'.decode('UTF-8')
- a = np.array([utf8], dtype=np.unicode_)
+ a = np.array([utf8], dtype=np.str_)
s = BytesIO()
np.savetxt(s, a, fmt=['%s'], encoding='UTF-8')
s.seek(0)
@@ -565,7 +581,7 @@ class TestSaveTxt:
def test_unicode_stringstream(self):
utf8 = b'\xcf\x96'.decode('UTF-8')
- a = np.array([utf8], dtype=np.unicode_)
+ a = np.array([utf8], dtype=np.str_)
s = StringIO()
np.savetxt(s, a, fmt=['%s'], encoding='UTF-8')
s.seek(0)
@@ -607,8 +623,8 @@ class TestSaveTxt:
# in our process if needed, see gh-16889
memoryerror_raised = Value(c_bool)
- # Since Python 3.8, the default start method for multiprocessing has
- # been changed from 'fork' to 'spawn' on macOS, causing inconsistency
+ # Since Python 3.8, the default start method for multiprocessing has
+ # been changed from 'fork' to 'spawn' on macOS, causing inconsistency
# on memory sharing model, lead to failed test for check_large_zip
ctx = get_context('fork')
p = ctx.Process(target=check_large_zip, args=(memoryerror_raised,))
@@ -662,12 +678,12 @@ class LoadTxtBase:
with temppath() as path:
with open(path, "wb") as f:
f.write(nonascii.encode("UTF-16"))
- x = self.loadfunc(path, encoding="UTF-16", dtype=np.unicode_)
+ x = self.loadfunc(path, encoding="UTF-16", dtype=np.str_)
assert_array_equal(x, nonascii)
def test_binary_decode(self):
utf16 = b'\xff\xfeh\x04 \x00i\x04 \x00j\x04'
- v = self.loadfunc(BytesIO(utf16), dtype=np.unicode_, encoding='UTF-16')
+ v = self.loadfunc(BytesIO(utf16), dtype=np.str_, encoding='UTF-16')
assert_array_equal(v, np.array(utf16.decode('UTF-16').split()))
def test_converters_decode(self):
@@ -675,7 +691,7 @@ class LoadTxtBase:
c = TextIO()
c.write(b'\xcf\x96')
c.seek(0)
- x = self.loadfunc(c, dtype=np.unicode_,
+ x = self.loadfunc(c, dtype=np.str_,
converters={0: lambda x: x.decode('UTF-8')})
a = np.array([b'\xcf\x96'.decode('UTF-8')])
assert_array_equal(x, a)
@@ -686,7 +702,7 @@ class LoadTxtBase:
with temppath() as path:
with io.open(path, 'wt', encoding='UTF-8') as f:
f.write(utf8)
- x = self.loadfunc(path, dtype=np.unicode_,
+ x = self.loadfunc(path, dtype=np.str_,
converters={0: lambda x: x + 't'},
encoding='UTF-8')
a = np.array([utf8 + 't'])
@@ -1171,7 +1187,7 @@ class TestLoadTxt(LoadTxtBase):
with open(path, "wb") as f:
f.write(butf8)
with open(path, "rb") as f:
- x = np.loadtxt(f, encoding="UTF-8", dtype=np.unicode_)
+ x = np.loadtxt(f, encoding="UTF-8", dtype=np.str_)
assert_array_equal(x, sutf8)
# test broken latin1 conversion people now rely on
with open(path, "rb") as f:
@@ -2229,7 +2245,7 @@ M 33 21.99
ctl = np.array([
["test1", "testNonethe" + utf8.decode("UTF-8"), "test3"],
["test1", "testNonethe" + utf8.decode("UTF-8"), "test3"]],
- dtype=np.unicode_)
+ dtype=np.str_)
assert_array_equal(test, ctl)
# test a mixed dtype
@@ -2272,7 +2288,7 @@ M 33 21.99
["norm1", "norm2", "norm3"],
["norm1", latin1, "norm3"],
["test1", "testNonethe" + utf8, "test3"]],
- dtype=np.unicode_)
+ dtype=np.str_)
assert_array_equal(test, ctl)
def test_recfromtxt(self):
@@ -2747,3 +2763,13 @@ def test_load_refcount():
with assert_no_gc_cycles():
x = np.loadtxt(TextIO("0 1 2 3"), dtype=dt)
assert_equal(x, np.array([((0, 1), (2, 3))], dtype=dt))
+
+def test_load_multiple_arrays_until_eof():
+ f = BytesIO()
+ np.save(f, 1)
+ np.save(f, 2)
+ f.seek(0)
+ assert np.load(f) == 1
+ assert np.load(f) == 2
+ with pytest.raises(EOFError):
+ np.load(f)
diff --git a/contrib/python/numpy/py3/numpy/lib/tests/test_loadtxt.py b/contrib/python/numpy/py3/numpy/lib/tests/test_loadtxt.py
index f9b5a411c5..b66092e701 100644
--- a/contrib/python/numpy/py3/numpy/lib/tests/test_loadtxt.py
+++ b/contrib/python/numpy/py3/numpy/lib/tests/test_loadtxt.py
@@ -244,6 +244,14 @@ def test_converters_negative_indices_with_usecols():
usecols=[0, -1], converters={-1: (lambda x: -1)})
assert_array_equal(res, [[0, -1], [0, -1]])
+
+def test_ragged_error():
+ rows = ["1,2,3", "1,2,3", "4,3,2,1"]
+ with pytest.raises(ValueError,
+ match="the number of columns changed from 3 to 4 at row 3"):
+ np.loadtxt(rows, delimiter=",")
+
+
def test_ragged_usecols():
# usecols, and negative ones, work even with varying number of columns.
txt = StringIO("0,0,XXX\n0,XXX,0,XXX\n0,XXX,XXX,0,XXX\n")
@@ -555,7 +563,8 @@ def test_quote_support_default():
txt = StringIO('"lat,long", 45, 30\n')
dtype = np.dtype([('f0', 'U24'), ('f1', np.float64), ('f2', np.float64)])
- with pytest.raises(ValueError, match="the number of columns changed"):
+ with pytest.raises(ValueError,
+ match="the dtype passed requires 3 columns but 4 were"):
np.loadtxt(txt, dtype=dtype, delimiter=",")
# Enable quoting support with non-None value for quotechar param
diff --git a/contrib/python/numpy/py3/numpy/lib/tests/test_nanfunctions.py b/contrib/python/numpy/py3/numpy/lib/tests/test_nanfunctions.py
index 7cdcff32dc..257de381b3 100644
--- a/contrib/python/numpy/py3/numpy/lib/tests/test_nanfunctions.py
+++ b/contrib/python/numpy/py3/numpy/lib/tests/test_nanfunctions.py
@@ -404,14 +404,20 @@ class TestNanFunctions_NumberTypes:
)
def test_nanfunc_q(self, mat, dtype, nanfunc, func):
mat = mat.astype(dtype)
- tgt = func(mat, q=1)
- out = nanfunc(mat, q=1)
+ if mat.dtype.kind == "c":
+ assert_raises(TypeError, func, mat, q=1)
+ assert_raises(TypeError, nanfunc, mat, q=1)
- assert_almost_equal(out, tgt)
- if dtype == "O":
- assert type(out) is type(tgt)
else:
- assert out.dtype == tgt.dtype
+ tgt = func(mat, q=1)
+ out = nanfunc(mat, q=1)
+
+ assert_almost_equal(out, tgt)
+
+ if dtype == "O":
+ assert type(out) is type(tgt)
+ else:
+ assert out.dtype == tgt.dtype
@pytest.mark.parametrize(
"nanfunc,func",
@@ -1060,6 +1066,14 @@ class TestNanFunctions_Percentile:
assert_almost_equal(res, resout)
assert_almost_equal(res, tgt)
+ def test_complex(self):
+ arr_c = np.array([0.5+3.0j, 2.1+0.5j, 1.6+2.3j], dtype='G')
+ assert_raises(TypeError, np.nanpercentile, arr_c, 0.5)
+ arr_c = np.array([0.5+3.0j, 2.1+0.5j, 1.6+2.3j], dtype='D')
+ assert_raises(TypeError, np.nanpercentile, arr_c, 0.5)
+ arr_c = np.array([0.5+3.0j, 2.1+0.5j, 1.6+2.3j], dtype='F')
+ assert_raises(TypeError, np.nanpercentile, arr_c, 0.5)
+
def test_result_values(self):
tgt = [np.percentile(d, 28) for d in _rdat]
res = np.nanpercentile(_ndat, 28, axis=1)
@@ -1070,7 +1084,7 @@ class TestNanFunctions_Percentile:
assert_almost_equal(res, tgt)
@pytest.mark.parametrize("axis", [None, 0, 1])
- @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"])
+ @pytest.mark.parametrize("dtype", np.typecodes["Float"])
@pytest.mark.parametrize("array", [
np.array(np.nan),
np.full((3, 3), np.nan),
@@ -1164,6 +1178,14 @@ class TestNanFunctions_Quantile:
assert_equal(np.nanquantile(x, 1), 3.5)
assert_equal(np.nanquantile(x, 0.5), 1.75)
+ def test_complex(self):
+ arr_c = np.array([0.5+3.0j, 2.1+0.5j, 1.6+2.3j], dtype='G')
+ assert_raises(TypeError, np.nanquantile, arr_c, 0.5)
+ arr_c = np.array([0.5+3.0j, 2.1+0.5j, 1.6+2.3j], dtype='D')
+ assert_raises(TypeError, np.nanquantile, arr_c, 0.5)
+ arr_c = np.array([0.5+3.0j, 2.1+0.5j, 1.6+2.3j], dtype='F')
+ assert_raises(TypeError, np.nanquantile, arr_c, 0.5)
+
def test_no_p_overwrite(self):
# this is worth retesting, because quantile does not make a copy
p0 = np.array([0, 0.75, 0.25, 0.5, 1.0])
@@ -1177,7 +1199,7 @@ class TestNanFunctions_Quantile:
assert_array_equal(p, p0)
@pytest.mark.parametrize("axis", [None, 0, 1])
- @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"])
+ @pytest.mark.parametrize("dtype", np.typecodes["Float"])
@pytest.mark.parametrize("array", [
np.array(np.nan),
np.full((3, 3), np.nan),
diff --git a/contrib/python/numpy/py3/numpy/lib/tests/test_recfunctions.py b/contrib/python/numpy/py3/numpy/lib/tests/test_recfunctions.py
index 0c919a53d8..98860dfdab 100644
--- a/contrib/python/numpy/py3/numpy/lib/tests/test_recfunctions.py
+++ b/contrib/python/numpy/py3/numpy/lib/tests/test_recfunctions.py
@@ -228,7 +228,7 @@ class TestRecFunctions:
dt = np.dtype((np.record, dt))
assert_(repack_fields(dt).type is np.record)
- def test_structured_to_unstructured(self):
+ def test_structured_to_unstructured(self, tmp_path):
a = np.zeros(4, dtype=[('a', 'i4'), ('b', 'f4,u2'), ('c', 'f4', 2)])
out = structured_to_unstructured(a)
assert_equal(out, np.zeros((4,5), dtype='f8'))
@@ -263,8 +263,13 @@ class TestRecFunctions:
dtype=[('x', 'i4'), ('y', 'i4'), ('z', 'i4')])
dd = structured_to_unstructured(d)
ddd = unstructured_to_structured(dd, d.dtype)
- assert_(dd.base is d)
- assert_(ddd.base is d)
+ assert_(np.shares_memory(dd, d))
+ assert_(np.shares_memory(ddd, d))
+
+ # check that reversing the order of attributes works
+ dd_attrib_rev = structured_to_unstructured(d[['z', 'x']])
+ assert_equal(dd_attrib_rev, [[5, 1], [7, 4], [11, 7], [12, 10]])
+ assert_(np.shares_memory(dd_attrib_rev, d))
# including uniform fields with subarrays unpacked
d = np.array([(1, [2, 3], [[ 4, 5], [ 6, 7]]),
@@ -273,8 +278,30 @@ class TestRecFunctions:
('x2', ('i4', (2, 2)))])
dd = structured_to_unstructured(d)
ddd = unstructured_to_structured(dd, d.dtype)
- assert_(dd.base is d)
- assert_(ddd.base is d)
+ assert_(np.shares_memory(dd, d))
+ assert_(np.shares_memory(ddd, d))
+
+ # check that reversing with sub-arrays works as expected
+ d_rev = d[::-1]
+ dd_rev = structured_to_unstructured(d_rev)
+ assert_equal(dd_rev, [[8, 9, 10, 11, 12, 13, 14],
+ [1, 2, 3, 4, 5, 6, 7]])
+
+ # check that sub-arrays keep the order of their values
+ d_attrib_rev = d[['x2', 'x1', 'x0']]
+ dd_attrib_rev = structured_to_unstructured(d_attrib_rev)
+ assert_equal(dd_attrib_rev, [[4, 5, 6, 7, 2, 3, 1],
+ [11, 12, 13, 14, 9, 10, 8]])
+
+ # with ignored field at the end
+ d = np.array([(1, [2, 3], [[4, 5], [6, 7]], 32),
+ (8, [9, 10], [[11, 12], [13, 14]], 64)],
+ dtype=[('x0', 'i4'), ('x1', ('i4', 2)),
+ ('x2', ('i4', (2, 2))), ('ignored', 'u1')])
+ dd = structured_to_unstructured(d[['x0', 'x1', 'x2']])
+ assert_(np.shares_memory(dd, d))
+ assert_equal(dd, [[1, 2, 3, 4, 5, 6, 7],
+ [8, 9, 10, 11, 12, 13, 14]])
# test that nested fields with identical names don't break anything
point = np.dtype([('x', int), ('y', int)])
@@ -318,6 +345,35 @@ class TestRecFunctions:
assert_raises(NotImplementedError, unstructured_to_structured,
np.zeros((3,0), dtype=np.int32))
+ # test supported ndarray subclasses
+ d_plain = np.array([(1, 2), (3, 4)], dtype=[('a', 'i4'), ('b', 'i4')])
+ dd_expected = structured_to_unstructured(d_plain, copy=True)
+
+ # recarray
+ d = d_plain.view(np.recarray)
+
+ dd = structured_to_unstructured(d, copy=False)
+ ddd = structured_to_unstructured(d, copy=True)
+ assert_(np.shares_memory(d, dd))
+ assert_(type(dd) is np.recarray)
+ assert_(type(ddd) is np.recarray)
+ assert_equal(dd, dd_expected)
+ assert_equal(ddd, dd_expected)
+
+ # memmap
+ d = np.memmap(tmp_path / 'memmap',
+ mode='w+',
+ dtype=d_plain.dtype,
+ shape=d_plain.shape)
+ d[:] = d_plain
+ dd = structured_to_unstructured(d, copy=False)
+ ddd = structured_to_unstructured(d, copy=True)
+ assert_(np.shares_memory(d, dd))
+ assert_(type(dd) is np.memmap)
+ assert_(type(ddd) is np.memmap)
+ assert_equal(dd, dd_expected)
+ assert_equal(ddd, dd_expected)
+
def test_unstructured_to_structured(self):
# test if dtype is the args of np.dtype
a = np.zeros((20, 2))
diff --git a/contrib/python/numpy/py3/numpy/lib/tests/test_shape_base.py b/contrib/python/numpy/py3/numpy/lib/tests/test_shape_base.py
index 76058cf20f..eb6628904b 100644
--- a/contrib/python/numpy/py3/numpy/lib/tests/test_shape_base.py
+++ b/contrib/python/numpy/py3/numpy/lib/tests/test_shape_base.py
@@ -492,7 +492,7 @@ class TestColumnStack:
assert_equal(actual, expected)
def test_generator(self):
- with assert_warns(FutureWarning):
+ with pytest.raises(TypeError, match="arrays to stack must be"):
column_stack((np.arange(3) for _ in range(2)))
@@ -529,7 +529,7 @@ class TestDstack:
assert_array_equal(res, desired)
def test_generator(self):
- with assert_warns(FutureWarning):
+ with pytest.raises(TypeError, match="arrays to stack must be"):
dstack((np.arange(3) for _ in range(2)))
diff --git a/contrib/python/numpy/py3/numpy/lib/tests/test_twodim_base.py b/contrib/python/numpy/py3/numpy/lib/tests/test_twodim_base.py
index db02177fa4..eb008c6002 100644
--- a/contrib/python/numpy/py3/numpy/lib/tests/test_twodim_base.py
+++ b/contrib/python/numpy/py3/numpy/lib/tests/test_twodim_base.py
@@ -4,20 +4,14 @@
from numpy.testing import (
assert_equal, assert_array_equal, assert_array_max_ulp,
assert_array_almost_equal, assert_raises, assert_
- )
-
+)
from numpy import (
arange, add, fliplr, flipud, zeros, ones, eye, array, diag, histogram2d,
tri, mask_indices, triu_indices, triu_indices_from, tril_indices,
tril_indices_from, vander,
- )
-
+)
import numpy as np
-
-from __tests__.core.tests.test_overrides import requires_array_function
-
-
import pytest
@@ -283,7 +277,6 @@ class TestHistogram2d:
assert_array_equal(H, answer)
assert_array_equal(xe, array([0., 0.25, 0.5, 0.75, 1]))
- @requires_array_function
def test_dispatch(self):
class ShouldDispatch:
def __array_function__(self, function, types, args, kwargs):
diff --git a/contrib/python/numpy/py3/numpy/lib/tests/test_type_check.py b/contrib/python/numpy/py3/numpy/lib/tests/test_type_check.py
index 3f4ca63096..ea03261391 100644
--- a/contrib/python/numpy/py3/numpy/lib/tests/test_type_check.py
+++ b/contrib/python/numpy/py3/numpy/lib/tests/test_type_check.py
@@ -155,7 +155,7 @@ class TestIscomplex:
def test_fail(self):
z = np.array([-1, 0, 1])
res = iscomplex(z)
- assert_(not np.sometrue(res, axis=0))
+ assert_(not np.any(res, axis=0))
def test_pass(self):
z = np.array([-1j, 1, 0])
diff --git a/contrib/python/numpy/py3/numpy/lib/tests/test_ufunclike.py b/contrib/python/numpy/py3/numpy/lib/tests/test_ufunclike.py
index c280b69692..fac4f41d09 100644
--- a/contrib/python/numpy/py3/numpy/lib/tests/test_ufunclike.py
+++ b/contrib/python/numpy/py3/numpy/lib/tests/test_ufunclike.py
@@ -80,12 +80,6 @@ class TestUfunclike:
assert_(isinstance(f0d, MyArray))
assert_equal(f0d.metadata, 'bar')
- def test_deprecated(self):
- # NumPy 1.13.0, 2017-04-26
- assert_warns(DeprecationWarning, ufl.fix, [1, 2], y=nx.empty(2))
- assert_warns(DeprecationWarning, ufl.isposinf, [1, 2], y=nx.empty(2))
- assert_warns(DeprecationWarning, ufl.isneginf, [1, 2], y=nx.empty(2))
-
def test_scalar(self):
x = np.inf
actual = np.isposinf(x)
diff --git a/contrib/python/numpy/py3/numpy/lib/tests/test_utils.py b/contrib/python/numpy/py3/numpy/lib/tests/test_utils.py
index 6ad4bfe6d8..45416b0597 100644
--- a/contrib/python/numpy/py3/numpy/lib/tests/test_utils.py
+++ b/contrib/python/numpy/py3/numpy/lib/tests/test_utils.py
@@ -2,6 +2,7 @@ import inspect
import sys
import pytest
+import numpy as np
from numpy.core import arange
from numpy.testing import assert_, assert_equal, assert_raises_regex
from numpy.lib import deprecate, deprecate_with_doc
@@ -176,3 +177,52 @@ def test_info_method_heading():
assert _has_method_heading(WithPublicMethods)
assert not _has_method_heading(NoPublicMethods)
+
+
+def test_drop_metadata():
+ def _compare_dtypes(dt1, dt2):
+ return np.can_cast(dt1, dt2, casting='no')
+
+ # structured dtype
+ dt = np.dtype([('l1', [('l2', np.dtype('S8', metadata={'msg': 'toto'}))])],
+ metadata={'msg': 'titi'})
+ dt_m = utils.drop_metadata(dt)
+ assert _compare_dtypes(dt, dt_m) is True
+ assert dt_m.metadata is None
+ assert dt_m['l1'].metadata is None
+ assert dt_m['l1']['l2'].metadata is None
+
+ # alignment
+ dt = np.dtype([('x', '<f8'), ('y', '<i4')],
+ align=True,
+ metadata={'msg': 'toto'})
+ dt_m = utils.drop_metadata(dt)
+ assert _compare_dtypes(dt, dt_m) is True
+ assert dt_m.metadata is None
+
+ # subdtype
+ dt = np.dtype('8f',
+ metadata={'msg': 'toto'})
+ dt_m = utils.drop_metadata(dt)
+ assert _compare_dtypes(dt, dt_m) is True
+ assert dt_m.metadata is None
+
+ # scalar
+ dt = np.dtype('uint32',
+ metadata={'msg': 'toto'})
+ dt_m = utils.drop_metadata(dt)
+ assert _compare_dtypes(dt, dt_m) is True
+ assert dt_m.metadata is None
+
+
+@pytest.mark.parametrize("dtype",
+ [np.dtype("i,i,i,i")[["f1", "f3"]],
+ np.dtype("f8"),
+ np.dtype("10i")])
+def test_drop_metadata_identity_and_copy(dtype):
+ # If there is no metadata, the identity is preserved:
+ assert utils.drop_metadata(dtype) is dtype
+
+ # If there is any, it is dropped (subforms are checked above)
+ dtype = np.dtype(dtype, metadata={1: 2})
+ assert utils.drop_metadata(dtype).metadata is None