author     robot-piglet <[email protected]>  2025-10-08 09:57:44 +0300
committer  robot-piglet <[email protected]>  2025-10-08 10:11:25 +0300
commit     94c10d61d66a7b6d830dbcb419ad44b330066765 (patch)
tree       421761b447c002ffbdc753aebf3d812f38f9d9e1 /contrib/python/wcwidth/py3/tests
parent     0f3f07ccb038beab419ba7ac850d95885f624989 (diff)
Intermediate changes
commit_hash:572d11de7e4c4fd16eeb91e832abea46ff94a4f2
Diffstat (limited to 'contrib/python/wcwidth/py3/tests')
-rw-r--r--  contrib/python/wcwidth/py3/tests/test_core.py              96
-rw-r--r--  contrib/python/wcwidth/py3/tests/test_emojis.py            85
-rw-r--r--  contrib/python/wcwidth/py3/tests/test_table_integrity.py    5
-rw-r--r--  contrib/python/wcwidth/py3/tests/test_ucslevel.py           13
4 files changed, 96 insertions, 103 deletions
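
The patch drops the Python 2 unichr() compatibility shim and the u'' string prefixes from the test suite. For reference, a minimal sketch of the idiom being removed and its Python 3-only replacement (illustrative only, not part of the diff itself):

    # Python 2/3 shim deleted by this patch:
    try:
        _ = unichr        # Python 2 built-in
    except NameError:
        unichr = chr      # Python 3: alias the built-in chr()

    # Python 3-only form now used throughout the tests:
    assert chr(0x3029) == '\u3029'   # chr() covers the full code point range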
diff --git a/contrib/python/wcwidth/py3/tests/test_core.py b/contrib/python/wcwidth/py3/tests/test_core.py
index 60ed6b1cde7..206bbdcab38 100644
--- a/contrib/python/wcwidth/py3/tests/test_core.py
+++ b/contrib/python/wcwidth/py3/tests/test_core.py
@@ -1,4 +1,3 @@
-# coding: utf-8
"""Core tests for wcwidth module. isort:skip_file"""
try:
# std import
@@ -10,13 +9,6 @@ except ImportError:
# local
import wcwidth
-try:
- # python 2
- _ = unichr
-except NameError:
- # python 3
- unichr = chr
-
def test_package_version():
"""wcwidth.__version__ is expected value."""
@@ -70,7 +62,7 @@ def basic_string_type():
def test_hello_jp():
- u"""
+ """
Width of Japanese phrase: コンニチハ, セカイ!
Given a phrase of 5 and 3 Katakana ideographs, joined with
@@ -78,7 +70,7 @@ def test_hello_jp():
phrase consumes 19 cells of a terminal emulator.
"""
# given,
- phrase = u'コンニチハ, セカイ!'
+ phrase = 'コンニチハ, セカイ!'
expect_length_each = (2, 2, 2, 2, 2, 1, 1, 2, 2, 2, 1)
expect_length_phrase = sum(expect_length_each)
@@ -99,7 +91,7 @@ def test_wcswidth_substr():
to stop counting length.
"""
# given,
- phrase = u'コンニチハ, セカイ!'
+ phrase = 'コンニチハ, セカイ!'
end = 7
expect_length_each = (2, 2, 2, 2, 2, 1, 1,)
expect_length_phrase = sum(expect_length_each)
@@ -116,7 +108,7 @@ def test_wcswidth_substr():
def test_null_width_0():
"""NULL (0) reports width 0."""
# given,
- phrase = u'abc\x00def'
+ phrase = 'abc\x00def'
expect_length_each = (1, 1, 1, 0, 1, 1, 1)
expect_length_phrase = sum(expect_length_each)
@@ -140,7 +132,7 @@ def test_control_c0_width_negative_1():
any string containing the C0 control character \x1b (ESC).
"""
# given,
- phrase = u'\x1b[0m'
+ phrase = '\x1b[0m'
expect_length_each = (-1, 1, 1, 1)
expect_length_phrase = -1
@@ -156,7 +148,7 @@ def test_control_c0_width_negative_1():
def test_combining_width():
"""Simple test combining reports total width of 4."""
# given,
- phrase = u'--\u05bf--'
+ phrase = '--\u05bf--'
expect_length_each = (1, 1, 0, 1, 1)
expect_length_phrase = 4
@@ -170,8 +162,8 @@ def test_combining_width():
def test_combining_cafe():
- u"""Phrase cafe + COMBINING ACUTE ACCENT is café of length 4."""
- phrase = u"cafe\u0301"
+ """Phrase cafe + COMBINING ACUTE ACCENT is café of length 4."""
+ phrase = "cafe\u0301"
expect_length_each = (1, 1, 1, 1, 0)
expect_length_phrase = 4
@@ -185,8 +177,8 @@ def test_combining_cafe():
def test_combining_enclosing():
- u"""CYRILLIC CAPITAL LETTER A + COMBINING CYRILLIC HUNDRED THOUSANDS SIGN is of length 1."""
- phrase = u"\u0410\u0488"
+ """CYRILLIC CAPITAL LETTER A + COMBINING CYRILLIC HUNDRED THOUSANDS SIGN is of length 1."""
+ phrase = "\u0410\u0488"
expect_length_each = (1, 0)
expect_length_phrase = 1
@@ -200,16 +192,16 @@ def test_combining_enclosing():
def test_balinese_script():
- u"""
+ """
Balinese kapal (ship) is length 3.
This may be an example that is not yet correctly rendered by any terminal so
far, like devanagari.
"""
- phrase = (u"\u1B13" # Category 'Lo', EAW 'N' -- BALINESE LETTER KA
- u"\u1B28" # Category 'Lo', EAW 'N' -- BALINESE LETTER PA KAPAL
- u"\u1B2E" # Category 'Lo', EAW 'N' -- BALINESE LETTER LA
- u"\u1B44") # Category 'Mc', EAW 'N' -- BALINESE ADEG ADEG
+ phrase = ("\u1B13" # Category 'Lo', EAW 'N' -- BALINESE LETTER KA
+ "\u1B28" # Category 'Lo', EAW 'N' -- BALINESE LETTER PA KAPAL
+ "\u1B2E" # Category 'Lo', EAW 'N' -- BALINESE LETTER LA
+ "\u1B44") # Category 'Mc', EAW 'N' -- BALINESE ADEG ADEG
expect_length_each = (1, 1, 1, 0)
expect_length_phrase = 3
@@ -237,8 +229,8 @@ def test_kr_jamo():
# and not by independent display, like other zero-width characters that may
# only combine with an appropriate preceding character.
phrase = (
- u"\u1100" # ᄀ HANGUL CHOSEONG KIYEOK (consonant)
- u"\u1161" # ᅡ HANGUL JUNGSEONG A (vowel)
+ "\u1100" # ᄀ HANGUL CHOSEONG KIYEOK (consonant)
+ "\u1161" # ᅡ HANGUL JUNGSEONG A (vowel)
)
expect_length_each = (2, 0)
expect_length_phrase = 2
@@ -253,14 +245,14 @@ def test_kr_jamo():
def test_kr_jamo_filler():
- u"""
+ """
Jamo filler is 0 width.
Example from https://www.unicode.org/L2/L2006/06310-hangul-decompose9.pdf
"""
phrase = (
- u"\u1100" # HANGUL CHOSEONG KIYEOK (consonant)
- u"\u1160" # HANGUL JUNGSEONG FILLER (vowel)
+ "\u1100" # HANGUL CHOSEONG KIYEOK (consonant)
+ "\u1160" # HANGUL JUNGSEONG FILLER (vowel)
)
expect_length_each = (2, 0)
expect_length_phrase = 2
@@ -305,10 +297,10 @@ def test_devanagari_script():
# as a sum of each individual width, as this library currently performs with exception of
# ZWJ, but I think it incorrectly gestures what a stateless call to wcwidth.wcwidth of
# each codepoint *should* return.
- phrase = (u"\u0915" # Akhand, Category 'Lo', East Asian Width property 'N' -- DEVANAGARI LETTER KA
- u"\u094D" # Joiner, Category 'Mn', East Asian Width property 'N' -- DEVANAGARI SIGN VIRAMA
- u"\u0937" # Fused, Category 'Lo', East Asian Width property 'N' -- DEVANAGARI LETTER SSA
- u"\u093F") # MatraL, Category 'Mc', East Asian Width property 'N' -- DEVANAGARI VOWEL SIGN I
+ phrase = ("\u0915" # Akhand, Category 'Lo', East Asian Width property 'N' -- DEVANAGARI LETTER KA
+ "\u094D" # Joiner, Category 'Mn', East Asian Width property 'N' -- DEVANAGARI SIGN VIRAMA
+ "\u0937" # Fused, Category 'Lo', East Asian Width property 'N' -- DEVANAGARI LETTER SSA
+ "\u093F") # MatraL, Category 'Mc', East Asian Width property 'N' -- DEVANAGARI VOWEL SIGN I
# 23107-terminal-suppt.pdf suggests wcwidth.wcwidth should return (2, 0, 0, 1)
expect_length_each = (1, 0, 1, 0)
# I believe the final width *should* be 3.
@@ -325,10 +317,10 @@ def test_devanagari_script():
def test_tamil_script():
# This test adapted from https://www.unicode.org/L2/L2023/23107-terminal-suppt.pdf
- phrase = (u"\u0b95" # Akhand, Category 'Lo', East Asian Width property 'N' -- TAMIL LETTER KA
- u"\u0bcd" # Joiner, Category 'Mn', East Asian Width property 'N' -- TAMIL SIGN VIRAMA
- u"\u0bb7" # Fused, Category 'Lo', East Asian Width property 'N' -- TAMIL LETTER SSA
- u"\u0bcc") # MatraLR, Category 'Mc', East Asian Width property 'N' -- TAMIL VOWEL SIGN AU
+ phrase = ("\u0b95" # Akhand, Category 'Lo', East Asian Width property 'N' -- TAMIL LETTER KA
+ "\u0bcd" # Joiner, Category 'Mn', East Asian Width property 'N' -- TAMIL SIGN VIRAMA
+ "\u0bb7" # Fused, Category 'Lo', East Asian Width property 'N' -- TAMIL LETTER SSA
+ "\u0bcc") # MatraLR, Category 'Mc', East Asian Width property 'N' -- TAMIL VOWEL SIGN AU
# 23107-terminal-suppt.pdf suggests wcwidth.wcwidth should return (3, 0, 0, 4)
expect_length_each = (1, 0, 1, 0)
@@ -348,10 +340,10 @@ def test_kannada_script():
# This test adapted from https://www.unicode.org/L2/L2023/23107-terminal-suppt.pdf
# |ರ್ಝೈ|
# |123|
- phrase = (u"\u0cb0" # Repha, Category 'Lo', East Asian Width property 'N' -- KANNADA LETTER RA
- u"\u0ccd" # Joiner, Category 'Mn', East Asian Width property 'N' -- KANNADA SIGN VIRAMA
- u"\u0c9d" # Base, Category 'Lo', East Asian Width property 'N' -- KANNADA LETTER JHA
- u"\u0cc8") # MatraUR, Category 'Mc', East Asian Width property 'N' -- KANNADA VOWEL SIGN AI
+ phrase = ("\u0cb0" # Repha, Category 'Lo', East Asian Width property 'N' -- KANNADA LETTER RA
+ "\u0ccd" # Joiner, Category 'Mn', East Asian Width property 'N' -- KANNADA SIGN VIRAMA
+ "\u0c9d" # Base, Category 'Lo', East Asian Width property 'N' -- KANNADA LETTER JHA
+ "\u0cc8") # MatraUR, Category 'Mc', East Asian Width property 'N' -- KANNADA VOWEL SIGN AI
# 23107-terminal-suppt.pdf suggests should be (2, 0, 3, 1)
expect_length_each = (1, 0, 1, 0)
# I believe the correct final width *should* be 3 or 4.
@@ -370,10 +362,10 @@ def test_kannada_script_2():
# This test adapted from https://www.unicode.org/L2/L2023/23107-terminal-suppt.pdf
# |ರ಼್ಚ|
# |12|
- phrase = (u"\u0cb0" # Base, Category 'Lo', East Asian Width property 'N' -- KANNADA LETTER RA
- u"\u0cbc" # Nukta, Category 'Mn', East Asian Width property 'N' -- KANNADA SIGN NUKTA
- u"\u0ccd" # Joiner, Category 'Lo', East Asian Width property 'N' -- KANNADA SIGN VIRAMA
- u"\u0c9a") # Subjoin, Category 'Mc', East Asian Width property 'N' -- KANNADA LETTER CA
+ phrase = ("\u0cb0" # Base, Category 'Lo', East Asian Width property 'N' -- KANNADA LETTER RA
+ "\u0cbc" # Nukta, Category 'Mn', East Asian Width property 'N' -- KANNADA SIGN NUKTA
+ "\u0ccd" # Joiner, Category 'Lo', East Asian Width property 'N' -- KANNADA SIGN VIRAMA
+ "\u0c9a") # Subjoin, Category 'Mc', East Asian Width property 'N' -- KANNADA LETTER CA
# 23107-terminal-suppt.pdf suggests wcwidth.wcwidth should return (2, 0, 0, 1)
expect_length_each = (1, 0, 0, 1)
# I believe the final width is correct, but maybe for the wrong reasons!
@@ -392,11 +384,17 @@ def test_zero_wide_conflict():
# Test characters considered both "wide" and "zero" width
# - (0x03000, 0x0303e,), # Ideographic Space ..Ideographic Variation In
# + (0x03000, 0x03029,), # Ideographic Space ..Hangzhou Numeral Nine
- assert wcwidth.wcwidth(unichr(0x03029), unicode_version='4.1.0') == 2
- assert wcwidth.wcwidth(unichr(0x0302a), unicode_version='4.1.0') == 0
+ assert wcwidth.wcwidth(chr(0x03029), unicode_version='4.1.0') == 2
+ assert wcwidth.wcwidth(chr(0x0302a), unicode_version='4.1.0') == 0
# - (0x03099, 0x030ff,), # Combining Katakana-hirag..Katakana Digraph Koto
# + (0x0309b, 0x030ff,), # Katakana-hiragana Voiced..Katakana Digraph Koto
- assert wcwidth.wcwidth(unichr(0x03099), unicode_version='4.1.0') == 0
- assert wcwidth.wcwidth(unichr(0x0309a), unicode_version='4.1.0') == 0
- assert wcwidth.wcwidth(unichr(0x0309b), unicode_version='4.1.0') == 2
+ assert wcwidth.wcwidth(chr(0x03099), unicode_version='4.1.0') == 0
+ assert wcwidth.wcwidth(chr(0x0309a), unicode_version='4.1.0') == 0
+ assert wcwidth.wcwidth(chr(0x0309b), unicode_version='4.1.0') == 2
+
+def test_soft_hyphen():
+ # Test SOFT HYPHEN: category 'Cf' characters are usually zero-width, but most
+ # implementations agree to draw it as '1' cell, visually
+ # indistinguishable from a space, ' ', in Konsole for example.
+ assert wcwidth.wcwidth(chr(0x000ad)) == 1
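
For a quick standalone illustration of the behaviours these core tests pin down (expected values mirror the expectations above; assumes the wcwidth package is importable):

    import wcwidth

    assert wcwidth.wcwidth('\u00ad') == 1        # SOFT HYPHEN drawn as one cell
    assert wcwidth.wcswidth('cafe\u0301') == 4   # combining acute accent adds no width
    assert wcwidth.wcswidth('abc\x00def') == 6   # NUL is width 0
    assert wcwidth.wcswidth('\x1b[0m') == -1     # a C0 control makes the whole string -1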
diff --git a/contrib/python/wcwidth/py3/tests/test_emojis.py b/contrib/python/wcwidth/py3/tests/test_emojis.py
index 4f88e2330e5..310d0c3d44e 100644
--- a/contrib/python/wcwidth/py3/tests/test_emojis.py
+++ b/contrib/python/wcwidth/py3/tests/test_emojis.py
@@ -1,22 +1,14 @@
# std imports
import os
-import codecs
# 3rd party
import pytest
-try:
- # python 2
- _ = unichr
-except NameError:
- # python 3
- unichr = chr
-
# some tests cannot be done on some builds of python, where the internal
# unicode structure is limited to 0x10000 for memory conservation,
# "ValueError: unichr() arg not in range(0x10000) (narrow Python build)"
try:
- unichr(0x2fffe)
+ chr(0x2fffe)
NARROW_ONLY = False
except ValueError:
NARROW_ONLY = True
@@ -27,18 +19,18 @@ import wcwidth
def make_sequence_from_line(line):
# convert '002A FE0F ; ..' -> (0x2a, 0xfe0f) -> chr(0x2a) + chr(0xfe0f)
- return ''.join(unichr(int(cp, 16)) for cp in line.split(';', 1)[0].strip().split())
+ return ''.join(chr(int(cp, 16)) for cp in line.split(';', 1)[0].strip().split())
@pytest.mark.skipif(NARROW_ONLY, reason="Test cannot verify on python 'narrow' builds")
def emoji_zwj_sequence():
- u"""
+ """
Emoji zwj sequence of four codepoints is just 2 cells.
"""
- phrase = (u"\U0001f469" # Base, Category So, East Asian Width property 'W' -- WOMAN
- u"\U0001f3fb" # Modifier, Category Sk, East Asian Width property 'W' -- EMOJI MODIFIER FITZPATRICK TYPE-1-2
- u"\u200d" # Joiner, Category Cf, East Asian Width property 'N' -- ZERO WIDTH JOINER
- u"\U0001f4bb") # Fused, Category So, East Asian Width property 'W' -- PERSONAL COMPUTER
+ phrase = ("\U0001f469" # Base, Category So, East Asian Width property 'W' -- WOMAN
+ "\U0001f3fb" # Modifier, Category Sk, East Asian Width property 'W' -- EMOJI MODIFIER FITZPATRICK TYPE-1-2
+ "\u200d" # Joiner, Category Cf, East Asian Width property 'N' -- ZERO WIDTH JOINER
+ "\U0001f4bb") # Fused, Category So, East Asian Width property 'W' -- PERSONAL COMPUTER
# This test adapted from https://www.unicode.org/L2/L2023/23107-terminal-suppt.pdf
expect_length_each = (2, 0, 0, 2)
expect_length_phrase = 2
@@ -54,12 +46,12 @@ def emoji_zwj_sequence():
@pytest.mark.skipif(NARROW_ONLY, reason="Test cannot verify on python 'narrow' builds")
def test_unfinished_zwj_sequence():
- u"""
+ """
Ensure index-out-of-bounds does not occur for zero-width joiner without any following character
"""
- phrase = (u"\U0001f469" # Base, Category So, East Asian Width property 'W' -- WOMAN
- u"\U0001f3fb" # Modifier, Category Sk, East Asian Width property 'W' -- EMOJI MODIFIER FITZPATRICK TYPE-1-2
- u"\u200d") # Joiner, Category Cf, East Asian Width property 'N' -- ZERO WIDTH JOINER
+ phrase = ("\U0001f469" # Base, Category So, East Asian Width property 'W' -- WOMAN
+ "\U0001f3fb" # Modifier, Category Sk, East Asian Width property 'W' -- EMOJI MODIFIER FITZPATRICK TYPE-1-2
+ "\u200d") # Joiner, Category Cf, East Asian Width property 'N' -- ZERO WIDTH JOINER
expect_length_each = (2, 0, 0)
expect_length_phrase = 2
@@ -77,9 +69,9 @@ def test_non_recommended_zwj_sequence():
"""
Verify ZWJ is measured as though successful with characters that cannot be joined; wcwidth does not verify
"""
- phrase = (u"\U0001f469" # Base, Category So, East Asian Width property 'W' -- WOMAN
- u"\U0001f3fb" # Modifier, Category Sk, East Asian Width property 'W' -- EMOJI MODIFIER FITZPATRICK TYPE-1-2
- u"\u200d") # Joiner, Category Cf, East Asian Width property 'N' -- ZERO WIDTH JOINER
+ phrase = ("\U0001f469" # Base, Category So, East Asian Width property 'W' -- WOMAN
+ "\U0001f3fb" # Modifier, Category Sk, East Asian Width property 'W' -- EMOJI MODIFIER FITZPATRICK TYPE-1-2
+ "\u200d") # Joiner, Category Cf, East Asian Width property 'N' -- ZERO WIDTH JOINER
expect_length_each = (2, 0, 0)
expect_length_phrase = 2
@@ -95,11 +87,11 @@ def test_non_recommended_zwj_sequence():
@pytest.mark.skipif(NARROW_ONLY, reason="Test cannot verify on python 'narrow' builds")
def test_another_emoji_zwj_sequence():
phrase = (
- u"\u26F9" # PERSON WITH BALL
- u"\U0001F3FB" # EMOJI MODIFIER FITZPATRICK TYPE-1-2
- u"\u200D" # ZERO WIDTH JOINER
- u"\u2640" # FEMALE SIGN
- u"\uFE0F") # VARIATION SELECTOR-16
+ "\u26F9" # PERSON WITH BALL
+ "\U0001F3FB" # EMOJI MODIFIER FITZPATRICK TYPE-1-2
+ "\u200D" # ZERO WIDTH JOINER
+ "\u2640" # FEMALE SIGN
+ "\uFE0F") # VARIATION SELECTOR-16
expect_length_each = (1, 0, 0, 1, 0)
expect_length_phrase = 2
@@ -121,17 +113,17 @@ def test_longer_emoji_zwj_sequence():
in a single function call.
"""
# 'Category Code', 'East Asian Width property' -- 'description'
- phrase = (u"\U0001F9D1" # 'So', 'W' -- ADULT
- u"\U0001F3FB" # 'Sk', 'W' -- EMOJI MODIFIER FITZPATRICK TYPE-1-2
- u"\u200d" # 'Cf', 'N' -- ZERO WIDTH JOINER
- u"\u2764" # 'So', 'N' -- HEAVY BLACK HEART
- u"\uFE0F" # 'Mn', 'A' -- VARIATION SELECTOR-16
- u"\u200d" # 'Cf', 'N' -- ZERO WIDTH JOINER
- u"\U0001F48B" # 'So', 'W' -- KISS MARK
- u"\u200d" # 'Cf', 'N' -- ZERO WIDTH JOINER
- u"\U0001F9D1" # 'So', 'W' -- ADULT
- u"\U0001F3FD" # 'Sk', 'W' -- EMOJI MODIFIER FITZPATRICK TYPE-4
- ) * 2
+ phrase = ("\U0001F9D1" # 'So', 'W' -- ADULT
+ "\U0001F3FB" # 'Sk', 'W' -- EMOJI MODIFIER FITZPATRICK TYPE-1-2
+ "\u200d" # 'Cf', 'N' -- ZERO WIDTH JOINER
+ "\u2764" # 'So', 'N' -- HEAVY BLACK HEART
+ "\uFE0F" # 'Mn', 'A' -- VARIATION SELECTOR-16
+ "\u200d" # 'Cf', 'N' -- ZERO WIDTH JOINER
+ "\U0001F48B" # 'So', 'W' -- KISS MARK
+ "\u200d" # 'Cf', 'N' -- ZERO WIDTH JOINER
+ "\U0001F9D1" # 'So', 'W' -- ADULT
+ "\U0001F3FD" # 'Sk', 'W' -- EMOJI MODIFIER FITZPATRICK TYPE-4
+ ) * 2
# This test adapted from https://www.unicode.org/L2/L2023/23107-terminal-suppt.pdf
expect_length_each = (2, 0, 0, 1, 0, 0, 2, 0, 2, 0) * 2
expect_length_phrase = 4
@@ -146,10 +138,10 @@ def test_longer_emoji_zwj_sequence():
def read_sequences_from_file(filename):
- fp = codecs.open(os.path.join(os.path.dirname(__file__), filename), 'r', encoding='utf-8')
+ fp = open(os.path.join(os.path.dirname(__file__), filename), 'r', encoding='utf-8')
lines = [line.strip()
- for line in fp.readlines()
- if not line.startswith('#') and line.strip()]
+ for line in fp.readlines()
+ if not line.startswith('#') and line.strip()]
fp.close()
sequences = [make_sequence_from_line(line) for line in lines]
return lines, sequences
@@ -212,8 +204,8 @@ def test_recommended_variation_16_sequences():
def test_unicode_9_vs16():
"""Verify effect of VS-16 on unicode_version 9.0 and later"""
- phrase = (u"\u2640" # FEMALE SIGN
- u"\uFE0F") # VARIATION SELECTOR-16
+ phrase = ("\u2640" # FEMALE SIGN
+ "\uFE0F") # VARIATION SELECTOR-16
expect_length_each = (1, 0)
expect_length_phrase = 2
@@ -226,10 +218,11 @@ def test_unicode_9_vs16():
assert length_each == expect_length_each
assert length_phrase == expect_length_phrase
+
def test_unicode_8_vs16():
"""Verify that VS-16 has no effect on unicode_version 8.0 and earler"""
- phrase = (u"\u2640" # FEMALE SIGN
- u"\uFE0F") # VARIATION SELECTOR-16
+ phrase = ("\u2640" # FEMALE SIGN
+ "\uFE0F") # VARIATION SELECTOR-16
expect_length_each = (1, 0)
expect_length_phrase = 1
@@ -240,4 +233,4 @@ def test_unicode_8_vs16():
# verify.
assert length_each == expect_length_each
- assert length_phrase == expect_length_phrase
\ No newline at end of file
+ assert length_phrase == expect_length_phrase
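
The VS-16 tests above reduce to the following sketch (use of the unicode_version keyword of wcswidth is assumed here, since the actual call sites fall outside the visible hunks):

    import wcwidth

    phrase = '\u2640\ufe0f'   # FEMALE SIGN + VARIATION SELECTOR-16
    assert wcwidth.wcswidth(phrase, unicode_version='9.0.0') == 2   # VS-16 promotes to wide
    assert wcwidth.wcswidth(phrase, unicode_version='8.0.0') == 1   # no effect before 9.0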
diff --git a/contrib/python/wcwidth/py3/tests/test_table_integrity.py b/contrib/python/wcwidth/py3/tests/test_table_integrity.py
index 66e63ddbe3f..e680498162a 100644
--- a/contrib/python/wcwidth/py3/tests/test_table_integrity.py
+++ b/contrib/python/wcwidth/py3/tests/test_table_integrity.py
@@ -1,15 +1,18 @@
"""
Executes verify-table-integrity.py as a unit test.
"""
+# std imports
import os
import sys
import subprocess
+# 3rd party
import pytest
+
@pytest.mark.skipif(sys.version_info[:2] != (3, 12), reason='Test only with a single version of python')
def test_verify_table_integrity():
subprocess.check_output([sys.executable, os.path.join(os.path.dirname(__file__),
os.path.pardir,
'bin',
- 'verify-table-integrity.py')])
\ No newline at end of file
+ 'verify-table-integrity.py')])
diff --git a/contrib/python/wcwidth/py3/tests/test_ucslevel.py b/contrib/python/wcwidth/py3/tests/test_ucslevel.py
index 654e835fc72..b15fb5f5a71 100644
--- a/contrib/python/wcwidth/py3/tests/test_ucslevel.py
+++ b/contrib/python/wcwidth/py3/tests/test_ucslevel.py
@@ -1,4 +1,3 @@
-# coding: utf-8
"""Unicode version level tests for wcwidth."""
# std imports
import warnings
@@ -37,7 +36,7 @@ def test_exact_410_str():
def test_exact_410_unicode():
"""wcwidth._wcmatch_version(u'4.1.0') returns equal value (unicode)."""
# given,
- given = expected = u'4.1.0'
+ given = expected = '4.1.0'
# exercise,
result = wcwidth._wcmatch_version(given)
@@ -61,7 +60,7 @@ def test_nearest_505_str():
def test_nearest_505_unicode():
"""wcwidth._wcmatch_version(u'5.0.5') returns nearest u'5.0.0'. (unicode)"""
# given
- given, expected = u'5.0.5', u'5.0.0'
+ given, expected = '5.0.5', '5.0.0'
# exercise
result = wcwidth._wcmatch_version(given)
@@ -89,7 +88,7 @@ def test_nearest_lowint40_str():
def test_nearest_lowint40_unicode():
"""wcwidth._wcmatch_version(u'4.0') returns nearest u'4.1.0'."""
# given
- given, expected = u'4.0', u'4.1.0'
+ given, expected = '4.0', '4.1.0'
warnings.resetwarnings()
wcwidth._wcmatch_version.cache_clear()
@@ -117,7 +116,7 @@ def test_nearest_800_str():
def test_nearest_800_unicode():
"""wcwidth._wcmatch_version(u'8') returns nearest u'8.0.0'."""
# given
- given, expected = u'8', u'8.0.0'
+ given, expected = '8', '8.0.0'
# exercise
result = wcwidth._wcmatch_version(given)
@@ -141,7 +140,7 @@ def test_nearest_999_str():
def test_nearest_999_unicode():
"""wcwidth._wcmatch_version(u'999.0') returns nearest (latest)."""
# given
- given, expected = u'999.0', wcwidth.list_versions()[-1]
+ given, expected = '999.0', wcwidth.list_versions()[-1]
# exercise
result = wcwidth._wcmatch_version(given)
@@ -153,7 +152,7 @@ def test_nearest_999_unicode():
def test_nonint_unicode():
"""wcwidth._wcmatch_version(u'x.y.z') returns latest (unicode)."""
# given
- given, expected = u'x.y.z', wcwidth.list_versions()[-1]
+ given, expected = 'x.y.z', wcwidth.list_versions()[-1]
warnings.resetwarnings()
wcwidth._wcmatch_version.cache_clear()
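
For context, the version matching exercised by test_ucslevel.py amounts to the following sketch (_wcmatch_version is an internal helper; expected values are taken from the tests above):

    import wcwidth

    assert wcwidth._wcmatch_version('4.1.0') == '4.1.0'                      # exact match
    assert wcwidth._wcmatch_version('5.0.5') == '5.0.0'                      # nearest available
    assert wcwidth._wcmatch_version('999.0') == wcwidth.list_versions()[-1]  # clamps to latest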