author    shadchin <shadchin@yandex-team.ru>  2022-02-10 16:44:30 +0300
committer Daniil Cherednik <dcherednik@yandex-team.ru>  2022-02-10 16:44:30 +0300
commit    2598ef1d0aee359b4b6d5fdd1758916d5907d04f (patch)
tree      012bb94d777798f1f56ac1cec429509766d05181 /contrib/python/jmespath/tests/test_compliance.py
parent    6751af0b0c1b952fede40b19b71da8025b5d8bcf (diff)
download  ydb-2598ef1d0aee359b4b6d5fdd1758916d5907d04f.tar.gz
Restoring authorship annotation for <shadchin@yandex-team.ru>. Commit 1 of 2.
Diffstat (limited to 'contrib/python/jmespath/tests/test_compliance.py')
-rw-r--r--  contrib/python/jmespath/tests/test_compliance.py  204
1 file changed, 102 insertions(+), 102 deletions(-)
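
For context, the file under diff drives jmespath's data-driven compliance suite: each JSON case file supplies a 'given' document plus cases carrying an 'expression' and either an expected 'result' or an expected 'error'. A minimal sketch of what one 'result' case exercises, using the same public API the harness calls below (the expression and input data here are hypothetical, not drawn from the compliance files):

import jmespath
from collections import OrderedDict
from jmespath.visitor import Options

# Compile once, then evaluate -- the same two steps _test_expression
# performs for every compliance case.
parsed = jmespath.compile('foo.bar[0]')
result = parsed.search({'foo': {'bar': ['baz']}},
                       options=Options(dict_cls=OrderedDict))
assert result == 'baz'
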
diff --git a/contrib/python/jmespath/tests/test_compliance.py b/contrib/python/jmespath/tests/test_compliance.py
index 86e8297027..9b79a8fde9 100644
--- a/contrib/python/jmespath/tests/test_compliance.py
+++ b/contrib/python/jmespath/tests/test_compliance.py
@@ -1,65 +1,65 @@
-import os
+import os
 import pytest
-from pprint import pformat
-from . import OrderedDict
-from . import json
-
-from jmespath.visitor import Options
-
-
-TEST_DIR = os.path.dirname(os.path.abspath(__file__))
-COMPLIANCE_DIR = os.path.join(TEST_DIR, 'compliance')
-LEGACY_DIR = os.path.join(TEST_DIR, 'legacy')
-NOT_SPECIFIED = object()
-OPTIONS = Options(dict_cls=OrderedDict)
-
-
+from pprint import pformat
+from . import OrderedDict
+from . import json
+
+from jmespath.visitor import Options
+
+
+TEST_DIR = os.path.dirname(os.path.abspath(__file__))
+COMPLIANCE_DIR = os.path.join(TEST_DIR, 'compliance')
+LEGACY_DIR = os.path.join(TEST_DIR, 'legacy')
+NOT_SPECIFIED = object()
+OPTIONS = Options(dict_cls=OrderedDict)
+
+
 def _load_all_cases():
-    for full_path in _walk_files():
-        if full_path.endswith('.json'):
-            for given, test_type, test_data in load_cases(full_path):
-                t = test_data
-                # Benchmark tests aren't run as part of the normal
-                # test suite, so we only care about 'result' and
-                # 'error' test_types.
-                if test_type == 'result':
+    for full_path in _walk_files():
+        if full_path.endswith('.json'):
+            for given, test_type, test_data in load_cases(full_path):
+                t = test_data
+                # Benchmark tests aren't run as part of the normal
+                # test suite, so we only care about 'result' and
+                # 'error' test_types.
+                if test_type == 'result':
                     yield (given, t['expression'], t['result'], os.path.basename(full_path))
-                elif test_type == 'error':
+                elif test_type == 'error':
                     yield (given, t['expression'], t['error'], os.path.basename(full_path))
-
-
-def _walk_files():
-    # Check for a shortcut when running the tests interactively.
-    # If a JMESPATH_TEST is defined, that file is used as the
-    # only test to run. Useful when doing feature development.
-    single_file = os.environ.get('JMESPATH_TEST')
-    if single_file is not None:
-        yield os.path.abspath(single_file)
-    else:
-        for root, dirnames, filenames in os.walk(TEST_DIR):
-            for filename in filenames:
-                yield os.path.join(root, filename)
-        for root, dirnames, filenames in os.walk(LEGACY_DIR):
-            for filename in filenames:
-                yield os.path.join(root, filename)
-
-
-def load_cases(full_path):
-    all_test_data = json.load(open(full_path), object_pairs_hook=OrderedDict)
-    for test_data in all_test_data:
-        given = test_data['given']
-        for case in test_data['cases']:
-            if 'result' in case:
-                test_type = 'result'
-            elif 'error' in case:
-                test_type = 'error'
-            elif 'bench' in case:
-                test_type = 'bench'
-            else:
-                raise RuntimeError("Unknown test type: %s" % json.dumps(case))
-            yield (given, test_type, case)
-
-
+
+
+def _walk_files():
+    # Check for a shortcut when running the tests interactively.
+    # If a JMESPATH_TEST is defined, that file is used as the
+    # only test to run. Useful when doing feature development.
+    single_file = os.environ.get('JMESPATH_TEST')
+    if single_file is not None:
+        yield os.path.abspath(single_file)
+    else:
+        for root, dirnames, filenames in os.walk(TEST_DIR):
+            for filename in filenames:
+                yield os.path.join(root, filename)
+        for root, dirnames, filenames in os.walk(LEGACY_DIR):
+            for filename in filenames:
+                yield os.path.join(root, filename)
+
+
+def load_cases(full_path):
+    all_test_data = json.load(open(full_path), object_pairs_hook=OrderedDict)
+    for test_data in all_test_data:
+        given = test_data['given']
+        for case in test_data['cases']:
+            if 'result' in case:
+                test_type = 'result'
+            elif 'error' in case:
+                test_type = 'error'
+            elif 'bench' in case:
+                test_type = 'bench'
+            else:
+                raise RuntimeError("Unknown test type: %s" % json.dumps(case))
+            yield (given, test_type, case)
+
+
 @pytest.mark.parametrize(
     'given,expression,expected,filename',
     list(_load_all_cases())
@@ -68,47 +68,47 @@ def test_compliance(given, expression, expected, filename):
     _test_expression(given, expression, expected, filename)
 
 
-def _test_expression(given, expression, expected, filename):
-    import jmespath.parser
-    try:
-        parsed = jmespath.compile(expression)
-    except ValueError as e:
-        raise AssertionError(
-            'jmespath expression failed to compile: "%s", error: "%s"' %
-            (expression, e))
-    actual = parsed.search(given, options=OPTIONS)
-    expected_repr = json.dumps(expected, indent=4)
-    actual_repr = json.dumps(actual, indent=4)
-    error_msg = ("\n\n (%s) The expression '%s' was supposed to give:\n%s\n"
-                 "Instead it matched:\n%s\nparsed as:\n%s\ngiven:\n%s" % (
-                     filename, expression, expected_repr,
-                     actual_repr, pformat(parsed.parsed),
-                     json.dumps(given, indent=4)))
-    error_msg = error_msg.replace(r'\n', '\n')
-    assert actual == expected, error_msg
-
-
-def _test_error_expression(given, expression, error, filename):
-    import jmespath.parser
-    if error not in ('syntax', 'invalid-type',
-                     'unknown-function', 'invalid-arity', 'invalid-value'):
-        raise RuntimeError("Unknown error type '%s'" % error)
-    try:
-        parsed = jmespath.compile(expression)
-        parsed.search(given)
-    except ValueError:
-        # Test passes, it raised a parse error as expected.
-        pass
-    except Exception as e:
-        # Failure because an unexpected exception was raised.
-        error_msg = ("\n\n (%s) The expression '%s' was supposed to be a "
-                     "syntax error, but it raised an unexpected error:\n\n%s" % (
-                         filename, expression, e))
-        error_msg = error_msg.replace(r'\n', '\n')
-        raise AssertionError(error_msg)
-    else:
-        error_msg = ("\n\n (%s) The expression '%s' was supposed to be a "
-                     "syntax error, but it successfully parsed as:\n\n%s" % (
-                         filename, expression, pformat(parsed.parsed)))
-        error_msg = error_msg.replace(r'\n', '\n')
-        raise AssertionError(error_msg)
+def _test_expression(given, expression, expected, filename):
+    import jmespath.parser
+    try:
+        parsed = jmespath.compile(expression)
+    except ValueError as e:
+        raise AssertionError(
+            'jmespath expression failed to compile: "%s", error: "%s"' %
+            (expression, e))
+    actual = parsed.search(given, options=OPTIONS)
+    expected_repr = json.dumps(expected, indent=4)
+    actual_repr = json.dumps(actual, indent=4)
+    error_msg = ("\n\n (%s) The expression '%s' was supposed to give:\n%s\n"
+                 "Instead it matched:\n%s\nparsed as:\n%s\ngiven:\n%s" % (
+                     filename, expression, expected_repr,
+                     actual_repr, pformat(parsed.parsed),
+                     json.dumps(given, indent=4)))
+    error_msg = error_msg.replace(r'\n', '\n')
+    assert actual == expected, error_msg
+
+
+def _test_error_expression(given, expression, error, filename):
+    import jmespath.parser
+    if error not in ('syntax', 'invalid-type',
+                     'unknown-function', 'invalid-arity', 'invalid-value'):
+        raise RuntimeError("Unknown error type '%s'" % error)
+    try:
+        parsed = jmespath.compile(expression)
+        parsed.search(given)
+    except ValueError:
+        # Test passes, it raised a parse error as expected.
+        pass
+    except Exception as e:
+        # Failure because an unexpected exception was raised.
+        error_msg = ("\n\n (%s) The expression '%s' was supposed to be a "
+                     "syntax error, but it raised an unexpected error:\n\n%s" % (
+                         filename, expression, e))
+        error_msg = error_msg.replace(r'\n', '\n')
+        raise AssertionError(error_msg)
+    else:
+        error_msg = ("\n\n (%s) The expression '%s' was supposed to be a "
+                     "syntax error, but it successfully parsed as:\n\n%s" % (
+                         filename, expression, pformat(parsed.parsed)))
+        error_msg = error_msg.replace(r'\n', '\n')
+        raise AssertionError(error_msg)
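
As the comment in _walk_files notes, setting the JMESPATH_TEST environment variable limits a run to a single case file, which is handy during feature development. A minimal sketch of that shortcut (the file path below is hypothetical; adjust it to wherever the compliance JSON lives in your checkout):

# Shell: run only one compliance file through pytest, e.g.
#   JMESPATH_TEST=tests/compliance/basic.json pytest tests/test_compliance.py
#
# Or from Python, before the test module is imported:
import os
os.environ['JMESPATH_TEST'] = 'tests/compliance/basic.json'  # hypothetical path
# _walk_files() now yields only this file, so _load_all_cases()
# parametrizes test_compliance with just its cases.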